diff --git a/polkadot/Cargo.lock b/polkadot/Cargo.lock index b7cd52b373df615734d58136dbfc655b9db48e17..65ee77948e9854831d80f2e533bf7a2de65348a6 100644 --- a/polkadot/Cargo.lock +++ b/polkadot/Cargo.lock @@ -743,20 +743,6 @@ dependencies = [ "sp-version", ] -[[package]] -name = "bp-rialto" -version = "0.1.0" -dependencies = [ - "bp-messages", - "bp-runtime", - "frame-support", - "frame-system", - "sp-api", - "sp-core", - "sp-runtime", - "sp-std", -] - [[package]] name = "bp-rococo" version = "0.1.0" @@ -779,6 +765,7 @@ version = "0.1.0" dependencies = [ "frame-support", "hash-db", + "hex-literal", "num-traits", "parity-scale-codec", "scale-info", @@ -4749,7 +4736,6 @@ dependencies = [ "log", "parity-scale-codec", "scale-info", - "serde", "sp-core", "sp-io", "sp-runtime", @@ -4772,6 +4758,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "serde", + "sp-core", "sp-finality-grandpa", "sp-io", "sp-runtime", @@ -4786,7 +4773,6 @@ dependencies = [ "bitvec", "bp-message-dispatch", "bp-messages", - "bp-rialto", "bp-runtime", "frame-benchmarking", "frame-support", diff --git a/polkadot/bridges/.config/lingua.dic b/polkadot/bridges/.config/lingua.dic index 1fc67a5a5d249488b36cb04ba514f39b10fe3b29..f4b4d55d6e88224c83193253245fb65fe761793b 100644 --- a/polkadot/bridges/.config/lingua.dic +++ b/polkadot/bridges/.config/lingua.dic @@ -1,33 +1,111 @@ 90 -annualised/MS -Apache-2.0/M -AccountId/MS -api/SM -auth -auths/SM + +&& +1KB +1MB +5MB += API/SM APIs -arg -args -aren -async +AccountId/MS +Apache-2.0/M +Autogenerated +BFT/M +BTC/S Best/MS -benchmarking/MS BlockId -BFT/M -bitfield/MS -blake2/MS -blockchain/MS -boolean -borked -BridgeStorage BlockNumber -BTC/S +BridgeStorage CLI/MS Chain1 Chain2 ChainSpec ChainTime +DOT/S +ERC-20 +Ethereum +FN +FinalizationError +GPL/M +GPLv3/M +GiB/S +Handler/MS +Hasher +HeaderA +HeaderId +InitiateChange +Instance1 +Instance2 +Instance42 +InstantCurrencyPayments +KSM/S +KYC/M +keypair/MS +KeyPair +Kovan +Lane1 +Lane2 +Lane3 +LaneId 
+MIN_SIZE +MIT/M +MMR +MaxUnrewardedRelayerEntriesAtInboundLane +MaybeExtra +MaybeOrphan +Merklized +MessageNonce +MessageNonces +MessagePayload +MetricsParams +Millau/MS +OldHeader +OutboundMessages +PoA +PoV/MS +Pre +RLP +RPC/MS +Rialto/MS +Relayer/MS +Runtime1 +Runtime2 +SIZE_FACTOR +SS58 +SS58Prefix +STALL_SYNC_TIMEOUT +SURI +ServiceFactory/MS +SignedExtension +Stringified +Submitter1 +S|N +TCP +ThisChain +TODO +U256 +Unparsed +Vec +WND/S +Westend/MS +Wococo/MS +XCM/S +XCMP/M +annualised/MS +api/SM +aren +arg +args +async +auth +auths/SM +backoff +benchmarking/MS +best_substrate_header +bitfield/MS +blake2/MS +blockchain/MS +borked chain_getBlock choosen config/MS @@ -36,146 +114,128 @@ crypto/MS customizable/B Debian/M decodable/MS -DOT/S -doesn +delivery_and_dispatch_fee +dev +dispatchable dispatchables +doesn ed25519 enum/MS -ERC-20 +entrypoint/MS ethereum/MS externality/MS extrinsic/MS extrinsics fedora/M -FN -FinalizationError -GiB/S -GPL/M -GPLv3/M -Handler/MS -HeaderA -HeaderId +functor +fuzzer +hasher +hardcoded https implementers +include/BG inherent/MS initialize/RG instantiate/B intrinsic/MS -intrinsics -InitiateChange -isn +invariant/MS +invariants io +isn +isolate/BG js +jsonrpsee +keccak keccak256/M -keypair/MS -KSM/S -Lane1 -Lane2 -Lane3 -LaneId +keyring +keystore/MS kusama/S -KYC/M -keccak -Kovan +lane +malus +max_value merkle/MS -MessageNonce -MessageNonces -Merklized -MaybeOrphan -MaybeExtra -MetricsParams -MessagePayload +metadata +millau misbehavior/SM misbehaviors -MIN_SIZE -MIT/M -max_value multivalidator/SM natively -OldHeader +no_std nonces number -no_std ok oneshot/MS others' -OutboundMessages +pallet_bridge_grandpa +pallet_bridge_messages +pallet_message_lane parablock/MS parachain/MS +param/MS parameterize/D -pallet_message_lane plancks polkadot/MS pov-block/MS -PoA -PoV/MS precommit prometheus proxying -prune_end -prune_depth provisioner/MS +probabilistically +prune_depth +prune_end +receival +reconnection redhat/M repo/MS -receival 
-RPC/MS -RLP runtime/MS -Runtime1 -Runtime2 rustc/MS -ServiceFactory/MS -SignedExtension -SIZE_FACTOR +relayer/MS +shouldn +source_at_target +source_latest_confirmed +source_latest_generated +sp_finality_grandpa +spawner sr25519 -SS58 -SS58Prefix src -S|N -SURI -source +stringified struct/MS -Submitter1 submitters/MS subsystem/MS subsystems' -shouldn +subcommand/MS synchronizer +target_at_source +target_latest_confirmed +target_latest_received taskmanager/MS teleport/RG teleportation/SM teleporter/SM teleporters testnet/MS +timeframe +tokio +timestamp trie/MS trustless/Y -ThisChain -TCP +tuple +u32 ubuntu/M -union/MSG undeliverable unfinalized +union/MSG unpruned unservable/B unsynced +updatable +validator/SM ve vec -Vec -validator/SM verifier w3f/MS +wakeup wasm/M -WND/S -XCM/S -XCMP/M -include/BG -isolate/BG -Instance1 -Instance2 -Instance42 -Pre -Rialto -stringified -Stringified -millau -Millau +websocket +x2 +~ diff --git a/polkadot/bridges/.editorconfig b/polkadot/bridges/.editorconfig index d67ffe8f90f4fadd4771c4423438d6e3c08b253c..e2375881ea0616906a877183bd8cfc625ed42357 100644 --- a/polkadot/bridges/.editorconfig +++ b/polkadot/bridges/.editorconfig @@ -14,3 +14,6 @@ indent_style=space indent_size=2 tab_width=8 end_of_line=lf + +[*.md] +max_line_length=80 diff --git a/polkadot/bridges/.gitignore b/polkadot/bridges/.gitignore index 0ab0857843256bb07e0f97192f74e6e6bada6fd3..5d10cfa41a4487247e2c331144d3dabf0ec5e6f7 100644 --- a/polkadot/bridges/.gitignore +++ b/polkadot/bridges/.gitignore @@ -18,6 +18,7 @@ hfuzz_workspace .DS_Store +.cargo .idea .vscode *.iml diff --git a/polkadot/bridges/.gitlab-ci.yml b/polkadot/bridges/.gitlab-ci.yml index b49df92c73c897c55e7a5e0269f04b187fe2e3fc..0e69a91af165143d5d71b26fceb7ecb0645a893f 100644 --- a/polkadot/bridges/.gitlab-ci.yml +++ b/polkadot/bridges/.gitlab-ci.yml @@ -15,7 +15,7 @@ variables: &default-vars GIT_DEPTH: 100 CARGO_INCREMENTAL: 0 ARCH: "x86_64" - CI_IMAGE: "paritytech/bridges-ci:production" + CI_IMAGE: 
"paritytech/bridges-ci:staging" RUST_BACKTRACE: full default: @@ -76,6 +76,7 @@ default: - if: $CI_PIPELINE_SOURCE == "pipeline" when: never - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 + - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]{4}-[0-9]{2}-[0-9]{2}.*$/ # i.e. v2021-09-27, v2021-09-27-1 # there are two types of nightly pipelines: # 1. this one is triggered by the schedule with $PIPELINE == "nightly", it's for releasing. # this job runs only on nightly pipeline with the mentioned variable, against `master` branch @@ -93,26 +94,22 @@ clippy-nightly: stage: lint <<: *docker-env <<: *test-refs - variables: - RUSTFLAGS: "-D warnings" script: - - cargo +nightly clippy --all-targets - # FIXME: remove when all the warns are fixed - allow_failure: true + - SKIP_WASM_BUILD=1 cargo +nightly clippy --all-targets -- -A clippy::redundant_closure fmt: stage: lint <<: *docker-env <<: *test-refs script: - - cargo fmt --all -- --check + - cargo +nightly fmt --all -- --check spellcheck: stage: lint <<: *docker-env <<: *test-refs script: - - cargo spellcheck check -m 1 -vv $(find modules/currency-exchange/src -name "*.rs") + - cargo spellcheck check -vvvv --cfg=.config/spellcheck.toml --checkers hunspell -m 1 #### stage: check @@ -121,11 +118,11 @@ check: <<: *docker-env <<: *test-refs script: &check-script - - time cargo check --verbose --workspace + - SKIP_WASM_BUILD=1 time cargo check --locked --verbose --workspace # Check Rialto benchmarks runtime - - time cargo check -p rialto-runtime --features runtime-benchmarks --verbose + - SKIP_WASM_BUILD=1 time cargo check -p rialto-runtime --locked --features runtime-benchmarks --verbose # Check Millau benchmarks runtime - - time cargo check -p millau-runtime --features runtime-benchmarks --verbose + - SKIP_WASM_BUILD=1 time cargo check -p millau-runtime --locked --features runtime-benchmarks --verbose check-nightly: stage: check @@ -141,8 +138,13 @@ test: stage: test <<: *docker-env <<: *test-refs +# variables: +# 
RUSTFLAGS: "-D warnings" script: &test-script - - time cargo test --verbose --workspace + - time cargo fetch + - time cargo fetch --manifest-path=`cargo metadata --format-version=1 | jq --compact-output --raw-output ".packages[] | select(.name == \"polkadot-test-runtime\").manifest_path"` + - time cargo fetch --manifest-path=`cargo metadata --format-version=1 | jq --compact-output --raw-output ".packages[] | select(.name == \"polkadot-runtime\").manifest_path"` + - CARGO_NET_OFFLINE=true time cargo test --verbose --workspace test-nightly: stage: test @@ -189,16 +191,19 @@ build: <<: *collect-artifacts # master script: &build-script - - time cargo build --release --verbose --workspace + - time cargo fetch + - time cargo fetch --manifest-path=`cargo metadata --format-version=1 | jq --compact-output --raw-output ".packages[] | select(.name == \"polkadot-test-runtime\").manifest_path"` + - time cargo fetch --manifest-path=`cargo metadata --format-version=1 | jq --compact-output --raw-output ".packages[] | select(.name == \"polkadot-runtime\").manifest_path"` + - CARGO_NET_OFFLINE=true time cargo build --release --verbose --workspace after_script: # Prepare artifacts - mkdir -p ./artifacts - strip ./target/release/rialto-bridge-node - mv -v ./target/release/rialto-bridge-node ./artifacts/ + - strip ./target/release/rialto-parachain-collator + - mv -v ./target/release/rialto-parachain-collator ./artifacts/ - strip ./target/release/millau-bridge-node - mv -v ./target/release/millau-bridge-node ./artifacts/ - - strip ./target/release/ethereum-poa-relay - - mv -v ./target/release/ethereum-poa-relay ./artifacts/ - strip ./target/release/substrate-relay - mv -v ./target/release/substrate-relay ./artifacts/ - mv -v ./deployments/local-scripts/bridge-entrypoint.sh ./artifacts/ @@ -223,6 +228,9 @@ build-nightly: GIT_STRATEGY: none DOCKERFILE: ci.Dockerfile IMAGE_NAME: docker.io/paritytech/$CI_JOB_NAME + VAULT_SERVER_URL: "https://vault.parity-mgmt-vault.parity.io" + 
VAULT_AUTH_PATH: "gitlab-parity-io-jwt" + VAULT_AUTH_ROLE: "cicd_gitlab_parity_${CI_PROJECT_NAME}" needs: - job: build artifacts: true @@ -233,8 +241,15 @@ build-nightly: VERSION=$(echo ${CI_COMMIT_REF_NAME} | sed -r 's#/+#-#g'); fi - echo "Effective tags = ${VERSION} sha-${CI_COMMIT_SHORT_SHA} latest" + secrets: + DOCKER_HUB_USER: + vault: cicd/gitlab/parity/DOCKER_HUB_USER@kv + file: false + DOCKER_HUB_PASS: + vault: cicd/gitlab/parity/DOCKER_HUB_PASS@kv + file: false script: - - test "${Docker_Hub_User_Parity}" -a "${Docker_Hub_Pass_Parity}" || + - test "${DOCKER_HUB_USER}" -a "${DOCKER_HUB_PASS}" || ( echo "no docker credentials provided"; exit 1 ) - cd ./artifacts - buildah bud @@ -248,24 +263,24 @@ build-nightly: --tag "${IMAGE_NAME}:latest" --file "${DOCKERFILE}" . # The job will success only on the protected branch - - echo "$Docker_Hub_Pass_Parity" | - buildah login --username "$Docker_Hub_User_Parity" --password-stdin docker.io + - echo "${DOCKER_HUB_PASS}" | + buildah login --username "${DOCKER_HUB_USER}" --password-stdin docker.io - buildah info - buildah push --format=v2s2 "${IMAGE_NAME}:${VERSION}" - buildah push --format=v2s2 "${IMAGE_NAME}:sha-${CI_COMMIT_SHORT_SHA}" - buildah push --format=v2s2 "${IMAGE_NAME}:latest" after_script: - - env REGISTRY_AUTH_FILE= buildah logout "$IMAGE_NAME" + - env REGISTRY_AUTH_FILE= buildah logout --all rialto-bridge-node: stage: publish <<: *build-push-image -millau-bridge-node: +rialto-parachain-collator: stage: publish <<: *build-push-image -ethereum-poa-relay: +millau-bridge-node: stage: publish <<: *build-push-image diff --git a/polkadot/bridges/.maintain/millau-weight-template.hbs b/polkadot/bridges/.maintain/millau-weight-template.hbs new file mode 100644 index 0000000000000000000000000000000000000000..7a2a67627bb20c456be413f7b86487cecfd8168b --- /dev/null +++ b/polkadot/bridges/.maintain/millau-weight-template.hbs @@ -0,0 +1,103 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
+// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Autogenerated weights for `{{pallet}}` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} +//! DATE: {{date}}, STEPS: {{cmd.steps}}, REPEAT: {{cmd.repeat}} +//! LOW RANGE: {{cmd.lowest_range_values}}, HIGH RANGE: {{cmd.highest_range_values}} +//! EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}} +//! CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} + +// Executed Command: +{{#each args as |arg|~}} +// {{arg}} +{{/each}} + +#![allow(clippy::all)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for `{{pallet}}`. +pub trait WeightInfo { + {{~#each benchmarks as |benchmark|}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{c.name}}: u32, {{/each~}} + ) -> Weight; + {{~/each}} +} + +/// Weights for `{{pallet}}` using the Millau node and recommended hardware. 
+pub struct MillauWeight(PhantomData); +impl WeightInfo for MillauWeight { + {{~#each benchmarks as |benchmark|}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} + ) -> Weight { + ({{underscore benchmark.base_weight}} as Weight) + {{~#each benchmark.component_weight as |cw|}} + .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) + {{~/each}} + {{~#if (ne benchmark.base_reads "0")}} + .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight)) + {{~/if}} + {{~#each benchmark.component_reads as |cr|}} + .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) + {{~/each}} + {{~#if (ne benchmark.base_writes "0")}} + .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight)) + {{~/if}} + {{~#each benchmark.component_writes as |cw|}} + .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) + {{~/each}} + } + {{~/each}} +} + +// For backwards compatibility and tests +impl WeightInfo for () { + {{~#each benchmarks as |benchmark|}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} + ) -> Weight { + ({{underscore benchmark.base_weight}} as Weight) + {{~#each benchmark.component_weight as |cw|}} + .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) + {{~/each}} + {{~#if (ne benchmark.base_reads "0")}} + .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as Weight)) + {{~/if}} + {{~#each benchmark.component_reads as |cr|}} + .saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) + {{~/each}} + {{~#if (ne benchmark.base_writes "0")}} + .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as Weight)) + {{~/if}} 
+ {{~#each benchmark.component_writes as |cw|}} + .saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) + {{~/each}} + } + {{~/each}} +} diff --git a/polkadot/bridges/.maintain/rialto-weight-template.hbs b/polkadot/bridges/.maintain/rialto-weight-template.hbs index 4868e6c84bb2810f028ef94a8944b0da219363cc..cb1b58d23b26420e9f6d89c9b7fc6ce3b7fa9141 100644 --- a/polkadot/bridges/.maintain/rialto-weight-template.hbs +++ b/polkadot/bridges/.maintain/rialto-weight-template.hbs @@ -8,13 +8,14 @@ // Parity Bridges Common is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . +// along with Parity Bridges Common. If not, see +. -//! Autogenerated weights for {{cmd.pallet}} +//! Autogenerated weights for `{{pallet}}` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} //! DATE: {{date}}, STEPS: {{cmd.steps}}, REPEAT: {{cmd.repeat}} @@ -34,70 +35,74 @@ use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use sp_std::marker::PhantomData; -/// Weight functions needed for {{pallet}}. +/// Weight functions needed for `{{pallet}}`. pub trait WeightInfo { - {{~#each benchmarks as |benchmark|}} - fn {{benchmark.name~}} - ( - {{~#each benchmark.components as |c| ~}} - {{c.name}}: u32, {{/each~}} - ) -> Weight; - {{~/each}} +{{~#each benchmarks as |benchmark|}} +fn {{benchmark.name~}} +( +{{~#each benchmark.components as |c| ~}} +{{c.name}}: u32, {{/each~}} +) -> Weight; +{{~/each}} } -/// Weights for {{pallet}} using the Rialto node and recommended hardware. 
+/// Weights for `{{pallet}}` using the Rialto node and recommended hardware. pub struct RialtoWeight(PhantomData); -impl WeightInfo for RialtoWeight { - {{~#each benchmarks as |benchmark|}} - fn {{benchmark.name~}} - ( - {{~#each benchmark.components as |c| ~}} - {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} - ) -> Weight { - ({{underscore benchmark.base_weight}} as Weight) - {{~#each benchmark.component_weight as |cw|}} - .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) - {{~/each}} - {{~#if (ne benchmark.base_reads "0")}} - .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight)) - {{~/if}} - {{~#each benchmark.component_reads as |cr|}} - .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) - {{~/each}} - {{~#if (ne benchmark.base_writes "0")}} - .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight)) - {{~/if}} - {{~#each benchmark.component_writes as |cw|}} - .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) - {{~/each}} - } - {{~/each}} -} + impl WeightInfo for RialtoWeight { + {{~#each benchmarks as |benchmark|}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} + ) -> Weight { + ({{underscore benchmark.base_weight}} as Weight) + {{~#each benchmark.component_weight as |cw|}} + .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) + {{~/each}} + {{~#if (ne benchmark.base_reads "0")}} + .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight)) + {{~/if}} + {{~#each benchmark.component_reads as |cr|}} + .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as + Weight))) + {{~/each}} + {{~#if (ne benchmark.base_writes "0")}} + 
.saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight)) + {{~/if}} + {{~#each benchmark.component_writes as |cw|}} + .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as + Weight))) + {{~/each}} + } + {{~/each}} + } -// For backwards compatibility and tests -impl WeightInfo for () { - {{~#each benchmarks as |benchmark|}} - fn {{benchmark.name~}} - ( - {{~#each benchmark.components as |c| ~}} - {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} - ) -> Weight { - ({{underscore benchmark.base_weight}} as Weight) - {{~#each benchmark.component_weight as |cw|}} - .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) - {{~/each}} - {{~#if (ne benchmark.base_reads "0")}} - .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as Weight)) - {{~/if}} - {{~#each benchmark.component_reads as |cr|}} - .saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) - {{~/each}} - {{~#if (ne benchmark.base_writes "0")}} - .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as Weight)) - {{~/if}} - {{~#each benchmark.component_writes as |cw|}} - .saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) - {{~/each}} - } - {{~/each}} -} + // For backwards compatibility and tests + impl WeightInfo for () { + {{~#each benchmarks as |benchmark|}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} + ) -> Weight { + ({{underscore benchmark.base_weight}} as Weight) + {{~#each benchmark.component_weight as |cw|}} + .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) + {{~/each}} + {{~#if (ne benchmark.base_reads "0")}} + .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as Weight)) + {{~/if}} + {{~#each 
benchmark.component_reads as |cr|}} + .saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as + Weight))) + {{~/each}} + {{~#if (ne benchmark.base_writes "0")}} + .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as Weight)) + {{~/if}} + {{~#each benchmark.component_writes as |cw|}} + .saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as + Weight))) + {{~/each}} + } + {{~/each}} + } \ No newline at end of file diff --git a/polkadot/bridges/CODEOWNERS b/polkadot/bridges/CODEOWNERS new file mode 100644 index 0000000000000000000000000000000000000000..3941ba8451a170f498c9070cca02e8032bc47f4a --- /dev/null +++ b/polkadot/bridges/CODEOWNERS @@ -0,0 +1,21 @@ +# Lists some code owners. +# +# A codeowner just oversees some part of the codebase. If an owned file is changed then the +# corresponding codeowner receives a review request. An approval of the codeowner might be +# required for merging a PR (depends on repository settings). +# +# For details about syntax, see: +# https://help.github.com/en/articles/about-code-owners +# But here are some important notes: +# +# - Glob syntax is git-like, e.g. `/core` means the core directory in the root, unlike `core` +# which can be everywhere. +# - Multiple owners are supported. +# - Either handle (e.g, @github_user or @github_org/team) or email can be used. Keep in mind, +# that handles might work better because they are more recognizable on GitHub, +# eyou can use them for mentioning unlike an email. +# - The latest matching rule, if multiple, takes precedence. + +# CI +/.github/ @paritytech/ci +/.gitlab-ci.yml @paritytech/ci diff --git a/polkadot/bridges/Cargo.lock b/polkadot/bridges/Cargo.lock deleted file mode 100644 index 86b075028125f626e1c70ca594554bd0fcfbd9c5..0000000000000000000000000000000000000000 --- a/polkadot/bridges/Cargo.lock +++ /dev/null @@ -1,10202 +0,0 @@ -# This file is automatically @generated by Cargo. 
-# It is not intended for manual editing. -version = 3 - -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", -] - -[[package]] -name = "addr2line" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" -dependencies = [ - "gimli 0.23.0", -] - -[[package]] -name = "addr2line" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7a2e47a1fbe209ee101dd6d61285226744c6c8d3c21c8dc878ba6cb9f467f3a" -dependencies = [ - "gimli 0.24.0", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "aead" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" -dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "aes" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd2bc6d3f370b5666245ff421e231cba4353df936e26986d2918e61a8fd6aef6" -dependencies = [ - "aes-soft", - "aesni", - "block-cipher", -] - -[[package]] -name = "aes-gcm" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0301c9e9c443494d970a07885e8cf3e587bae8356a1d5abd0999068413f7205f" -dependencies = [ - "aead", - "aes", - "block-cipher", - "ghash", - "subtle 2.4.0", -] - -[[package]] -name = "aes-soft" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63dd91889c49327ad7ef3b500fd1109dbd3c509a03db0d4a9ce413b79f575cb6" -dependencies = [ - "block-cipher", - 
"byteorder", - "opaque-debug 0.3.0", -] - -[[package]] -name = "aesni" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6fe808308bb07d393e2ea47780043ec47683fcf19cf5efc8ca51c50cc8c68a" -dependencies = [ - "block-cipher", - "opaque-debug 0.3.0", -] - -[[package]] -name = "ahash" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" - -[[package]] -name = "aho-corasick" -version = "0.7.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" -dependencies = [ - "memchr", -] - -[[package]] -name = "ansi_term" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" -dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "anyhow" -version = "1.0.38" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1" - -[[package]] -name = "approx" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0e60b75072ecd4168020818c0107f2857bb6c4e64252d8d3983f6263b40a5c3" -dependencies = [ - "num-traits", -] - -[[package]] -name = "arbitrary" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "698b65a961a9d730fb45b6b0327e20207810c9f61ee421b082b27ba003f49e2b" - -[[package]] -name = "array_tool" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8f8cb5d814eb646a863c4f24978cff2880c4be96ad8cde2c0f0678732902e271" - -[[package]] -name = "arrayref" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" - -[[package]] -name = "arrayvec" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" -dependencies = [ - "nodrop", -] - -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - -[[package]] -name = "arrayvec" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a2f58b0bb10c380af2b26e57212856b8c9a59e0925b4c20f4a174a49734eaf7" - -[[package]] -name = "asn1_der" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6e24d2cce90c53b948c46271bfb053e4bdc2db9b5d3f65e20f8cf28a1b7fc3" - -[[package]] -name = "assert_matches" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" - -[[package]] -name = "async-attributes" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" -dependencies = [ - "quote", - "syn", -] - -[[package]] -name = "async-channel" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" -dependencies = [ - "concurrent-queue", - "event-listener", - "futures-core", -] - -[[package]] -name = "async-executor" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eb877970c7b440ead138f6321a3b5395d6061183af779340b65e20c0fede9146" -dependencies = [ - "async-task", - "concurrent-queue", - "fastrand", - "futures-lite", - "once_cell", - "vec-arena", -] - -[[package]] -name = "async-global-executor" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9586ec52317f36de58453159d48351bc244bc24ced3effc1fce22f3d48664af6" -dependencies = [ - "async-channel", - "async-executor", - "async-io", - "async-mutex", - "blocking", - "futures-lite", - "num_cpus", - "once_cell", -] - -[[package]] -name = "async-io" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9315f8f07556761c3e48fec2e6b276004acf426e6dc068b2c2251854d65ee0fd" -dependencies = [ - "concurrent-queue", - "fastrand", - "futures-lite", - "libc", - "log", - "nb-connect", - "once_cell", - "parking", - "polling", - "vec-arena", - "waker-fn", - "winapi 0.3.9", -] - -[[package]] -name = "async-lock" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1996609732bde4a9988bc42125f55f2af5f3c36370e27c778d5191a4a1b63bfb" -dependencies = [ - "event-listener", -] - -[[package]] -name = "async-mutex" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" -dependencies = [ - "event-listener", -] - -[[package]] -name = "async-process" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef37b86e2fa961bae5a4d212708ea0154f904ce31d1a4a7f47e1bbc33a0c040b" -dependencies = [ - "async-io", - "blocking", - "cfg-if 1.0.0", - "event-listener", - "futures-lite", - "once_cell", - "signal-hook", - "winapi 0.3.9", -] - -[[package]] -name = "async-std" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f06685bad74e0570f5213741bea82158279a4103d988e57bfada11ad230341" 
-dependencies = [ - "async-attributes", - "async-channel", - "async-global-executor", - "async-io", - "async-lock", - "async-process", - "crossbeam-utils 0.8.3", - "futures-channel", - "futures-core", - "futures-io", - "futures-lite", - "gloo-timers", - "kv-log-macro", - "log", - "memchr", - "num_cpus", - "once_cell", - "pin-project-lite 0.2.4", - "pin-utils", - "slab", - "wasm-bindgen-futures", -] - -[[package]] -name = "async-std-resolver" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665c56111e244fe38e7708ee10948a4356ad6a548997c21f5a63a0f4e0edc4d" -dependencies = [ - "async-std", - "async-trait", - "futures-io", - "futures-util", - "pin-utils", - "trust-dns-resolver", -] - -[[package]] -name = "async-task" -version = "4.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" - -[[package]] -name = "async-tls" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f23d769dbf1838d5df5156e7b1ad404f4c463d1ac2c6aeb6cd943630f8a8400" -dependencies = [ - "futures-core", - "futures-io", - "rustls 0.19.0", - "webpki 0.21.4", - "webpki-roots", -] - -[[package]] -name = "async-trait" -version = "0.1.50" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b98e84bbb4cbcdd97da190ba0c58a1bb0de2c1fdf67d159e192ed766aeca722" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "asynchronous-codec" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb4401f0a3622dad2e0763fa79e0eb328bc70fb7dccfdd645341f00d671247d6" -dependencies = [ - "bytes 1.0.1", - "futures-sink", - "futures-util", - "memchr", - "pin-project-lite 0.2.4", -] - -[[package]] -name = "asynchronous-codec" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f0de5164e5edbf51c45fb8c2d9664ae1c095cce1b265ecf7569093c0d66ef690" -dependencies = [ - "bytes 1.0.1", - "futures-sink", - "futures-util", - "memchr", - "pin-project-lite 0.2.4", -] - -[[package]] -name = "atomic" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" -dependencies = [ - "autocfg", -] - -[[package]] -name = "atomic-waker" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "autocfg" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" - -[[package]] -name = "backoff" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721c249ab59cbc483ad4294c9ee2671835c1e43e9ffc277e6b4ecfef733cfdc5" -dependencies = [ - "instant", - "rand 0.7.3", -] - -[[package]] -name = "backtrace" -version = "0.3.56" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" -dependencies = [ - "addr2line 0.14.1", - "cfg-if 1.0.0", - "libc", - "miniz_oxide", - "object 0.23.0", - "rustc-demangle", -] - -[[package]] -name = "base-x" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" - -[[package]] -name = "base58" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" - -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - -[[package]] -name = "base64" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" - -[[package]] -name = "beef" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6736e2428df2ca2848d846c43e88745121a6654696e349ce0054a420815a7409" - -[[package]] -name = "bincode" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d175dfa69e619905c4c3cdb7c3c203fa3bdd5d51184e3afdb2742c0280493772" -dependencies = [ - "byteorder", - "serde", -] - -[[package]] -name = "bindgen" -version = "0.54.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66c0bb6167449588ff70803f4127f0684f9063097eca5016f37eb52b92c2cf36" -dependencies = [ - "bitflags", - "cexpr", - "cfg-if 0.1.10", - "clang-sys", - "clap", - "env_logger 0.7.1", - "lazy_static", - "lazycell", - "log", - "peeking_take_while", - "proc-macro2", - "quote", - "regex", - "rustc-hash", - "shlex", - "which 3.1.1", -] - -[[package]] -name = "bitflags" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" - -[[package]] -name = "bitvec" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5011ffc90248764d7005b0e10c7294f5aa1bd87d9dd7248f4ad475b347c294d" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - -[[package]] -name = "blake2" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"10a5720225ef5daecf08657f23791354e1685a8c91a4c60c7f3d3b2892f978f4" -dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "blake2-rfc" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" -dependencies = [ - "arrayvec 0.4.12", - "constant_time_eq", -] - -[[package]] -name = "blake2b_simd" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", -] - -[[package]] -name = "blake2s_simd" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", -] - -[[package]] -name = "blake3" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9ff35b701f3914bdb8fad3368d822c766ef2858b2583198e41639b936f09d3f" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "cc", - "cfg-if 0.1.10", - "constant_time_eq", - "crypto-mac 0.8.0", - "digest 0.9.0", -] - -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding 0.1.5", - "byte-tools", - "byteorder", - "generic-array 0.12.3", -] - -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "block-padding 0.2.1", - "generic-array 0.14.4", -] - -[[package]] -name = "block-cipher" -version = "0.8.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f337a3e6da609650eb74e02bc9fac7b735049f7623ab12f2e4c719316fcc7e80" -dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = [ - "byte-tools", -] - -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - -[[package]] -name = "blocking" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e170dbede1f740736619b776d7251cb1b9095c435c34d8ca9f57fcd2f335e9" -dependencies = [ - "async-channel", - "async-task", - "atomic-waker", - "fastrand", - "futures-lite", - "once_cell", -] - -[[package]] -name = "bp-currency-exchange" -version = "0.1.0" -dependencies = [ - "frame-support", - "parity-scale-codec", - "sp-api", - "sp-std", -] - -[[package]] -name = "bp-eth-poa" -version = "0.1.0" -dependencies = [ - "ethbloom 0.10.0", - "fixed-hash", - "hash-db", - "hex-literal 0.2.1", - "impl-rlp", - "impl-serde", - "libsecp256k1", - "parity-bytes", - "parity-scale-codec", - "plain_hasher", - "primitive-types", - "rlp", - "serde", - "serde-big-array", - "sp-api", - "sp-io", - "sp-runtime", - "sp-std", - "triehash", -] - -[[package]] -name = "bp-header-chain" -version = "0.1.0" -dependencies = [ - "assert_matches", - "bp-test-utils", - "finality-grandpa", - "frame-support", - "parity-scale-codec", - "serde", - "sp-core", - "sp-finality-grandpa", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "bp-kusama" -version = "0.1.0" -dependencies = [ - "bp-messages", - "bp-polkadot-core", - "bp-runtime", - "sp-api", - "sp-std", -] - -[[package]] -name = "bp-message-dispatch" -version = "0.1.0" -dependencies = [ - "bp-runtime", - 
"frame-support", - "parity-scale-codec", - "sp-std", -] - -[[package]] -name = "bp-messages" -version = "0.1.0" -dependencies = [ - "bitvec", - "bp-runtime", - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "parity-scale-codec", - "serde", - "sp-std", -] - -[[package]] -name = "bp-millau" -version = "0.1.0" -dependencies = [ - "bp-messages", - "bp-runtime", - "fixed-hash", - "frame-support", - "frame-system", - "hash256-std-hasher", - "impl-codec", - "impl-serde", - "max-encoded-len", - "parity-util-mem", - "serde", - "sp-api", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", - "sp-trie", -] - -[[package]] -name = "bp-polkadot" -version = "0.1.0" -dependencies = [ - "bp-messages", - "bp-polkadot-core", - "bp-runtime", - "sp-api", - "sp-std", -] - -[[package]] -name = "bp-polkadot-core" -version = "0.1.0" -dependencies = [ - "bp-messages", - "bp-runtime", - "frame-support", - "frame-system", - "hex", - "parity-scale-codec", - "sp-api", - "sp-core", - "sp-runtime", - "sp-std", - "sp-version", -] - -[[package]] -name = "bp-rialto" -version = "0.1.0" -dependencies = [ - "bp-messages", - "bp-runtime", - "frame-support", - "frame-system", - "sp-api", - "sp-core", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "bp-rococo" -version = "0.1.0" -dependencies = [ - "bp-messages", - "bp-polkadot-core", - "bp-runtime", - "frame-support", - "parity-scale-codec", - "smallvec 1.6.1", - "sp-api", - "sp-runtime", - "sp-std", - "sp-version", -] - -[[package]] -name = "bp-runtime" -version = "0.1.0" -dependencies = [ - "frame-support", - "hash-db", - "num-traits", - "parity-scale-codec", - "sp-core", - "sp-io", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-trie", -] - -[[package]] -name = "bp-test-utils" -version = "0.1.0" -dependencies = [ - "bp-header-chain", - "ed25519-dalek", - "finality-grandpa", - "parity-scale-codec", - "sp-application-crypto", - "sp-finality-grandpa", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "bp-westend" 
-version = "0.1.0" -dependencies = [ - "bp-header-chain", - "bp-messages", - "bp-polkadot-core", - "bp-runtime", - "parity-scale-codec", - "sp-api", - "sp-runtime", - "sp-std", - "sp-version", -] - -[[package]] -name = "bp-wococo" -version = "0.1.0" -dependencies = [ - "bp-messages", - "bp-polkadot-core", - "bp-rococo", - "bp-runtime", - "parity-scale-codec", - "sp-api", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "bridge-runtime-common" -version = "0.1.0" -dependencies = [ - "bp-message-dispatch", - "bp-messages", - "bp-runtime", - "ed25519-dalek", - "frame-support", - "hash-db", - "pallet-bridge-dispatch", - "pallet-bridge-grandpa", - "pallet-bridge-messages", - "pallet-transaction-payment", - "parity-scale-codec", - "sp-core", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-trie", -] - -[[package]] -name = "bs58" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" - -[[package]] -name = "bstr" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40b47ad93e1a5404e6c18dec46b628214fee441c70f4ab5d6942142cc268a3d" -dependencies = [ - "memchr", -] - -[[package]] -name = "build-helper" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdce191bf3fa4995ce948c8c83b4640a1745457a149e73c6db75b4ffe36aad5f" -dependencies = [ - "semver 0.6.0", -] - -[[package]] -name = "bumpalo" -version = "3.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" - -[[package]] -name = "byte-slice-cast" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65c1bf4a04a88c54f589125563643d773f3254b5c38571395e2b591c693bbc81" - -[[package]] -name = "byte-tools" -version = "0.3.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - -[[package]] -name = "byteorder" -version = "1.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" - -[[package]] -name = "bytes" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" -dependencies = [ - "byteorder", - "either", - "iovec", -] - -[[package]] -name = "bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" - -[[package]] -name = "bytes" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" - -[[package]] -name = "cache-padded" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" - -[[package]] -name = "camino" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4648c6d00a709aa069a236adcaae4f605a6241c72bf5bee79331a4b625921a9" -dependencies = [ - "serde", -] - -[[package]] -name = "cargo-platform" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0226944a63d1bf35a3b5f948dd7c59e263db83695c9e8bffc4037de02e30f1d7" -dependencies = [ - "serde", -] - -[[package]] -name = "cargo_metadata" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "081e3f0755c1f380c2d010481b6fa2e02973586d5f2b24eebb7a2a1d98b143d8" -dependencies = [ - "camino", - "cargo-platform", - "semver 0.11.0", - "semver-parser 0.10.2", - "serde", - "serde_json", -] - -[[package]] -name = 
"cc" -version = "1.0.67" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" -dependencies = [ - "jobserver", -] - -[[package]] -name = "cexpr" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" -dependencies = [ - "nom", -] - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "chacha20" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "244fbce0d47e97e8ef2f63b81d5e05882cb518c68531eb33194990d7b7e85845" -dependencies = [ - "stream-cipher", - "zeroize", -] - -[[package]] -name = "chacha20poly1305" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bf18d374d66df0c05cdddd528a7db98f78c28e2519b120855c4f84c5027b1f5" -dependencies = [ - "aead", - "chacha20", - "poly1305", - "stream-cipher", - "zeroize", -] - -[[package]] -name = "chrono" -version = "0.4.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" -dependencies = [ - "libc", - "num-integer", - "num-traits", - "time 0.1.44", - "winapi 0.3.9", -] - -[[package]] -name = "cid" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff0e3bc0b6446b3f9663c1a6aba6ef06c5aeaa1bc92bd18077be337198ab9768" -dependencies = [ - "multibase", - "multihash", - "unsigned-varint 0.5.1", -] - -[[package]] -name = "cipher" -version = "0.2.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" -dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "clang-sys" -version = "0.29.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe6837df1d5cba2397b835c8530f51723267e16abbf83892e9e5af4f0e5dd10a" -dependencies = [ - "glob", - "libc", - "libloading", -] - -[[package]] -name = "clap" -version = "2.33.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" -dependencies = [ - "ansi_term 0.11.0", - "atty", - "bitflags", - "strsim", - "textwrap", - "unicode-width", - "vec_map", - "yaml-rust", -] - -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -dependencies = [ - "bitflags", -] - -[[package]] -name = "concurrent-queue" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" -dependencies = [ - "cache-padded", -] - -[[package]] -name = "const_fn" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f92cfa0fd5690b3cf8c1ef2cabbd9b7ef22fa53cf5e1f92b05103f6d5d1cf6e7" - -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - -[[package]] -name = "core-foundation" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" -dependencies = [ - "core-foundation-sys 0.7.0", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.7.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" - -[[package]] -name = "core-foundation-sys" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" - -[[package]] -name = "cpp_demangle" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44919ecaf6f99e8e737bc239408931c9a01e9a6c74814fee8242dd2506b65390" -dependencies = [ - "cfg-if 1.0.0", - "glob", -] - -[[package]] -name = "cpuid-bool" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" - -[[package]] -name = "cpuid-bool" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" - -[[package]] -name = "cranelift-bforest" -version = "0.74.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8ca3560686e7c9c7ed7e0fe77469f2410ba5d7781b1acaa9adc8d8deea28e3e" -dependencies = [ - "cranelift-entity", -] - -[[package]] -name = "cranelift-codegen" -version = "0.74.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf9bf1ffffb6ce3d2e5ebc83549bd2436426c99b31cc550d521364cbe35d276" -dependencies = [ - "cranelift-bforest", - "cranelift-codegen-meta", - "cranelift-codegen-shared", - "cranelift-entity", - "gimli 0.24.0", - "log", - "regalloc", - "serde", - "smallvec 1.6.1", - "target-lexicon", -] - -[[package]] -name = "cranelift-codegen-meta" -version = "0.74.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cc21936a5a6d07e23849ffe83e5c1f6f50305c074f4b2970ca50c13bf55b821" -dependencies = [ - "cranelift-codegen-shared", - "cranelift-entity", -] - -[[package]] -name = 
"cranelift-codegen-shared" -version = "0.74.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca5b6ffaa87560bebe69a5446449da18090b126037920b0c1c6d5945f72faf6b" -dependencies = [ - "serde", -] - -[[package]] -name = "cranelift-entity" -version = "0.74.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d6b4a8bef04f82e4296782646f733c641d09497df2fabf791323fefaa44c64c" -dependencies = [ - "serde", -] - -[[package]] -name = "cranelift-frontend" -version = "0.74.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b783b351f966fce33e3c03498cb116d16d97a8f9978164a60920bd0d3a99c" -dependencies = [ - "cranelift-codegen", - "log", - "smallvec 1.6.1", - "target-lexicon", -] - -[[package]] -name = "cranelift-native" -version = "0.74.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77c88d3dd48021ff1e37e978a00098524abd3513444ae252c08d37b310b3d2a" -dependencies = [ - "cranelift-codegen", - "target-lexicon", -] - -[[package]] -name = "cranelift-wasm" -version = "0.74.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb6d408e2da77cdbbd65466298d44c86ae71c1785d2ab0d8657753cdb4d9d89" -dependencies = [ - "cranelift-codegen", - "cranelift-entity", - "cranelift-frontend", - "itertools 0.10.0", - "log", - "serde", - "smallvec 1.6.1", - "thiserror", - "wasmparser", -] - -[[package]] -name = "crc32fast" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "crossbeam-channel" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.3", -] - -[[package]] -name = "crossbeam-deque" -version = "0.7.3" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" -dependencies = [ - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-epoch 0.9.3", - "crossbeam-utils 0.8.3", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", - "scopeguard", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.3", - "lazy_static", - "memoffset 0.6.1", - "scopeguard", -] - -[[package]] -name = "crossbeam-queue" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "lazy_static", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" 
-dependencies = [ - "autocfg", - "cfg-if 1.0.0", - "lazy_static", -] - -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "crypto-mac" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" -dependencies = [ - "generic-array 0.12.3", - "subtle 1.0.0", -] - -[[package]] -name = "crypto-mac" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" -dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.0", -] - -[[package]] -name = "ct-logs" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c8e13110a84b6315df212c045be706af261fd364791cad863285439ebba672e" -dependencies = [ - "sct", -] - -[[package]] -name = "ctor" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8f45d9ad417bcef4817d614a501ab55cdd96a6fdb24f49aab89a54acfd66b19" -dependencies = [ - "quote", - "syn", -] - -[[package]] -name = "cuckoofilter" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b810a8449931679f64cd7eef1bbd0fa315801b6d5d9cdc1ace2804d6529eee18" -dependencies = [ - "byteorder", - "fnv", - "rand 0.7.3", -] - -[[package]] -name = "curl" -version = "0.4.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a872858e9cb9e3b96c80dd78774ad9e32e44d3b05dc31e142b858d14aebc82c" -dependencies = [ - "curl-sys", - "libc", - "openssl-probe", - "openssl-sys", - "schannel", - "socket2 0.3.19", - "winapi 0.3.9", -] - -[[package]] -name = "curl-sys" -version = "0.4.41+curl-7.75.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0ec466abd277c7cab2905948f3e94d10bc4963f1f5d47921c1cc4ffd2028fe65" -dependencies = [ - "cc", - "libc", - "libnghttp2-sys", - "libz-sys", - "openssl-sys", - "pkg-config", - "vcpkg", - "winapi 0.3.9", -] - -[[package]] -name = "curve25519-dalek" -version = "2.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "434e1720189a637d44fe464f4df1e6eb900b4835255b14354497c78af37d9bb8" -dependencies = [ - "byteorder", - "digest 0.8.1", - "rand_core 0.5.1", - "subtle 2.4.0", - "zeroize", -] - -[[package]] -name = "curve25519-dalek" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f627126b946c25a4638eec0ea634fc52506dea98db118aae985118ce7c3d723f" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle 2.4.0", - "zeroize", -] - -[[package]] -name = "data-encoding" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" - -[[package]] -name = "data-encoding-macro" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a94feec3d2ba66c0b6621bca8bc6f68415b1e5c69af3586fdd0af9fd9f29b17" -dependencies = [ - "data-encoding", - "data-encoding-macro-internal", -] - -[[package]] -name = "data-encoding-macro-internal" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f83e699727abca3c56e187945f303389590305ab2f0185ea445aa66e8d5f2a" -dependencies = [ - "data-encoding", - "syn", -] - -[[package]] -name = "derive_more" -version = "0.99.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array 0.12.3", -] - -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "directories" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8fed639d60b58d0f53498ab13d26f621fd77569cc6edb031f4cc36a2ad9da0f" -dependencies = [ - "dirs-sys", -] - -[[package]] -name = "directories-next" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc" -dependencies = [ - "cfg-if 1.0.0", - "dirs-sys-next", -] - -[[package]] -name = "dirs-sys" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" -dependencies = [ - "libc", - "redox_users 0.3.5", - "winapi 0.3.9", -] - -[[package]] -name = "dirs-sys-next" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" -dependencies = [ - "libc", - "redox_users 0.4.0", - "winapi 0.3.9", -] - -[[package]] -name = "discard" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" - -[[package]] -name = "dns-parser" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea" -dependencies = [ - "byteorder", - "quick-error 1.2.3", -] - -[[package]] -name = "doc-comment" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" - -[[package]] -name = "downcast-rs" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" - -[[package]] -name = "dyn-clonable" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" -dependencies = [ - "dyn-clonable-impl", - "dyn-clone", -] - -[[package]] -name = "dyn-clonable-impl" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "dyn-clone" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" - -[[package]] -name = "ed25519" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37c66a534cbb46ab4ea03477eae19d5c22c01da8258030280b7bd9d8433fb6ef" -dependencies = [ - "signature", -] - -[[package]] -name = "ed25519-dalek" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" -dependencies = [ - "curve25519-dalek 3.0.2", - "ed25519", - "rand 0.7.3", - "serde", - "sha2 0.9.3", - "zeroize", -] - -[[package]] -name = "either" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" - -[[package]] -name = "encoding_rs" -version = "0.8.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] 
-name = "enum-as-inner" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "env_logger" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -dependencies = [ - "atty", - "humantime 1.3.0", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "env_logger" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17392a012ea30ef05a610aa97dfb49496e71c9f676b27879922ea5bdf60d9d3f" -dependencies = [ - "atty", - "humantime 2.1.0", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "environmental" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" - -[[package]] -name = "erased-serde" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0465971a8cc1fa2455c8465aaa377131e1f1cf4983280f474a13e68793aa770c" -dependencies = [ - "serde", -] - -[[package]] -name = "errno" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa68f2fb9cae9d37c9b2b3584aba698a2e97f72d7aef7b9f7aa71d8b54ce46fe" -dependencies = [ - "errno-dragonfly", - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "errno-dragonfly" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067" -dependencies = [ - "gcc", - "libc", -] - -[[package]] -name = "ethabi" -version = "14.0.0" -source = "git+https://github.com/paritytech/ethabi.git?branch=td-eth-types-11#fe76a0547de3785e40215da7aa10b334e7a6e553" -dependencies = [ - "anyhow", - 
"ethereum-types", - "hex", - "serde", - "serde_json", - "sha3", - "thiserror", - "uint", -] - -[[package]] -name = "ethabi-contract" -version = "11.0.0" -source = "git+https://github.com/paritytech/ethabi.git?branch=td-eth-types-11#fe76a0547de3785e40215da7aa10b334e7a6e553" - -[[package]] -name = "ethabi-derive" -version = "14.0.0" -source = "git+https://github.com/paritytech/ethabi.git?branch=td-eth-types-11#fe76a0547de3785e40215da7aa10b334e7a6e553" -dependencies = [ - "anyhow", - "ethabi", - "heck", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "ethbloom" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a621dcebea74f2a6f2002d0a885c81ccf6cbdf86760183316a7722b5707ca4" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "tiny-keccak", -] - -[[package]] -name = "ethbloom" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "779864b9c7f7ead1f092972c3257496c6a84b46dba2ce131dd8a282cb2cc5972" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "impl-serde", - "tiny-keccak", -] - -[[package]] -name = "ethereum-contract-builtin" -version = "0.1.0" -dependencies = [ - "ethereum-types", - "finality-grandpa", - "hex", - "log", - "parity-scale-codec", - "rialto-runtime", - "sc-finality-grandpa", - "sp-blockchain", - "sp-core", - "sp-finality-grandpa", - "sp-runtime", -] - -[[package]] -name = "ethereum-poa-relay" -version = "0.1.0" -dependencies = [ - "ansi_term 0.12.1", - "async-std", - "async-trait", - "bp-currency-exchange", - "bp-eth-poa", - "clap", - "env_logger 0.8.3", - "ethabi", - "ethabi-contract", - "ethabi-derive", - "exchange-relay", - "frame-system", - "futures 0.3.13", - "headers-relay", - "hex", - "hex-literal 0.3.1", - "libsecp256k1", - "log", - "messages-relay", - "num-traits", - "pallet-transaction-payment", - "parity-scale-codec", - "relay-ethereum-client", - "relay-rialto-client", - "relay-substrate-client", - 
"relay-utils", - "rialto-runtime", - "serde", - "serde_json", - "sp-core", - "sp-keyring", - "sp-runtime", - "substrate-prometheus-endpoint", - "time 0.2.25", -] - -[[package]] -name = "ethereum-types" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f64b5df66a228d85e4b17e5d6c6aa43b0310898ffe8a85988c4c032357aaabfd" -dependencies = [ - "ethbloom 0.11.0", - "fixed-hash", - "impl-rlp", - "impl-serde", - "primitive-types", - "uint", -] - -[[package]] -name = "event-listener" -version = "2.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" - -[[package]] -name = "exchange-relay" -version = "0.1.0" -dependencies = [ - "async-std", - "async-trait", - "backoff", - "futures 0.3.13", - "log", - "num-traits", - "parking_lot 0.11.1", - "relay-utils", -] - -[[package]] -name = "exit-future" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" -dependencies = [ - "futures 0.3.13", -] - -[[package]] -name = "failure" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" -dependencies = [ - "backtrace", - "failure_derive", -] - -[[package]] -name = "failure_derive" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] - -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - -[[package]] -name = "fallible-iterator" -version = "0.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" - -[[package]] -name = "fastrand" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca5faf057445ce5c9d4329e382b2ce7ca38550ef3b73a5348362d5f24e0c7fe3" -dependencies = [ - "instant", -] - -[[package]] -name = "fdlimit" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c4c9e43643f5a3be4ca5b67d26b98031ff9db6806c3440ae32e02e3ceac3f1b" -dependencies = [ - "libc", -] - -[[package]] -name = "file-per-thread-logger" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fdbe0d94371f9ce939b555dd342d0686cc4c0cadbcd4b61d70af5ff97eb4126" -dependencies = [ - "env_logger 0.7.1", - "log", -] - -[[package]] -name = "finality-grandpa" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74a1bfdcc776e63e49f741c7ce6116fa1b887e8ac2e3ccb14dd4aa113e54feb9" -dependencies = [ - "either", - "futures 0.3.13", - "futures-timer 3.0.2", - "log", - "num-traits", - "parity-scale-codec", - "parking_lot 0.11.1", -] - -[[package]] -name = "finality-relay" -version = "0.1.0" -dependencies = [ - "async-std", - "async-trait", - "backoff", - "bp-header-chain", - "futures 0.3.13", - "headers-relay", - "log", - "num-traits", - "parking_lot 0.11.1", - "relay-utils", -] - -[[package]] -name = "fixed-hash" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" -dependencies = [ - "byteorder", - "rand 0.8.3", - "rustc-hex", - "static_assertions", -] - -[[package]] -name = "fixedbitset" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" - -[[package]] -name = "flate2" -version 
= "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0" -dependencies = [ - "cfg-if 1.0.0", - "crc32fast", - "libc", - "libz-sys", - "miniz_oxide", -] - -[[package]] -name = "flume" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "531a685ab99b8f60a271b44d5dd1a76e55124a8c9fa0407b7a8e9cd172d5b588" -dependencies = [ - "futures-core", - "futures-sink", - "pin-project 1.0.5", - "spinning_top", -] - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "fork-tree" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "form_urlencoded" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" -dependencies = [ - "matches", - "percent-encoding 2.1.0", -] - -[[package]] -name = "frame-benchmarking" -version = "3.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "frame-support", - "frame-system", - "linregress", - "log", - "parity-scale-codec", - "paste 1.0.4", - "sp-api", - "sp-io", - "sp-runtime", - "sp-runtime-interface", - "sp-std", - "sp-storage", -] - -[[package]] -name = "frame-benchmarking-cli" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "Inflector", - "chrono", - "frame-benchmarking", - "handlebars", - "parity-scale-codec", - "sc-cli", - "sc-client-db", - "sc-executor", - "sc-service", - "serde", - "sp-core", - 
"sp-externalities", - "sp-keystore", - "sp-runtime", - "sp-state-machine", - "structopt", -] - -[[package]] -name = "frame-executive" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "frame-support", - "frame-system", - "parity-scale-codec", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", - "sp-tracing", -] - -[[package]] -name = "frame-metadata" -version = "13.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "parity-scale-codec", - "serde", - "sp-core", - "sp-std", -] - -[[package]] -name = "frame-support" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "bitflags", - "frame-metadata", - "frame-support-procedural", - "impl-trait-for-tuples", - "log", - "max-encoded-len", - "once_cell", - "parity-scale-codec", - "paste 1.0.4", - "serde", - "smallvec 1.6.1", - "sp-arithmetic", - "sp-core", - "sp-inherents", - "sp-io", - "sp-runtime", - "sp-staking", - "sp-state-machine", - "sp-std", - "sp-tracing", -] - -[[package]] -name = "frame-support-procedural" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "Inflector", - "frame-support-procedural-tools", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural-tools" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "frame-support-procedural-tools-derive", - "proc-macro-crate 1.0.0", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural-tools-derive" -version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-system" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "frame-support", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", - "sp-version", -] - -[[package]] -name = "frame-system-rpc-runtime-api" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "parity-scale-codec", - "sp-api", -] - -[[package]] -name = "fs-swap" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d47dad3685eceed8488986cad3d5027165ea5edb164331770e2059555f10a5" -dependencies = [ - "lazy_static", - "libc", - "libloading", - "winapi 0.3.9", -] - -[[package]] -name = "fs2" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" -dependencies = [ - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -dependencies = [ - "bitflags", - "fuchsia-zircon-sys", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - -[[package]] -name = "funty" -version = "1.1.0" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" - -[[package]] -name = "futures" -version = "0.1.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" - -[[package]] -name = "futures" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f55667319111d593ba876406af7c409c0ebb44dc4be6132a783ccf163ea14c1" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2dd2df839b57db9ab69c2c9d8f3e8c81984781937fe2807dc6dcf3b2ad2939" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15496a72fabf0e62bdc3df11a59a3787429221dd0710ba8ef163d6f7a9112c94" - -[[package]] -name = "futures-cpupool" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" -dependencies = [ - "futures 0.1.31", - "num_cpus", -] - -[[package]] -name = "futures-executor" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891a4b7b96d84d5940084b2a37632dd65deeae662c114ceaa2c879629c9c0ad1" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", - "num_cpus", -] - -[[package]] -name = "futures-io" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71c2c65c57704c32f5241c1223167c2c3294fd34ac020c807ddbe6db287ba59" - -[[package]] -name = "futures-lite" -version = "1.11.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4481d0cd0de1d204a4fa55e7d45f07b1d958abcb06714b3446438e2eff695fb" -dependencies = [ - "fastrand", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite 0.2.4", - "waker-fn", -] - -[[package]] -name = "futures-macro" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea405816a5139fb39af82c2beb921d52143f556038378d6db21183a5c37fbfb7" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "futures-rustls" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a1387e07917c711fb4ee4f48ea0adb04a3c9739e53ef85bf43ae1edc2937a8b" -dependencies = [ - "futures-io", - "rustls 0.19.0", - "webpki 0.21.4", -] - -[[package]] -name = "futures-sink" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85754d98985841b7d4f5e8e6fbfa4a4ac847916893ec511a2917ccd8525b8bb3" - -[[package]] -name = "futures-task" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa189ef211c15ee602667a6fcfe1c1fd9e07d42250d2156382820fba33c9df80" - -[[package]] -name = "futures-timer" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1de7508b218029b0f01662ed8f61b1c964b3ae99d6f25462d0f55a595109df6" - -[[package]] -name = "futures-timer" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" - -[[package]] -name = "futures-util" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1812c7ab8aedf8d6f2701a43e1243acdbcc2b36ab26e2ad421eb99ac963d96d1" -dependencies = [ - "futures 0.1.31", - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - 
"memchr", - "pin-project-lite 0.2.4", - "pin-utils", - "proc-macro-hack", - "proc-macro-nested", - "slab", -] - -[[package]] -name = "gcc" -version = "0.3.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" - -[[package]] -name = "generic-array" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" -dependencies = [ - "typenum", -] - -[[package]] -name = "generic-array" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ed1e761351b56f54eb9dcd0cfaca9fd0daecf93918e1cfc01c8a3d26ee7adcd" -dependencies = [ - "typenum", -] - -[[package]] -name = "generic-array" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", -] - -[[package]] -name = "ghash" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" -dependencies = [ - "opaque-debug 0.3.0", - "polyval", -] - -[[package]] -name = "gimli" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" - -[[package]] -name = "gimli" -version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189" -dependencies = [ - "fallible-iterator", - "indexmap", - "stable_deref_trait", -] - -[[package]] -name = "glob" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" - -[[package]] -name = "globset" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c152169ef1e421390738366d2f796655fec62621dabbd0fd476f905934061e4a" -dependencies = [ - "aho-corasick", - "bstr", - "fnv", - "log", - "regex", -] - -[[package]] -name = "gloo-timers" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47204a46aaff920a1ea58b11d03dec6f704287d27561724a4631e450654a891f" -dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "h2" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" -dependencies = [ - "byteorder", - "bytes 0.4.12", - "fnv", - "futures 0.1.31", - "http 0.1.21", - "indexmap", - "log", - "slab", - "string", - "tokio-io", -] - -[[package]] -name = "h2" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" -dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 0.2.3", - "indexmap", - "slab", - "tokio 0.2.25", - "tokio-util", - "tracing", - "tracing-futures", -] - -[[package]] -name = "handlebars" -version = "3.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "cdb0867bbc5a3da37a753e78021d5fcf8a4db00e18dd2dd90fd36e24190e162d" -dependencies = [ - "log", - "pest", - "pest_derive", - "quick-error 2.0.0", - "serde", - "serde_json", -] - -[[package]] -name = "hash-db" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" - -[[package]] -name = "hash256-std-hasher" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" -dependencies = [ - "crunchy", -] - -[[package]] -name = "hashbrown" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" -dependencies = [ - "ahash", -] - -[[package]] -name = "headers-relay" -version = "0.1.0" -dependencies = [ - "async-std", - "async-trait", - "backoff", - "futures 0.3.13", - "linked-hash-map", - "log", - "num-traits", - "parking_lot 0.11.1", - "relay-utils", -] - -[[package]] -name = "heck" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" -dependencies = [ - "unicode-segmentation", -] - -[[package]] -name = "hermit-abi" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" -dependencies = [ - "libc", -] - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "hex-literal" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "961de220ec9a91af2e1e5bd80d02109155695e516771762381ef8581317066e0" -dependencies = [ - "hex-literal-impl", - 
"proc-macro-hack", -] - -[[package]] -name = "hex-literal" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5af1f635ef1bc545d78392b136bfe1c9809e029023c84a3638a864a10b8819c8" - -[[package]] -name = "hex-literal-impl" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "853f769599eb31de176303197b7ba4973299c38c7a7604a6bc88c3eef05b9b46" -dependencies = [ - "proc-macro-hack", -] - -[[package]] -name = "hex_fmt" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" - -[[package]] -name = "hmac" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" -dependencies = [ - "crypto-mac 0.7.0", - "digest 0.8.1", -] - -[[package]] -name = "hmac" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" -dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", -] - -[[package]] -name = "hmac-drbg" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" -dependencies = [ - "digest 0.8.1", - "generic-array 0.12.3", - "hmac 0.7.1", -] - -[[package]] -name = "honggfuzz" -version = "0.5.54" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bea09577d948a98a5f59b7c891e274c4fb35ad52f67782b3d0cb53b9c05301f1" -dependencies = [ - "arbitrary", - "lazy_static", - "memmap", -] - -[[package]] -name = "hostname" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" -dependencies = [ - "libc", - "match_cfg", - "winapi 0.3.9", -] - -[[package]] 
-name = "http" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6ccf5ede3a895d8856620237b2f02972c1bbc78d2965ad7fe8838d4a0ed41f0" -dependencies = [ - "bytes 0.4.12", - "fnv", - "itoa", -] - -[[package]] -name = "http" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" -dependencies = [ - "bytes 1.0.1", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "http 0.1.21", - "tokio-buf", -] - -[[package]] -name = "http-body" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" -dependencies = [ - "bytes 0.5.6", - "http 0.2.3", -] - -[[package]] -name = "httparse" -version = "1.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "615caabe2c3160b313d52ccc905335f4ed5f10881dd63dc5699d47e90be85691" - -[[package]] -name = "httpdate" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" - -[[package]] -name = "humantime" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -dependencies = [ - "quick-error 1.2.3", -] - -[[package]] -name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - -[[package]] -name = "hyper" -version = "0.12.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5c843caf6296fc1f93444735205af9ed4e109a539005abb2564ae1d6fad34c52" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "futures-cpupool", - "h2 0.1.26", - "http 0.1.21", - "http-body 0.1.0", - "httparse", - "iovec", - "itoa", - "log", - "net2", - "rustc_version", - "time 0.1.44", - "tokio 0.1.22", - "tokio-buf", - "tokio-executor", - "tokio-io", - "tokio-reactor", - "tokio-tcp", - "tokio-threadpool", - "tokio-timer", - "want 0.2.0", -] - -[[package]] -name = "hyper" -version = "0.13.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" -dependencies = [ - "bytes 0.5.6", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.2.7", - "http 0.2.3", - "http-body 0.3.1", - "httparse", - "httpdate", - "itoa", - "pin-project 1.0.5", - "socket2 0.3.19", - "tokio 0.2.25", - "tower-service", - "tracing", - "want 0.3.0", -] - -[[package]] -name = "hyper-rustls" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37743cc83e8ee85eacfce90f2f4102030d9ff0a95244098d781e9bee4a90abb6" -dependencies = [ - "bytes 0.5.6", - "ct-logs", - "futures-util", - "hyper 0.13.10", - "log", - "rustls 0.18.1", - "rustls-native-certs", - "tokio 0.2.25", - "tokio-rustls", - "webpki 0.21.4", -] - -[[package]] -name = "idna" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "idna" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89829a5d69c23d348314a7ac337fe39173b61149a9864deabd260983aed48c21" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "if-addrs" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "28538916eb3f3976311f5dfbe67b5362d0add1293d0a9cad17debf86f8e3aa48" -dependencies = [ - "if-addrs-sys", - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "if-addrs-sys" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de74b9dd780476e837e5eb5ab7c88b49ed304126e412030a0adba99c8efe79ea" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "if-watch" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6d52908d4ea4ab2bc22474ba149bf1011c8e2c3ebc1ff593ae28ac44f494b6" -dependencies = [ - "async-io", - "futures 0.3.13", - "futures-lite", - "if-addrs", - "ipnet", - "libc", - "log", - "winapi 0.3.9", -] - -[[package]] -name = "impl-codec" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df170efa359aebdd5cb7fe78edcc67107748e4737bdca8a8fb40d15ea7a877ed" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-rlp" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" -dependencies = [ - "rlp", -] - -[[package]] -name = "impl-serde" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b47ca4d2b6931707a55fce5cf66aff80e2178c8b63bbb4ecb5695cbc870ddf6f" -dependencies = [ - "serde", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5dacb10c5b3bb92d46ba347505a9041e676bb20ad220101326bffb0c93031ee" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "indexmap" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" -dependencies = [ - "autocfg", - "hashbrown", - "serde", -] - -[[package]] -name = "instant" -version = 
"0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "integer-sqrt" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" -dependencies = [ - "num-traits", -] - -[[package]] -name = "intervalier" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64fa110ec7b8f493f416eed552740d10e7030ad5f63b2308f82c9608ec2df275" -dependencies = [ - "futures 0.3.13", - "futures-timer 2.0.2", -] - -[[package]] -name = "iovec" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -dependencies = [ - "libc", -] - -[[package]] -name = "ip_network" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ee15951c035f79eddbef745611ec962f63f4558f1dadf98ab723cc603487c6f" - -[[package]] -name = "ipconfig" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" -dependencies = [ - "socket2 0.3.19", - "widestring", - "winapi 0.3.9", - "winreg", -] - -[[package]] -name = "ipnet" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" - -[[package]] -name = "isahc" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b24d2aed6bbe6faeab0e164ec2e9e6193fcfcfe489b6eb59fb0d0d34947d73" -dependencies = [ - "crossbeam-utils 0.8.3", - "curl", - "curl-sys", - "encoding_rs", - "flume", - "futures-lite", - "http 0.2.3", - "log", - "mime", - "once_cell", - "polling", - "slab", - "sluice", 
- "tracing", - "tracing-futures", - "url 2.2.1", - "waker-fn", -] - -[[package]] -name = "itertools" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" -dependencies = [ - "either", -] - -[[package]] -name = "itertools" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" - -[[package]] -name = "jobserver" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2" -dependencies = [ - "libc", -] - -[[package]] -name = "js-sys" -version = "0.3.50" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "jsonpath_lib" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61352ec23883402b7d30b3313c16cbabefb8907361c4eb669d990cbb87ceee5a" -dependencies = [ - "array_tool", - "env_logger 0.7.1", - "log", - "serde", - "serde_json", -] - -[[package]] -name = "jsonrpc-client-transports" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "489b9c612e60c766f751ab40fcb43cbb55a1e10bb44a9b4307ed510ca598cbd7" -dependencies = [ - "failure", - "futures 0.1.31", - "jsonrpc-core 15.1.0", - "jsonrpc-pubsub", - "log", - "serde", - "serde_json", - "url 1.7.2", -] - -[[package]] -name = "jsonrpc-core" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0745a6379e3edc893c84ec203589790774e4247420033e71a76d3ab4687991fa" -dependencies = [ - "futures 0.1.31", - "log", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "jsonrpc-core" -version = "17.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07569945133257ff557eb37b015497104cea61a2c9edaf126c1cbd6e8332397f" -dependencies = [ - "futures 0.3.13", - "log", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "jsonrpc-core-client" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f764902d7b891344a0acb65625f32f6f7c6db006952143bd650209fbe7d94db" -dependencies = [ - "jsonrpc-client-transports", -] - -[[package]] -name = "jsonrpc-derive" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99a847f9ec7bb52149b2786a17c9cb260d6effc6b8eeb8c16b343a487a7563a3" -dependencies = [ - "proc-macro-crate 0.1.5", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "jsonrpc-http-server" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb5c4513b7b542f42da107942b7b759f27120b5cc894729f88254b28dff44b7" -dependencies = [ - "hyper 0.12.36", - "jsonrpc-core 15.1.0", - "jsonrpc-server-utils", - "log", - "net2", - "parking_lot 0.10.2", - "unicase", -] - -[[package]] -name = "jsonrpc-ipc-server" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf50e53e4eea8f421a7316c5f63e395f7bc7c4e786a6dc54d76fab6ff7aa7ce7" -dependencies = [ - "jsonrpc-core 15.1.0", - "jsonrpc-server-utils", - "log", - "parity-tokio-ipc", - "parking_lot 0.10.2", - "tokio-service", -] - -[[package]] -name = "jsonrpc-pubsub" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "639558e0604013be9787ae52f798506ae42bf4220fe587bdc5625871cc8b9c77" -dependencies = [ - "jsonrpc-core 15.1.0", - "log", - "parking_lot 
0.10.2", - "rand 0.7.3", - "serde", -] - -[[package]] -name = "jsonrpc-server-utils" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72f1f3990650c033bd8f6bd46deac76d990f9bbfb5f8dc8c4767bf0a00392176" -dependencies = [ - "bytes 0.4.12", - "globset", - "jsonrpc-core 15.1.0", - "lazy_static", - "log", - "tokio 0.1.22", - "tokio-codec", - "unicase", -] - -[[package]] -name = "jsonrpc-ws-server" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6596fe75209b73a2a75ebe1dce4e60e03b88a2b25e8807b667597f6315150d22" -dependencies = [ - "jsonrpc-core 15.1.0", - "jsonrpc-server-utils", - "log", - "parity-ws", - "parking_lot 0.10.2", - "slab", -] - -[[package]] -name = "jsonrpsee-proc-macros" -version = "0.2.0-alpha.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5784ee8bb31988fa2c7a755fe31b0e21aa51894a67e5c99b6d4470f0253bf31a" -dependencies = [ - "Inflector", - "proc-macro-crate 1.0.0", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "jsonrpsee-types" -version = "0.2.0-alpha.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab3dabceeeeb865897661d532d47202eaae71cd2c606f53cb69f1fbc0555a51" -dependencies = [ - "async-trait", - "beef", - "futures-channel", - "futures-util", - "log", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "jsonrpsee-ws-client" -version = "0.2.0-alpha.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6fdb4390bd25358c62e8b778652a564a1723ba07dca0feb3da439c2253fe59f" -dependencies = [ - "async-std", - "async-tls", - "async-trait", - "fnv", - "futures 0.3.13", - "jsonrpsee-types", - "log", - "pin-project 1.0.5", - "serde", - "serde_json", - "soketto", - "thiserror", - "url 2.2.1", - "webpki 0.22.0", -] - -[[package]] -name = "keccak" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" - -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - -[[package]] -name = "kv-log-macro" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" -dependencies = [ - "log", -] - -[[package]] -name = "kvdb" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8891bd853eff90e33024195d79d578dc984c82f9e0715fcd2b525a0c19d52811" -dependencies = [ - "parity-util-mem", - "smallvec 1.6.1", -] - -[[package]] -name = "kvdb-memorydb" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a0da8e08caf08d384a620ec19bb6c9b85c84137248e202617fb91881f25912" -dependencies = [ - "kvdb", - "parity-util-mem", - "parking_lot 0.11.1", -] - -[[package]] -name = "kvdb-rocksdb" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34446c373ccc494c2124439281c198c7636ccdc2752c06722bbffd56d459c1e4" -dependencies = [ - "fs-swap", - "kvdb", - "log", - "num_cpus", - "owning_ref", - "parity-util-mem", - "parking_lot 0.11.1", - "regex", - "rocksdb", - "smallvec 1.6.1", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - -[[package]] -name = "leb128" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" - -[[package]] -name = "libc" -version = "0.2.97" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6" - -[[package]] -name = "libloading" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" -dependencies = [ - "cc", - "winapi 0.3.9", -] - -[[package]] -name = "libm" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" - -[[package]] -name = "libnghttp2-sys" -version = "0.1.6+1.43.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0af55541a8827e138d59ec9e5877fb6095ece63fb6f4da45e7491b4fbd262855" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "libp2p" -version = "0.37.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08053fbef67cd777049ef7a95ebaca2ece370b4ed7712c3fa404d69a88cb741b" -dependencies = [ - "atomic", - "bytes 1.0.1", - "futures 0.3.13", - "lazy_static", - "libp2p-core", - "libp2p-deflate", - "libp2p-dns", - "libp2p-floodsub", - "libp2p-gossipsub", - "libp2p-identify", - "libp2p-kad", - "libp2p-mdns", - "libp2p-mplex", - "libp2p-noise", - "libp2p-ping", - "libp2p-plaintext", - "libp2p-pnet", - "libp2p-relay", - "libp2p-request-response", - "libp2p-swarm", - "libp2p-swarm-derive", - "libp2p-tcp", - "libp2p-uds", - "libp2p-wasm-ext", - "libp2p-websocket", - "libp2p-yamux", - "parity-multiaddr", - "parking_lot 0.11.1", - "pin-project 1.0.5", - "smallvec 1.6.1", - "wasm-timer", -] - -[[package]] -name = "libp2p-core" -version = "0.28.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "554d3e7e9e65f939d66b75fd6a4c67f258fe250da61b91f46c545fc4a89b51d9" -dependencies = [ - 
"asn1_der", - "bs58", - "ed25519-dalek", - "either", - "fnv", - "futures 0.3.13", - "futures-timer 3.0.2", - "lazy_static", - "libsecp256k1", - "log", - "multihash", - "multistream-select", - "parity-multiaddr", - "parking_lot 0.11.1", - "pin-project 1.0.5", - "prost", - "prost-build", - "rand 0.7.3", - "ring", - "rw-stream-sink", - "sha2 0.9.3", - "smallvec 1.6.1", - "thiserror", - "unsigned-varint 0.7.0", - "void", - "zeroize", -] - -[[package]] -name = "libp2p-deflate" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2181a641cd15f9b6ba71b1335800f309012a0a97a29ffaabbbf40e9d3d58f08" -dependencies = [ - "flate2", - "futures 0.3.13", - "libp2p-core", -] - -[[package]] -name = "libp2p-dns" -version = "0.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62e63dab8b5ff35e0c101a3e51e843ba782c07bbb1682f5fd827622e0d02b98b" -dependencies = [ - "async-std-resolver", - "futures 0.3.13", - "libp2p-core", - "log", - "smallvec 1.6.1", - "trust-dns-resolver", -] - -[[package]] -name = "libp2p-floodsub" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48a9b570f6766301d9c4aa00fce3554cad1598e2f466debbc4dde909028417cf" -dependencies = [ - "cuckoofilter", - "fnv", - "futures 0.3.13", - "libp2p-core", - "libp2p-swarm", - "log", - "prost", - "prost-build", - "rand 0.7.3", - "smallvec 1.6.1", -] - -[[package]] -name = "libp2p-gossipsub" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7b0c8506a6ec3344b9e706d7c7a6dba826f8ede735cfe13dde12a8c263c4af9" -dependencies = [ - "asynchronous-codec 0.6.0", - "base64 0.13.0", - "byteorder", - "bytes 1.0.1", - "fnv", - "futures 0.3.13", - "hex_fmt", - "libp2p-core", - "libp2p-swarm", - "log", - "prost", - "prost-build", - "rand 0.7.3", - "regex", - "sha2 0.9.3", - "smallvec 1.6.1", - "unsigned-varint 0.7.0", - "wasm-timer", -] - -[[package]] -name = "libp2p-identify" 
-version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f668f00efd9883e8b7bcc582eaf0164615792608f886f6577da18bcbeea0a46" -dependencies = [ - "futures 0.3.13", - "libp2p-core", - "libp2p-swarm", - "log", - "prost", - "prost-build", - "smallvec 1.6.1", - "wasm-timer", -] - -[[package]] -name = "libp2p-kad" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b07312ebe5ee4fd2404447a0609814574df55c65d4e20838b957bbd34907d820" -dependencies = [ - "arrayvec 0.5.2", - "asynchronous-codec 0.6.0", - "bytes 1.0.1", - "either", - "fnv", - "futures 0.3.13", - "libp2p-core", - "libp2p-swarm", - "log", - "prost", - "prost-build", - "rand 0.7.3", - "sha2 0.9.3", - "smallvec 1.6.1", - "uint", - "unsigned-varint 0.7.0", - "void", - "wasm-timer", -] - -[[package]] -name = "libp2p-mdns" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41e282f974c4bea56db8acca50387f05189406e346318cb30190b0bde662961e" -dependencies = [ - "async-io", - "data-encoding", - "dns-parser", - "futures 0.3.13", - "if-watch", - "lazy_static", - "libp2p-core", - "libp2p-swarm", - "log", - "rand 0.8.3", - "smallvec 1.6.1", - "socket2 0.4.0", - "void", -] - -[[package]] -name = "libp2p-mplex" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e9b544335d1ed30af71daa96edbefadef6f19c7a55f078b9fc92c87163105d" -dependencies = [ - "asynchronous-codec 0.6.0", - "bytes 1.0.1", - "futures 0.3.13", - "libp2p-core", - "log", - "nohash-hasher", - "parking_lot 0.11.1", - "rand 0.7.3", - "smallvec 1.6.1", - "unsigned-varint 0.7.0", -] - -[[package]] -name = "libp2p-noise" -version = "0.30.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36db0f0db3b0433f5b9463f1c0cd9eadc0a3734a9170439ce501ff99733a88bd" -dependencies = [ - "bytes 1.0.1", - "curve25519-dalek 3.0.2", - "futures 0.3.13", - "lazy_static", - 
"libp2p-core", - "log", - "prost", - "prost-build", - "rand 0.7.3", - "sha2 0.9.3", - "snow", - "static_assertions", - "x25519-dalek", - "zeroize", -] - -[[package]] -name = "libp2p-ping" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4bfaffac63bf3c7ec11ed9d8879d455966ddea7e78ee14737f0b6dce0d1cd1" -dependencies = [ - "futures 0.3.13", - "libp2p-core", - "libp2p-swarm", - "log", - "rand 0.7.3", - "void", - "wasm-timer", -] - -[[package]] -name = "libp2p-plaintext" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c8c37b4d2a075b4be8442760a5f8c037180f0c8dd5b5734b9978ab868b3aa11" -dependencies = [ - "asynchronous-codec 0.6.0", - "bytes 1.0.1", - "futures 0.3.13", - "libp2p-core", - "log", - "prost", - "prost-build", - "unsigned-varint 0.7.0", - "void", -] - -[[package]] -name = "libp2p-pnet" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce3374f3b28162db9d3442c9347c4f14cb01e8290052615c7d341d40eae0599" -dependencies = [ - "futures 0.3.13", - "log", - "pin-project 1.0.5", - "rand 0.7.3", - "salsa20", - "sha3", -] - -[[package]] -name = "libp2p-relay" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b8786aca3f18671d8776289706a5521f6c9124a820f69e358de214b9939440d" -dependencies = [ - "asynchronous-codec 0.6.0", - "bytes 1.0.1", - "futures 0.3.13", - "futures-timer 3.0.2", - "libp2p-core", - "libp2p-swarm", - "log", - "pin-project 1.0.5", - "prost", - "prost-build", - "rand 0.7.3", - "smallvec 1.6.1", - "unsigned-varint 0.7.0", - "void", - "wasm-timer", -] - -[[package]] -name = "libp2p-request-response" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cdbe172f08e6d0f95fa8634e273d4c4268c4063de2e33e7435194b0130c62e3" -dependencies = [ - "async-trait", - "bytes 1.0.1", - "futures 0.3.13", - "libp2p-core", - "libp2p-swarm", 
- "log", - "lru", - "minicbor", - "rand 0.7.3", - "smallvec 1.6.1", - "unsigned-varint 0.7.0", - "wasm-timer", -] - -[[package]] -name = "libp2p-swarm" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e04d8e1eef675029ec728ba14e8d0da7975d84b6679b699b4ae91a1de9c3a92" -dependencies = [ - "either", - "futures 0.3.13", - "libp2p-core", - "log", - "rand 0.7.3", - "smallvec 1.6.1", - "void", - "wasm-timer", -] - -[[package]] -name = "libp2p-swarm-derive" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "365b0a699fea5168676840567582a012ea297b1ca02eee467e58301b9c9c5eed" -dependencies = [ - "quote", - "syn", -] - -[[package]] -name = "libp2p-tcp" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b1a27d21c477951799e99d5c105d78868258502ce092988040a808d5a19bbd9" -dependencies = [ - "async-io", - "futures 0.3.13", - "futures-timer 3.0.2", - "if-watch", - "ipnet", - "libc", - "libp2p-core", - "log", - "socket2 0.4.0", -] - -[[package]] -name = "libp2p-uds" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffd6564bb3b7ff203661ccbb69003c2b551e34cef974f2d6c6a28306a12170b5" -dependencies = [ - "async-std", - "futures 0.3.13", - "libp2p-core", - "log", -] - -[[package]] -name = "libp2p-wasm-ext" -version = "0.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef45d61e43c313531b5e903e4e8415212ff6338e0c54c47da5b9b412b5760de" -dependencies = [ - "futures 0.3.13", - "js-sys", - "libp2p-core", - "parity-send-wrapper", - "wasm-bindgen", - "wasm-bindgen-futures", -] - -[[package]] -name = "libp2p-websocket" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cace60995ef6f637e4752cccbb2590f6bc358e8741a0d066307636c69a4b3a74" -dependencies = [ - "either", - "futures 0.3.13", - "futures-rustls", - "libp2p-core", - 
"log", - "quicksink", - "rw-stream-sink", - "soketto", - "url 2.2.1", - "webpki-roots", -] - -[[package]] -name = "libp2p-yamux" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f35da42cfc6d5cb0dcf3ad6881bc68d146cdf38f98655e09e33fbba4d13eabc4" -dependencies = [ - "futures 0.3.13", - "libp2p-core", - "parking_lot 0.11.1", - "thiserror", - "yamux", -] - -[[package]] -name = "librocksdb-sys" -version = "6.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b56f651c204634b936be2f92dbb42c36867e00ff7fe2405591f3b9fa66f09" -dependencies = [ - "bindgen", - "cc", - "glob", - "libc", -] - -[[package]] -name = "libsecp256k1" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" -dependencies = [ - "arrayref", - "crunchy", - "digest 0.8.1", - "hmac-drbg", - "rand 0.7.3", - "sha2 0.8.2", - "subtle 2.4.0", - "typenum", -] - -[[package]] -name = "libz-sys" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "linked-hash-map" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" - -[[package]] -name = "linked_hash_set" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" -dependencies = [ - "linked-hash-map", -] - -[[package]] -name = "linregress" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d0ad4b5cc8385a881c561fac3501353d63d2a2b7a357b5064d71815c9a92724" -dependencies = [ - "nalgebra", - "statrs", -] - -[[package]] -name 
= "lock_api" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" -dependencies = [ - "scopeguard", -] - -[[package]] -name = "lock_api" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" -dependencies = [ - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" -dependencies = [ - "cfg-if 1.0.0", - "value-bag", -] - -[[package]] -name = "lru" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f374d42cdfc1d7dbf3d3dec28afab2eb97ffbf43a3234d795b5986dbf4b90ba" -dependencies = [ - "hashbrown", -] - -[[package]] -name = "lru-cache" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" -dependencies = [ - "linked-hash-map", -] - -[[package]] -name = "mach" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" -dependencies = [ - "libc", -] - -[[package]] -name = "maplit" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" - -[[package]] -name = "match_cfg" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" - -[[package]] -name = "matchers" -version = "0.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" -dependencies 
= [ - "regex-automata", -] - -[[package]] -name = "matches" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" - -[[package]] -name = "matrixmultiply" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "916806ba0031cd542105d916a97c8572e1fa6dd79c9c51e7eb43a09ec2dd84c1" -dependencies = [ - "rawpointer", -] - -[[package]] -name = "max-encoded-len" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "impl-trait-for-tuples", - "max-encoded-len-derive", - "parity-scale-codec", - "primitive-types", -] - -[[package]] -name = "max-encoded-len-derive" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "proc-macro-crate 1.0.0", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - -[[package]] -name = "memchr" -version = "2.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" - -[[package]] -name = "memmap" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" -dependencies = [ - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "memmap2" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04e3e85b970d650e2ae6d70592474087051c11c54da7f7b4949725c5735fbcc6" -dependencies = [ - "libc", -] - -[[package]] -name = "memoffset" -version = "0.5.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" -dependencies = [ - "autocfg", -] - -[[package]] -name = "memoffset" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87" -dependencies = [ - "autocfg", -] - -[[package]] -name = "memory-db" -version = "0.26.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "814bbecfc0451fc314eeea34f05bbcd5b98a7ad7af37faee088b86a1e633f1d4" -dependencies = [ - "hash-db", - "hashbrown", - "parity-util-mem", -] - -[[package]] -name = "memory_units" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" - -[[package]] -name = "merlin" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" -dependencies = [ - "byteorder", - "keccak", - "rand_core 0.5.1", - "zeroize", -] - -[[package]] -name = "messages-relay" -version = "0.1.0" -dependencies = [ - "async-std", - "async-trait", - "bp-messages", - "bp-runtime", - "futures 0.3.13", - "hex", - "log", - "num-traits", - "parking_lot 0.11.1", - "relay-utils", -] - -[[package]] -name = "millau-bridge-node" -version = "0.1.0" -dependencies = [ - "bp-messages", - "bp-millau", - "bp-runtime", - "frame-benchmarking", - "frame-benchmarking-cli", - "jsonrpc-core 15.1.0", - "millau-runtime", - "node-inspect", - "pallet-bridge-messages", - "pallet-transaction-payment-rpc", - "sc-basic-authorship", - "sc-cli", - "sc-client-api", - "sc-consensus", - "sc-consensus-aura", - "sc-executor", - "sc-finality-grandpa", - "sc-finality-grandpa-rpc", - "sc-keystore", - "sc-rpc", - "sc-service", - "sc-telemetry", - "sc-transaction-pool", - "serde_json", - "sp-consensus", - 
"sp-consensus-aura", - "sp-core", - "sp-finality-grandpa", - "sp-inherents", - "sp-runtime", - "sp-timestamp", - "structopt", - "substrate-build-script-utils", - "substrate-frame-rpc-system", -] - -[[package]] -name = "millau-runtime" -version = "0.1.0" -dependencies = [ - "bp-header-chain", - "bp-messages", - "bp-millau", - "bp-rialto", - "bp-runtime", - "bp-westend", - "bridge-runtime-common", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-rpc-runtime-api", - "hex-literal 0.3.1", - "pallet-aura", - "pallet-balances", - "pallet-bridge-dispatch", - "pallet-bridge-grandpa", - "pallet-bridge-messages", - "pallet-grandpa", - "pallet-randomness-collective-flip", - "pallet-session", - "pallet-shift-session-manager", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec", - "serde", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core", - "sp-finality-grandpa", - "sp-inherents", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std", - "sp-transaction-pool", - "sp-trie", - "sp-version", - "substrate-wasm-builder", -] - -[[package]] -name = "mime" -version = "0.3.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" - -[[package]] -name = "minicbor" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea79ce4ab9f445ec6b71833a2290ac0a29c9dde0fa7cae4c481eecae021d9bd9" -dependencies = [ - "minicbor-derive", -] - -[[package]] -name = "minicbor-derive" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce18b5423c573a13e80cb3046ea0af6379ef725dc3af4886bdb8f4e5093068" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "miniz_oxide" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" -dependencies = [ - "adler", - "autocfg", -] - -[[package]] -name = "mio" -version = "0.6.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" -dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", - "libc", - "log", - "miow 0.2.2", - "net2", - "slab", - "winapi 0.2.8", -] - -[[package]] -name = "mio-extras" -version = "2.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" -dependencies = [ - "lazycell", - "log", - "mio", - "slab", -] - -[[package]] -name = "mio-named-pipes" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" -dependencies = [ - "log", - "mio", - "miow 0.3.6", - "winapi 0.3.9", -] - -[[package]] -name = "mio-uds" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" -dependencies = [ - "iovec", - "libc", - "mio", -] - -[[package]] -name = "miow" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" -dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", -] - -[[package]] -name = "miow" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" -dependencies = [ - "socket2 0.3.19", - "winapi 0.3.9", -] - -[[package]] -name = "more-asserts" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238" - -[[package]] -name = "multibase" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b78c60039650ff12e140ae867ef5299a58e19dded4d334c849dc7177083667e2" -dependencies = [ - "base-x", - "data-encoding", - "data-encoding-macro", -] - -[[package]] -name = "multihash" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dac63698b887d2d929306ea48b63760431ff8a24fac40ddb22f9c7f49fb7cab" -dependencies = [ - "blake2b_simd", - "blake2s_simd", - "blake3", - "digest 0.9.0", - "generic-array 0.14.4", - "multihash-derive", - "sha2 0.9.3", - "sha3", - "unsigned-varint 0.5.1", -] - -[[package]] -name = "multihash-derive" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85ee3c48cb9d9b275ad967a0e96715badc13c6029adb92f34fa17b9ff28fd81f" -dependencies = [ - "proc-macro-crate 0.1.5", - "proc-macro-error", - "proc-macro2", - "quote", - "syn", - "synstructure", -] - -[[package]] -name = "multimap" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" - -[[package]] -name = "multistream-select" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5df70763c86c98487451f307e1b68b4100da9076f4c12146905fc2054277f4e8" -dependencies = [ - "bytes 1.0.1", - "futures 0.3.13", - "log", - "pin-project 1.0.5", - "smallvec 1.6.1", - "unsigned-varint 0.7.0", -] - -[[package]] -name = "nalgebra" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b6147c3d50b4f3cdabfe2ecc94a0191fd3d6ad58aefd9664cf396285883486" -dependencies = [ - "approx", - "generic-array 0.13.2", - "matrixmultiply", - "num-complex", - "num-rational", - "num-traits", - "rand 0.7.3", - "rand_distr", - "simba", - "typenum", -] - 
-[[package]] -name = "names" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef320dab323286b50fb5cdda23f61c796a72a89998ab565ca32525c5c556f2da" -dependencies = [ - "rand 0.3.23", -] - -[[package]] -name = "nb-connect" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670361df1bc2399ee1ff50406a0d422587dd3bb0da596e1978fe8e05dabddf4f" -dependencies = [ - "libc", - "socket2 0.3.19", -] - -[[package]] -name = "net2" -version = "0.2.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "node-inspect" -version = "0.8.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "derive_more", - "log", - "parity-scale-codec", - "sc-cli", - "sc-client-api", - "sc-service", - "sp-blockchain", - "sp-core", - "sp-runtime", - "structopt", -] - -[[package]] -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - -[[package]] -name = "nohash-hasher" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" - -[[package]] -name = "nom" -version = "5.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" -dependencies = [ - "memchr", - "version_check", -] - -[[package]] -name = "ntapi" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" -dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = 
"num-bigint" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-complex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-format" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bafe4179722c2894288ee77a9f044f02811c86af699344c498b0840c698a2465" -dependencies = [ - "arrayvec 0.4.12", - "itoa", -] - -[[package]] -name = "num-integer" -version = "0.1.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" -dependencies = [ - "autocfg", - "num-bigint", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" -dependencies = [ - "autocfg", - "libm", -] - -[[package]] -name = "num_cpus" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "object" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" - -[[package]] -name = 
"object" -version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a5b3dd1c072ee7963717671d1ca129f1048fda25edea6b752bfc71ac8854170" -dependencies = [ - "crc32fast", - "indexmap", -] - -[[package]] -name = "once_cell" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10acf907b94fc1b1a152d08ef97e7759650268cf986bf127f387e602b02c7e5a" -dependencies = [ - "parking_lot 0.11.1", -] - -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - -[[package]] -name = "opaque-debug" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" - -[[package]] -name = "openssl-probe" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" - -[[package]] -name = "openssl-sys" -version = "0.9.61" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "313752393519e876837e09e1fa183ddef0be7735868dced3196f4472d536277f" -dependencies = [ - "autocfg", - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "owning_ref" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce" -dependencies = [ - "stable_deref_trait", -] - -[[package]] -name = "pallet-aura" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "frame-support", - "frame-system", - "pallet-session", - "pallet-timestamp", - "parity-scale-codec", - "sp-application-crypto", - "sp-consensus-aura", - "sp-runtime", - "sp-std", -] - -[[package]] -name = 
"pallet-authorship" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "parity-scale-codec", - "sp-authorship", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-balances" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "log", - "max-encoded-len", - "parity-scale-codec", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-bridge-currency-exchange" -version = "0.1.0" -dependencies = [ - "bp-currency-exchange", - "bp-header-chain", - "frame-benchmarking", - "frame-support", - "frame-system", - "log", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-bridge-dispatch" -version = "0.1.0" -dependencies = [ - "bp-message-dispatch", - "bp-runtime", - "frame-support", - "frame-system", - "log", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-bridge-eth-poa" -version = "0.1.0" -dependencies = [ - "bp-eth-poa", - "frame-benchmarking", - "frame-support", - "frame-system", - "hex-literal 0.3.1", - "libsecp256k1", - "log", - "parity-scale-codec", - "serde", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-bridge-grandpa" -version = "0.1.0" -dependencies = [ - "bp-header-chain", - "bp-runtime", - "bp-test-utils", - "finality-grandpa", - "frame-benchmarking", - "frame-support", - "frame-system", - "log", - "num-traits", - "parity-scale-codec", - "serde", - "sp-finality-grandpa", - "sp-io", - "sp-runtime", - "sp-std", - "sp-trie", -] - -[[package]] -name = "pallet-bridge-messages" -version = "0.1.0" -dependencies = [ - "bitvec", - "bp-message-dispatch", - "bp-messages", - 
"bp-rialto", - "bp-runtime", - "frame-benchmarking", - "frame-support", - "frame-system", - "hex", - "hex-literal 0.3.1", - "log", - "num-traits", - "pallet-balances", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-grandpa" -version = "3.1.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "log", - "pallet-authorship", - "pallet-session", - "parity-scale-codec", - "sp-application-crypto", - "sp-core", - "sp-finality-grandpa", - "sp-io", - "sp-runtime", - "sp-session", - "sp-staking", - "sp-std", -] - -[[package]] -name = "pallet-randomness-collective-flip" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "frame-support", - "frame-system", - "parity-scale-codec", - "safe-mix", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-session" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "log", - "pallet-timestamp", - "parity-scale-codec", - "sp-core", - "sp-io", - "sp-runtime", - "sp-session", - "sp-staking", - "sp-std", - "sp-trie", -] - -[[package]] -name = "pallet-shift-session-manager" -version = "0.1.0" -dependencies = [ - "frame-support", - "frame-system", - "pallet-session", - "parity-scale-codec", - "serde", - "sp-core", - "sp-runtime", - "sp-staking", - "sp-std", -] - -[[package]] -name = "pallet-sudo" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "frame-support", - "frame-system", - "parity-scale-codec", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = 
"pallet-timestamp" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "sp-inherents", - "sp-runtime", - "sp-std", - "sp-timestamp", -] - -[[package]] -name = "pallet-transaction-payment" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "frame-support", - "frame-system", - "parity-scale-codec", - "serde", - "smallvec 1.6.1", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-transaction-payment-rpc" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "jsonrpc-core 15.1.0", - "jsonrpc-core-client", - "jsonrpc-derive", - "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec", - "sp-api", - "sp-blockchain", - "sp-core", - "sp-rpc", - "sp-runtime", -] - -[[package]] -name = "pallet-transaction-payment-rpc-runtime-api" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "pallet-transaction-payment", - "parity-scale-codec", - "sp-api", - "sp-runtime", -] - -[[package]] -name = "parity-bytes" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b56e3a2420138bdb970f84dfb9c774aea80fa0e7371549eedec0d80c209c67" - -[[package]] -name = "parity-db" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e337f62db341435f0da05b8f6b97e984ef4ea5800510cd07c2d624688c40b47" -dependencies = [ - "blake2-rfc", - "crc32fast", - "fs2", - "hex", - "libc", - "log", - "memmap2", - "parking_lot 0.11.1", - "rand 0.8.3", -] - -[[package]] -name = 
"parity-multiaddr" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58341485071825827b7f03cf7efd1cb21e6a709bea778fb50227fd45d2f361b4" -dependencies = [ - "arrayref", - "bs58", - "byteorder", - "data-encoding", - "multihash", - "percent-encoding 2.1.0", - "serde", - "static_assertions", - "unsigned-varint 0.7.0", - "url 2.2.1", -] - -[[package]] -name = "parity-scale-codec" -version = "2.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f518afaa5a47d0d6386229b0a6e01e86427291d643aa4cabb4992219f504f8" -dependencies = [ - "arrayvec 0.7.0", - "bitvec", - "byte-slice-cast", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f44c5f94427bd0b5076e8f7e15ca3f60a4d8ac0077e4793884e6fdfd8915344e" -dependencies = [ - "proc-macro-crate 0.1.5", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "parity-send-wrapper" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" - -[[package]] -name = "parity-tokio-ipc" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e57fea504fea33f9fbb5f49f378359030e7e026a6ab849bb9e8f0787376f1bf" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "libc", - "log", - "mio-named-pipes", - "miow 0.3.6", - "rand 0.7.3", - "tokio 0.1.22", - "tokio-named-pipes", - "tokio-uds", - "winapi 0.3.9", -] - -[[package]] -name = "parity-util-mem" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "664a8c6b8e62d8f9f2f937e391982eb433ab285b4cd9545b342441e04a906e42" -dependencies = [ - "cfg-if 1.0.0", - "hashbrown", - "impl-trait-for-tuples", - "parity-util-mem-derive", - "parking_lot 0.11.1", - "primitive-types", - "smallvec 
1.6.1", - "winapi 0.3.9", -] - -[[package]] -name = "parity-util-mem-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" -dependencies = [ - "proc-macro2", - "syn", - "synstructure", -] - -[[package]] -name = "parity-wasm" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16ad52817c4d343339b3bc2e26861bd21478eda0b7509acf83505727000512ac" -dependencies = [ - "byteorder", -] - -[[package]] -name = "parity-wasm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" - -[[package]] -name = "parity-ws" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e02a625dd75084c2a7024f07c575b61b782f729d18702dabb3cdbf31911dc61" -dependencies = [ - "byteorder", - "bytes 0.4.12", - "httparse", - "log", - "mio", - "mio-extras", - "rand 0.7.3", - "sha-1 0.8.2", - "slab", - "url 2.2.1", -] - -[[package]] -name = "parking" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" - -[[package]] -name = "parking_lot" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" -dependencies = [ - "lock_api 0.3.4", - "parking_lot_core 0.6.2", - "rustc_version", -] - -[[package]] -name = "parking_lot" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" -dependencies = [ - "lock_api 0.3.4", - "parking_lot_core 0.7.2", -] - -[[package]] -name = "parking_lot" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" -dependencies = [ - "instant", - "lock_api 0.4.2", - "parking_lot_core 0.8.3", -] - -[[package]] -name = "parking_lot_core" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" -dependencies = [ - "cfg-if 0.1.10", - "cloudabi", - "libc", - "redox_syscall 0.1.57", - "rustc_version", - "smallvec 0.6.14", - "winapi 0.3.9", -] - -[[package]] -name = "parking_lot_core" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" -dependencies = [ - "cfg-if 0.1.10", - "cloudabi", - "libc", - "redox_syscall 0.1.57", - "smallvec 1.6.1", - "winapi 0.3.9", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" -dependencies = [ - "cfg-if 1.0.0", - "instant", - "libc", - "redox_syscall 0.2.5", - "smallvec 1.6.1", - "winapi 0.3.9", -] - -[[package]] -name = "paste" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" -dependencies = [ - "paste-impl", - "proc-macro-hack", -] - -[[package]] -name = "paste" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1" - -[[package]] -name = "paste-impl" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" -dependencies = [ - "proc-macro-hack", -] - -[[package]] -name = "pbkdf2" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" -dependencies = [ - "byteorder", - "crypto-mac 0.7.0", -] - -[[package]] -name = "pbkdf2" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" -dependencies = [ - "crypto-mac 0.8.0", -] - -[[package]] -name = "pdqselect" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec91767ecc0a0bbe558ce8c9da33c068066c57ecc8bb8477ef8c1ad3ef77c27" - -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - -[[package]] -name = "percent-encoding" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" - -[[package]] -name = "percent-encoding" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" - -[[package]] -name = "pest" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" -dependencies = [ - "ucd-trie", -] - -[[package]] -name = "pest_derive" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "833d1ae558dc601e9a60366421196a8d94bc0ac980476d0b67e1d0988d72b2d0" -dependencies = [ - "pest", - "pest_generator", -] - -[[package]] -name = "pest_generator" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" -dependencies = [ - "pest", - "pest_meta", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pest_meta" -version = 
"2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d" -dependencies = [ - "maplit", - "pest", - "sha-1 0.8.2", -] - -[[package]] -name = "petgraph" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" -dependencies = [ - "fixedbitset", - "indexmap", -] - -[[package]] -name = "pin-project" -version = "0.4.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffbc8e94b38ea3d2d8ba92aea2983b503cd75d0888d75b86bb37970b5698e15" -dependencies = [ - "pin-project-internal 0.4.27", -] - -[[package]] -name = "pin-project" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" -dependencies = [ - "pin-project-internal 1.0.5", -] - -[[package]] -name = "pin-project-internal" -version = "0.4.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65ad2ae56b6abe3a1ee25f15ee605bacadb9a764edaba9c2bf4103800d4a1895" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pin-project-internal" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pin-project-lite" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" - -[[package]] -name = "pin-project-lite" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "pkg-config" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" - -[[package]] -name = "plain_hasher" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e19e6491bdde87c2c43d70f4c194bc8a758f2eb732df00f61e43f7362e3b4cc" -dependencies = [ - "crunchy", -] - -[[package]] -name = "platforms" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "989d43012e2ca1c4a02507c67282691a0a3207f9dc67cec596b43fe925b3d325" - -[[package]] -name = "polling" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2a7bc6b2a29e632e45451c941832803a18cce6781db04de8a04696cdca8bde4" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "log", - "wepoll-sys", - "winapi 0.3.9", -] - -[[package]] -name = "poly1305" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7456bc1ad2d4cf82b3a016be4c2ac48daf11bf990c1603ebd447fe6f30fca8" -dependencies = [ - "cpuid-bool 0.2.0", - "universal-hash", -] - -[[package]] -name = "polyval" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" -dependencies = [ - "cpuid-bool 0.2.0", - "opaque-debug 0.3.0", - "universal-hash", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" - -[[package]] -name = "primitive-types" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2415937401cb030a2a0a4d922483f945fa068f52a7dbb22ce0fe5f2b6f6adace" -dependencies = [ - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" -dependencies = [ - "toml", -] - -[[package]] -name = "proc-macro-crate" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fdbd1df62156fbc5945f4762632564d7d038153091c3fcf1067f6aef7cff92" -dependencies = [ - "thiserror", - "toml", -] - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - -[[package]] -name = "proc-macro-hack" -version = "0.5.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" - -[[package]] -name = "proc-macro-nested" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" - -[[package]] -name = "proc-macro2" -version = "1.0.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "prometheus" -version = "0.11.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8425533e7122f0c3cc7a37e6244b16ad3a2cc32ae7ac6276e2a75da0d9c200d" -dependencies = [ - "cfg-if 1.0.0", - "fnv", - "lazy_static", - "parking_lot 0.11.1", - "regex", - "thiserror", -] - -[[package]] -name = "prost" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6984d2f1a23009bd270b8bb56d0926810a3d483f59c987d77969e9d8e840b2" -dependencies = [ - "bytes 1.0.1", - "prost-derive", -] - -[[package]] -name = "prost-build" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32d3ebd75ac2679c2af3a92246639f9fcc8a442ee420719cc4fe195b98dd5fa3" -dependencies = [ - "bytes 1.0.1", - "heck", - "itertools 0.9.0", - "log", - "multimap", - "petgraph", - "prost", - "prost-types", - "tempfile", - "which 4.0.2", -] - -[[package]] -name = "prost-derive" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "169a15f3008ecb5160cba7d37bcd690a7601b6d30cfb87a117d45e59d52af5d4" -dependencies = [ - "anyhow", - "itertools 0.9.0", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "prost-types" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b518d7cdd93dab1d1122cf07fa9a60771836c668dde9d9e2a139f957f0d9f1bb" -dependencies = [ - "bytes 1.0.1", - "prost", -] - -[[package]] -name = "psm" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3abf49e5417290756acfd26501536358560c4a5cc4a0934d390939acb3e7083a" -dependencies = [ - "cc", -] - -[[package]] -name = "pwasm-utils" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0e517f47d9964362883182404b68d0b6949382c0baa40aa5ffca94f5f1e3481" -dependencies = [ - "byteorder", - "log", - "parity-wasm 0.42.2", -] - -[[package]] -name = "quick-error" -version = "1.2.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - -[[package]] -name = "quick-error" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ac73b1112776fc109b2e61909bc46c7e1bf0d7f690ffb1676553acce16d5cda" - -[[package]] -name = "quicksink" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858" -dependencies = [ - "futures-core", - "futures-sink", - "pin-project-lite 0.1.11", -] - -[[package]] -name = "quote" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "radium" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" - -[[package]] -name = "rand" -version = "0.3.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c" -dependencies = [ - "libc", - "rand 0.4.6", -] - -[[package]] -name = "rand" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" -dependencies = [ - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "rdrand", - "winapi 0.3.9", -] - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", - "rand_pcg", -] - -[[package]] -name = "rand" -version = "0.8.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" -dependencies = [ - "libc", - "rand_chacha 0.3.0", - "rand_core 0.6.2", - "rand_hc 0.3.0", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_chacha" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" -dependencies = [ - "ppv-lite86", - "rand_core 0.6.2", -] - -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = [ - "rand_core 0.4.2", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", -] - -[[package]] -name = "rand_core" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" -dependencies = [ - "getrandom 0.2.2", -] - -[[package]] -name = "rand_distr" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2" -dependencies = [ - "rand 0.7.3", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_hc" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" -dependencies = [ - "rand_core 0.6.2", -] - -[[package]] -name = "rand_pcg" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rawpointer" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" - -[[package]] -name = "rayon" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674" -dependencies = [ - "autocfg", - "crossbeam-deque 0.8.0", - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" -dependencies = [ - "crossbeam-channel", - "crossbeam-deque 0.8.0", - "crossbeam-utils 0.8.3", - "lazy_static", - "num_cpus", -] - -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "redox_syscall" -version = "0.1.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" - -[[package]] -name = "redox_syscall" -version = "0.2.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" -dependencies = [ - "bitflags", -] - -[[package]] -name = "redox_users" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" -dependencies = [ - "getrandom 0.1.16", - "redox_syscall 0.1.57", - "rust-argon2", -] - -[[package]] -name = "redox_users" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" -dependencies = [ - "getrandom 0.2.2", - "redox_syscall 0.2.5", -] - -[[package]] -name = "ref-cast" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" -dependencies = [ - "ref-cast-impl", -] - -[[package]] -name = "ref-cast-impl" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "regalloc" -version = "0.0.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5" -dependencies = [ - "log", - "rustc-hash", - "serde", - "smallvec 1.6.1", -] - -[[package]] -name = "regex" -version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", - "thread_local", -] - -[[package]] -name = "regex-automata" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" -dependencies 
= [ - "byteorder", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.6.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" - -[[package]] -name = "region" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0" -dependencies = [ - "bitflags", - "libc", - "mach", - "winapi 0.3.9", -] - -[[package]] -name = "relay-ethereum-client" -version = "0.1.0" -dependencies = [ - "async-std", - "bp-eth-poa", - "headers-relay", - "hex-literal 0.3.1", - "jsonrpsee-proc-macros", - "jsonrpsee-ws-client", - "libsecp256k1", - "log", - "parity-scale-codec", - "relay-utils", - "web3", -] - -[[package]] -name = "relay-kusama-client" -version = "0.1.0" -dependencies = [ - "bp-kusama", - "frame-support", - "frame-system", - "headers-relay", - "pallet-transaction-payment", - "parity-scale-codec", - "relay-substrate-client", - "relay-utils", - "sp-core", - "sp-keyring", - "sp-runtime", -] - -[[package]] -name = "relay-millau-client" -version = "0.1.0" -dependencies = [ - "frame-support", - "frame-system", - "headers-relay", - "millau-runtime", - "pallet-transaction-payment", - "parity-scale-codec", - "relay-substrate-client", - "relay-utils", - "sp-core", - "sp-keyring", - "sp-runtime", -] - -[[package]] -name = "relay-polkadot-client" -version = "0.1.0" -dependencies = [ - "bp-polkadot", - "frame-support", - "frame-system", - "headers-relay", - "pallet-transaction-payment", - "parity-scale-codec", - "relay-substrate-client", - "relay-utils", - "sp-core", - "sp-keyring", - "sp-runtime", -] - -[[package]] -name = "relay-rialto-client" -version = "0.1.0" -dependencies = [ - "frame-support", - "frame-system", - "headers-relay", - "pallet-transaction-payment", - "parity-scale-codec", - "relay-substrate-client", - "relay-utils", - "rialto-runtime", - "sp-core", - 
"sp-keyring", - "sp-runtime", -] - -[[package]] -name = "relay-rococo-client" -version = "0.1.0" -dependencies = [ - "bp-header-chain", - "bp-message-dispatch", - "bp-messages", - "bp-polkadot-core", - "bp-rococo", - "bp-runtime", - "bp-wococo", - "bridge-runtime-common", - "frame-support", - "frame-system", - "headers-relay", - "pallet-bridge-dispatch", - "pallet-bridge-messages", - "pallet-transaction-payment", - "parity-scale-codec", - "relay-substrate-client", - "relay-utils", - "sp-core", - "sp-keyring", - "sp-runtime", -] - -[[package]] -name = "relay-substrate-client" -version = "0.1.0" -dependencies = [ - "async-std", - "async-trait", - "bp-header-chain", - "bp-messages", - "bp-runtime", - "finality-relay", - "frame-support", - "frame-system", - "futures 0.3.13", - "headers-relay", - "jsonrpsee-proc-macros", - "jsonrpsee-ws-client", - "log", - "num-traits", - "pallet-balances", - "parity-scale-codec", - "rand 0.7.3", - "relay-utils", - "sc-rpc-api", - "sp-core", - "sp-finality-grandpa", - "sp-runtime", - "sp-std", - "sp-storage", - "sp-trie", - "sp-version", -] - -[[package]] -name = "relay-utils" -version = "0.1.0" -dependencies = [ - "ansi_term 0.12.1", - "async-std", - "async-trait", - "backoff", - "env_logger 0.8.3", - "futures 0.3.13", - "isahc", - "jsonpath_lib", - "log", - "num-traits", - "serde_json", - "substrate-prometheus-endpoint", - "sysinfo", - "time 0.2.25", -] - -[[package]] -name = "relay-westend-client" -version = "0.1.0" -dependencies = [ - "bp-westend", - "frame-support", - "frame-system", - "headers-relay", - "pallet-transaction-payment", - "parity-scale-codec", - "relay-substrate-client", - "relay-utils", - "sp-core", - "sp-keyring", - "sp-runtime", -] - -[[package]] -name = "relay-wococo-client" -version = "0.1.0" -dependencies = [ - "bp-header-chain", - "bp-message-dispatch", - "bp-messages", - "bp-polkadot-core", - "bp-rococo", - "bp-runtime", - "bp-wococo", - "bridge-runtime-common", - "frame-support", - "frame-system", - 
"headers-relay", - "pallet-bridge-dispatch", - "pallet-bridge-messages", - "pallet-transaction-payment", - "parity-scale-codec", - "relay-substrate-client", - "relay-utils", - "sp-core", - "sp-keyring", - "sp-runtime", -] - -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "resolv-conf" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" -dependencies = [ - "hostname", - "quick-error 1.2.3", -] - -[[package]] -name = "retain_mut" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9c17925a9027d298a4603d286befe3f9dc0e8ed02523141914eb628798d6e5b" - -[[package]] -name = "rialto-bridge-node" -version = "0.1.0" -dependencies = [ - "bp-messages", - "bp-rialto", - "bp-runtime", - "frame-benchmarking", - "frame-benchmarking-cli", - "jsonrpc-core 15.1.0", - "node-inspect", - "pallet-bridge-messages", - "pallet-transaction-payment-rpc", - "rialto-runtime", - "sc-basic-authorship", - "sc-cli", - "sc-client-api", - "sc-consensus", - "sc-consensus-aura", - "sc-executor", - "sc-finality-grandpa", - "sc-finality-grandpa-rpc", - "sc-keystore", - "sc-rpc", - "sc-service", - "sc-telemetry", - "sc-transaction-pool", - "serde_json", - "sp-consensus", - "sp-consensus-aura", - "sp-core", - "sp-finality-grandpa", - "sp-inherents", - "sp-runtime", - "sp-timestamp", - "structopt", - "substrate-build-script-utils", - "substrate-frame-rpc-system", -] - -[[package]] -name = "rialto-runtime" -version = "0.1.0" -dependencies = [ - "bp-currency-exchange", - "bp-eth-poa", - "bp-header-chain", - "bp-message-dispatch", - "bp-messages", - "bp-millau", - "bp-rialto", - "bp-runtime", - "bridge-runtime-common", - "frame-benchmarking", - 
"frame-executive", - "frame-support", - "frame-system", - "frame-system-rpc-runtime-api", - "hex-literal 0.3.1", - "libsecp256k1", - "log", - "pallet-aura", - "pallet-balances", - "pallet-bridge-currency-exchange", - "pallet-bridge-dispatch", - "pallet-bridge-eth-poa", - "pallet-bridge-grandpa", - "pallet-bridge-messages", - "pallet-grandpa", - "pallet-randomness-collective-flip", - "pallet-session", - "pallet-shift-session-manager", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec", - "serde", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core", - "sp-finality-grandpa", - "sp-inherents", - "sp-io", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std", - "sp-transaction-pool", - "sp-trie", - "sp-version", - "substrate-wasm-builder", -] - -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin", - "untrusted", - "web-sys", - "winapi 0.3.9", -] - -[[package]] -name = "rlp" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e54369147e3e7796c9b885c7304db87ca3d09a0a98f72843d532868675bbfba8" -dependencies = [ - "bytes 1.0.1", - "rustc-hex", -] - -[[package]] -name = "rocksdb" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d83c02c429044d58474eaf5ae31e062d0de894e21125b47437ec0edc1397e6" -dependencies = [ - "libc", - "librocksdb-sys", -] - -[[package]] -name = "rpassword" -version = "5.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc936cf8a7ea60c58f030fd36a612a48f440610214dc54bc36431f9ea0c3efb" -dependencies = [ - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "rust-argon2" -version = "0.8.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" -dependencies = [ - "base64 0.13.0", - "blake2b_simd", - "constant_time_eq", - "crossbeam-utils 0.8.3", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" - -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver 0.9.0", -] - -[[package]] -name = "rustls" -version = "0.18.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" -dependencies = [ - "base64 0.12.3", - "log", - "ring", - "sct", - "webpki 0.21.4", -] - -[[package]] -name = "rustls" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" -dependencies = [ - "base64 0.13.0", - "log", - "ring", - "sct", - "webpki 0.21.4", -] - -[[package]] -name = "rustls-native-certs" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "629d439a7672da82dd955498445e496ee2096fe2117b9f796558a43fdb9e59b8" -dependencies = [ - "openssl-probe", - "rustls 0.18.1", - "schannel", - "security-framework", -] - -[[package]] -name = "ruzstd" 
-version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d425143485a37727c7a46e689bbe3b883a00f42b4a52c4ac0f44855c1009b00" -dependencies = [ - "byteorder", - "twox-hash", -] - -[[package]] -name = "rw-stream-sink" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" -dependencies = [ - "futures 0.3.13", - "pin-project 0.4.27", - "static_assertions", -] - -[[package]] -name = "ryu" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" - -[[package]] -name = "safe-mix" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d3d055a2582e6b00ed7a31c1524040aa391092bf636328350813f3a0605215c" -dependencies = [ - "rustc_version", -] - -[[package]] -name = "salsa20" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399f290ffc409596022fce5ea5d4138184be4784f2b28c62c59f0d8389059a15" -dependencies = [ - "cipher", -] - -[[package]] -name = "same-file" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "sc-basic-authorship" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "futures 0.3.13", - "futures-timer 3.0.2", - "log", - "parity-scale-codec", - "sc-block-builder", - "sc-client-api", - "sc-proposer-metrics", - "sc-telemetry", - "sp-api", - "sp-blockchain", - "sp-consensus", - "sp-core", - "sp-inherents", - "sp-runtime", - "sp-transaction-pool", - "substrate-prometheus-endpoint", -] - -[[package]] -name = "sc-block-builder" -version = "0.9.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "parity-scale-codec", - "sc-client-api", - "sp-api", - "sp-block-builder", - "sp-blockchain", - "sp-core", - "sp-inherents", - "sp-runtime", - "sp-state-machine", -] - -[[package]] -name = "sc-chain-spec" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "sc-chain-spec-derive", - "sc-consensus-babe", - "sc-consensus-epochs", - "sc-finality-grandpa", - "sc-network", - "sc-telemetry", - "serde", - "serde_json", - "sp-chain-spec", - "sp-consensus-babe", - "sp-core", - "sp-runtime", -] - -[[package]] -name = "sc-chain-spec-derive" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "proc-macro-crate 1.0.0", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sc-cli" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "chrono", - "fdlimit", - "futures 0.3.13", - "hex", - "libp2p", - "log", - "names", - "parity-scale-codec", - "rand 0.7.3", - "regex", - "rpassword", - "sc-client-api", - "sc-keystore", - "sc-network", - "sc-service", - "sc-telemetry", - "sc-tracing", - "serde", - "serde_json", - "sp-blockchain", - "sp-core", - "sp-keyring", - "sp-keystore", - "sp-panic-handler", - "sp-runtime", - "sp-utils", - "sp-version", - "structopt", - "thiserror", - "tiny-bip39", - "tokio 0.2.25", -] - -[[package]] -name = "sc-client-api" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "derive_more", - "fnv", - "futures 0.3.13", - "hash-db", - "kvdb", - "lazy_static", - "log", - "parity-scale-codec", - 
"parking_lot 0.11.1", - "sc-executor", - "sp-api", - "sp-blockchain", - "sp-consensus", - "sp-core", - "sp-database", - "sp-externalities", - "sp-inherents", - "sp-keystore", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-storage", - "sp-transaction-pool", - "sp-trie", - "sp-utils", - "sp-version", - "substrate-prometheus-endpoint", -] - -[[package]] -name = "sc-client-db" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "blake2-rfc", - "hash-db", - "kvdb", - "kvdb-memorydb", - "kvdb-rocksdb", - "linked-hash-map", - "log", - "parity-db", - "parity-scale-codec", - "parity-util-mem", - "parking_lot 0.11.1", - "sc-client-api", - "sc-executor", - "sc-state-db", - "sp-arithmetic", - "sp-blockchain", - "sp-consensus", - "sp-core", - "sp-database", - "sp-runtime", - "sp-state-machine", - "sp-trie", - "substrate-prometheus-endpoint", -] - -[[package]] -name = "sc-consensus" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-trait", - "parking_lot 0.11.1", - "sc-client-api", - "sp-blockchain", - "sp-consensus", - "sp-runtime", -] - -[[package]] -name = "sc-consensus-aura" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-trait", - "derive_more", - "futures 0.3.13", - "futures-timer 3.0.2", - "log", - "parity-scale-codec", - "sc-block-builder", - "sc-client-api", - "sc-consensus-slots", - "sc-telemetry", - "sp-api", - "sp-application-crypto", - "sp-block-builder", - "sp-blockchain", - "sp-consensus", - "sp-consensus-aura", - "sp-consensus-slots", - "sp-core", - "sp-inherents", - "sp-io", - "sp-keystore", - "sp-runtime", - "sp-version", - "substrate-prometheus-endpoint", -] - -[[package]] -name = "sc-consensus-babe" -version = "0.9.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-trait", - "derive_more", - "fork-tree", - "futures 0.3.13", - "futures-timer 3.0.2", - "log", - "merlin", - "num-bigint", - "num-rational", - "num-traits", - "parity-scale-codec", - "parking_lot 0.11.1", - "pdqselect", - "rand 0.7.3", - "retain_mut", - "sc-client-api", - "sc-consensus-epochs", - "sc-consensus-slots", - "sc-consensus-uncles", - "sc-keystore", - "sc-telemetry", - "schnorrkel", - "serde", - "sp-api", - "sp-application-crypto", - "sp-block-builder", - "sp-blockchain", - "sp-consensus", - "sp-consensus-babe", - "sp-consensus-slots", - "sp-consensus-vrf", - "sp-core", - "sp-inherents", - "sp-io", - "sp-keystore", - "sp-runtime", - "sp-utils", - "sp-version", - "substrate-prometheus-endpoint", -] - -[[package]] -name = "sc-consensus-epochs" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "fork-tree", - "parity-scale-codec", - "sc-client-api", - "sc-consensus", - "sp-blockchain", - "sp-runtime", -] - -[[package]] -name = "sc-consensus-slots" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-trait", - "futures 0.3.13", - "futures-timer 3.0.2", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "sc-client-api", - "sc-telemetry", - "sp-api", - "sp-application-crypto", - "sp-arithmetic", - "sp-blockchain", - "sp-consensus", - "sp-consensus-slots", - "sp-core", - "sp-inherents", - "sp-runtime", - "sp-state-machine", - "sp-timestamp", - "sp-trie", - "thiserror", -] - -[[package]] -name = "sc-consensus-uncles" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "sc-client-api", - "sp-authorship", - "sp-runtime", - 
"thiserror", -] - -[[package]] -name = "sc-executor" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "derive_more", - "lazy_static", - "libsecp256k1", - "log", - "parity-scale-codec", - "parity-wasm 0.42.2", - "parking_lot 0.11.1", - "sc-executor-common", - "sc-executor-wasmi", - "sc-executor-wasmtime", - "sp-api", - "sp-core", - "sp-externalities", - "sp-io", - "sp-panic-handler", - "sp-runtime-interface", - "sp-serializer", - "sp-tasks", - "sp-trie", - "sp-version", - "sp-wasm-interface", - "wasmi", -] - -[[package]] -name = "sc-executor-common" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "derive_more", - "parity-scale-codec", - "pwasm-utils", - "sp-allocator", - "sp-core", - "sp-maybe-compressed-blob", - "sp-serializer", - "sp-wasm-interface", - "thiserror", - "wasmi", -] - -[[package]] -name = "sc-executor-wasmi" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "log", - "parity-scale-codec", - "sc-executor-common", - "sp-allocator", - "sp-core", - "sp-runtime-interface", - "sp-wasm-interface", - "wasmi", -] - -[[package]] -name = "sc-executor-wasmtime" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "log", - "parity-scale-codec", - "parity-wasm 0.42.2", - "sc-executor-common", - "scoped-tls", - "sp-allocator", - "sp-core", - "sp-runtime-interface", - "sp-wasm-interface", - "wasmtime", -] - -[[package]] -name = "sc-finality-grandpa" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-trait", - "derive_more", - "dyn-clone", - 
"finality-grandpa", - "fork-tree", - "futures 0.3.13", - "futures-timer 3.0.2", - "linked-hash-map", - "log", - "parity-scale-codec", - "parking_lot 0.11.1", - "pin-project 1.0.5", - "rand 0.7.3", - "sc-block-builder", - "sc-client-api", - "sc-consensus", - "sc-keystore", - "sc-network", - "sc-network-gossip", - "sc-telemetry", - "serde_json", - "sp-api", - "sp-application-crypto", - "sp-arithmetic", - "sp-blockchain", - "sp-consensus", - "sp-core", - "sp-finality-grandpa", - "sp-inherents", - "sp-keystore", - "sp-runtime", - "sp-utils", - "substrate-prometheus-endpoint", - "wasm-timer", -] - -[[package]] -name = "sc-finality-grandpa-rpc" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "derive_more", - "finality-grandpa", - "futures 0.3.13", - "jsonrpc-core 15.1.0", - "jsonrpc-core-client", - "jsonrpc-derive", - "jsonrpc-pubsub", - "log", - "parity-scale-codec", - "sc-client-api", - "sc-finality-grandpa", - "sc-rpc", - "serde", - "serde_json", - "sp-blockchain", - "sp-core", - "sp-runtime", -] - -[[package]] -name = "sc-informant" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "ansi_term 0.12.1", - "futures 0.3.13", - "futures-timer 3.0.2", - "log", - "parity-util-mem", - "sc-client-api", - "sc-network", - "sp-blockchain", - "sp-runtime", - "sp-transaction-pool", - "wasm-timer", -] - -[[package]] -name = "sc-keystore" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-trait", - "derive_more", - "futures 0.3.13", - "futures-util", - "hex", - "merlin", - "parking_lot 0.11.1", - "rand 0.7.3", - "serde_json", - "sp-application-crypto", - "sp-core", - "sp-keystore", - "subtle 2.4.0", -] - -[[package]] -name = "sc-light" -version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "hash-db", - "lazy_static", - "parity-scale-codec", - "parking_lot 0.11.1", - "sc-client-api", - "sc-executor", - "sp-api", - "sp-blockchain", - "sp-core", - "sp-externalities", - "sp-runtime", - "sp-state-machine", -] - -[[package]] -name = "sc-network" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-std", - "async-trait", - "asynchronous-codec 0.5.0", - "bitflags", - "bs58", - "bytes 1.0.1", - "cid", - "derive_more", - "either", - "erased-serde", - "fnv", - "fork-tree", - "futures 0.3.13", - "futures-timer 3.0.2", - "hex", - "ip_network", - "libp2p", - "linked-hash-map", - "linked_hash_set", - "log", - "lru", - "nohash-hasher", - "parity-scale-codec", - "parking_lot 0.11.1", - "pin-project 1.0.5", - "prost", - "prost-build", - "rand 0.7.3", - "sc-block-builder", - "sc-client-api", - "sc-peerset", - "serde", - "serde_json", - "smallvec 1.6.1", - "sp-arithmetic", - "sp-blockchain", - "sp-consensus", - "sp-core", - "sp-runtime", - "sp-utils", - "substrate-prometheus-endpoint", - "thiserror", - "unsigned-varint 0.6.0", - "void", - "wasm-timer", - "zeroize", -] - -[[package]] -name = "sc-network-gossip" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "futures 0.3.13", - "futures-timer 3.0.2", - "libp2p", - "log", - "lru", - "sc-network", - "sp-runtime", - "substrate-prometheus-endpoint", - "tracing", - "wasm-timer", -] - -[[package]] -name = "sc-offchain" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "bytes 0.5.6", - "fnv", - "futures 0.3.13", - "futures-timer 3.0.2", - "hex", - "hyper 0.13.10", - "hyper-rustls", - "log", - "num_cpus", - 
"parity-scale-codec", - "parking_lot 0.11.1", - "rand 0.7.3", - "sc-client-api", - "sc-keystore", - "sc-network", - "sp-api", - "sp-core", - "sp-offchain", - "sp-runtime", - "sp-utils", - "threadpool", -] - -[[package]] -name = "sc-peerset" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "futures 0.3.13", - "libp2p", - "log", - "serde_json", - "sp-utils", - "wasm-timer", -] - -[[package]] -name = "sc-proposer-metrics" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "log", - "substrate-prometheus-endpoint", -] - -[[package]] -name = "sc-rpc" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "futures 0.3.13", - "hash-db", - "jsonrpc-core 15.1.0", - "jsonrpc-pubsub", - "log", - "parity-scale-codec", - "parking_lot 0.11.1", - "sc-block-builder", - "sc-client-api", - "sc-executor", - "sc-keystore", - "sc-rpc-api", - "sc-tracing", - "serde_json", - "sp-api", - "sp-blockchain", - "sp-chain-spec", - "sp-core", - "sp-keystore", - "sp-offchain", - "sp-rpc", - "sp-runtime", - "sp-session", - "sp-state-machine", - "sp-tracing", - "sp-transaction-pool", - "sp-utils", - "sp-version", -] - -[[package]] -name = "sc-rpc-api" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "derive_more", - "futures 0.3.13", - "jsonrpc-core 15.1.0", - "jsonrpc-core-client", - "jsonrpc-derive", - "jsonrpc-pubsub", - "log", - "parity-scale-codec", - "parking_lot 0.11.1", - "serde", - "serde_json", - "sp-chain-spec", - "sp-core", - "sp-rpc", - "sp-runtime", - "sp-tracing", - "sp-transaction-pool", - "sp-version", -] - -[[package]] -name = "sc-rpc-server" -version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "futures 0.1.31", - "jsonrpc-core 15.1.0", - "jsonrpc-http-server", - "jsonrpc-ipc-server", - "jsonrpc-pubsub", - "jsonrpc-ws-server", - "log", - "serde", - "serde_json", - "sp-runtime", - "substrate-prometheus-endpoint", -] - -[[package]] -name = "sc-service" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-trait", - "directories", - "exit-future", - "futures 0.1.31", - "futures 0.3.13", - "futures-timer 3.0.2", - "hash-db", - "jsonrpc-core 15.1.0", - "jsonrpc-pubsub", - "lazy_static", - "log", - "parity-scale-codec", - "parity-util-mem", - "parking_lot 0.11.1", - "pin-project 1.0.5", - "rand 0.7.3", - "sc-block-builder", - "sc-chain-spec", - "sc-client-api", - "sc-client-db", - "sc-executor", - "sc-informant", - "sc-keystore", - "sc-light", - "sc-network", - "sc-offchain", - "sc-rpc", - "sc-rpc-server", - "sc-telemetry", - "sc-tracing", - "sc-transaction-pool", - "serde", - "serde_json", - "sp-api", - "sp-application-crypto", - "sp-block-builder", - "sp-blockchain", - "sp-consensus", - "sp-core", - "sp-externalities", - "sp-inherents", - "sp-io", - "sp-keystore", - "sp-runtime", - "sp-session", - "sp-state-machine", - "sp-storage", - "sp-tracing", - "sp-transaction-pool", - "sp-transaction-storage-proof", - "sp-trie", - "sp-utils", - "sp-version", - "substrate-prometheus-endpoint", - "tempfile", - "thiserror", - "tracing", - "tracing-futures", - "wasm-timer", -] - -[[package]] -name = "sc-state-db" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "log", - "parity-scale-codec", - "parity-util-mem", - "parity-util-mem-derive", - "parking_lot 0.11.1", - "sc-client-api", - "sp-core", - "thiserror", -] - -[[package]] -name = "sc-telemetry" 
-version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "chrono", - "futures 0.3.13", - "libp2p", - "log", - "parking_lot 0.11.1", - "pin-project 1.0.5", - "rand 0.7.3", - "serde", - "serde_json", - "take_mut", - "thiserror", - "void", - "wasm-timer", -] - -[[package]] -name = "sc-tracing" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "ansi_term 0.12.1", - "atty", - "erased-serde", - "lazy_static", - "log", - "once_cell", - "parking_lot 0.11.1", - "regex", - "rustc-hash", - "sc-client-api", - "sc-rpc-server", - "sc-telemetry", - "sc-tracing-proc-macro", - "serde", - "serde_json", - "sp-api", - "sp-block-builder", - "sp-blockchain", - "sp-core", - "sp-rpc", - "sp-runtime", - "sp-storage", - "sp-tracing", - "thiserror", - "tracing", - "tracing-log", - "tracing-subscriber", - "wasm-bindgen", - "wasm-timer", - "web-sys", -] - -[[package]] -name = "sc-tracing-proc-macro" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "proc-macro-crate 1.0.0", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sc-transaction-graph" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "derive_more", - "futures 0.3.13", - "linked-hash-map", - "log", - "parity-util-mem", - "parking_lot 0.11.1", - "retain_mut", - "serde", - "sp-blockchain", - "sp-core", - "sp-runtime", - "sp-transaction-pool", - "sp-utils", - "thiserror", - "wasm-timer", -] - -[[package]] -name = "sc-transaction-pool" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "futures 0.3.13", - "intervalier", - "log", - 
"parity-scale-codec", - "parity-util-mem", - "parking_lot 0.11.1", - "sc-client-api", - "sc-transaction-graph", - "sp-api", - "sp-blockchain", - "sp-core", - "sp-runtime", - "sp-tracing", - "sp-transaction-pool", - "sp-utils", - "substrate-prometheus-endpoint", - "thiserror", - "wasm-timer", -] - -[[package]] -name = "schannel" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" -dependencies = [ - "lazy_static", - "winapi 0.3.9", -] - -[[package]] -name = "schnorrkel" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "curve25519-dalek 2.1.2", - "getrandom 0.1.16", - "merlin", - "rand 0.7.3", - "rand_core 0.5.1", - "sha2 0.8.2", - "subtle 2.4.0", - "zeroize", -] - -[[package]] -name = "scoped-tls" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" - -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "scroll" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda28d4b4830b807a8b43f7b0e6b5df875311b3e7621d84577188c175b6ec1ec" -dependencies = [ - "scroll_derive", -] - -[[package]] -name = "scroll_derive" -version = "0.10.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaaae8f38bb311444cfb7f1979af0bc9240d95795f75f9ceddf6a59b79ceffa0" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sct" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "secrecy" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0673d6a6449f5e7d12a1caf424fd9363e2af3a4953023ed455e3c4beef4597c0" -dependencies = [ - "zeroize", -] - -[[package]] -name = "security-framework" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad502866817f0575705bd7be36e2b2535cc33262d493aa733a2ec862baa2bc2b" -dependencies = [ - "bitflags", - "core-foundation", - "core-foundation-sys 0.7.0", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51ceb04988b17b6d1dcd555390fa822ca5637b4a14e1f5099f13d351bed4d6c7" -dependencies = [ - "core-foundation-sys 0.7.0", - "libc", -] - -[[package]] -name = "semver" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a3186ec9e65071a2095434b1f5bb24838d4e8e130f584c790f6033c79943537" -dependencies = [ - "semver-parser 0.7.0", -] - -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser 0.7.0", -] - -[[package]] -name = "semver" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" -dependencies = [ - "semver-parser 0.10.2", - "serde", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - -[[package]] -name = "semver-parser" -version = "0.10.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" -dependencies = [ - "pest", -] - -[[package]] -name = "serde" -version = "1.0.124" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd761ff957cb2a45fbb9ab3da6512de9de55872866160b23c25f1a841e99d29f" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde-big-array" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "883eee5198ea51720eab8be52a36cf6c0164ac90eea0ed95b649d5e35382404e" -dependencies = [ - "serde", - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.124" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1800f7693e94e186f5e25a28291ae1570da908aff7d97a095dec1e56ff99069b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" -dependencies = [ - "indexmap", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "sha-1" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - -[[package]] -name = "sha-1" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfebf75d25bd900fd1e7d11501efab59bc846dbc76196839663e6637bba9f25f" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpuid-bool 0.1.2", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "sha1" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" - 
-[[package]] -name = "sha2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - -[[package]] -name = "sha2" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa827a14b29ab7f44778d14a88d3cb76e949c45083f7dbfa507d0cb699dc12de" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpuid-bool 0.1.2", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug 0.3.0", -] - -[[package]] -name = "sharded-slab" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "shlex" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" - -[[package]] -name = "signal-hook" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a7f3f92a1da3d6b1d32245d0cbcbbab0cfc45996d8df619c42bccfa6d2bbb5f" -dependencies = [ - "libc", - "signal-hook-registry", -] - -[[package]] -name = "signal-hook-registry" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" -dependencies = [ - "libc", -] - -[[package]] -name = "signature" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0f0242b8e50dd9accdd56170e94ca1ebd223b098eb9c83539a6e367d0f36ae68" - -[[package]] -name = "simba" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb931b1367faadea6b1ab1c306a860ec17aaa5fa39f367d0c744e69d971a1fb2" -dependencies = [ - "approx", - "num-complex", - "num-traits", - "paste 0.1.18", -] - -[[package]] -name = "slab" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" - -[[package]] -name = "slog" -version = "2.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8347046d4ebd943127157b94d63abb990fcf729dc4e9978927fdf4ac3c998d06" -dependencies = [ - "erased-serde", -] - -[[package]] -name = "sluice" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fa0333a60ff2e3474a6775cc611840c2a55610c831dd366503474c02f1a28f5" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", -] - -[[package]] -name = "smallvec" -version = "0.6.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" -dependencies = [ - "maybe-uninit", -] - -[[package]] -name = "smallvec" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" - -[[package]] -name = "snow" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "795dd7aeeee24468e5a32661f6d27f7b5cbed802031b2d7640c7b10f8fb2dd50" -dependencies = [ - "aes-gcm", - "blake2", - "chacha20poly1305", - "rand 0.7.3", - "rand_core 0.5.1", - "ring", - "rustc_version", - "sha2 0.9.3", - "subtle 2.4.0", - "x25519-dalek", -] - -[[package]] -name = "socket2" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "socket2" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2" -dependencies = [ - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "soketto" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5c71ed3d54db0a699f4948e1bb3e45b450fa31fe602621dee6680361d569c88" -dependencies = [ - "base64 0.12.3", - "bytes 0.5.6", - "flate2", - "futures 0.3.13", - "httparse", - "log", - "rand 0.7.3", - "sha-1 0.9.4", -] - -[[package]] -name = "sp-allocator" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "log", - "sp-core", - "sp-std", - "sp-wasm-interface", - "thiserror", -] - -[[package]] -name = "sp-api" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "hash-db", - "log", - "parity-scale-codec", - "sp-api-proc-macro", - "sp-core", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-version", - "thiserror", -] - -[[package]] -name = "sp-api-proc-macro" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "blake2-rfc", - "proc-macro-crate 1.0.0", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-application-crypto" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "max-encoded-len", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-std", -] - -[[package]] -name = "sp-arithmetic" -version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "integer-sqrt", - "num-traits", - "parity-scale-codec", - "serde", - "sp-debug-derive", - "sp-std", - "static_assertions", -] - -[[package]] -name = "sp-authorship" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-trait", - "parity-scale-codec", - "sp-inherents", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-block-builder" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "parity-scale-codec", - "sp-api", - "sp-inherents", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-blockchain" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "futures 0.3.13", - "log", - "lru", - "parity-scale-codec", - "parking_lot 0.11.1", - "sp-api", - "sp-consensus", - "sp-database", - "sp-runtime", - "sp-state-machine", - "thiserror", -] - -[[package]] -name = "sp-chain-spec" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = "sp-consensus" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-trait", - "futures 0.3.13", - "futures-timer 3.0.2", - "libp2p", - "log", - "parity-scale-codec", - "parking_lot 0.11.1", - "serde", - "sp-api", - "sp-core", - "sp-inherents", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-trie", - "sp-utils", - "sp-version", - "substrate-prometheus-endpoint", - "thiserror", - "wasm-timer", -] - -[[package]] -name = "sp-consensus-aura" -version = "0.9.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-trait", - "parity-scale-codec", - "sp-api", - "sp-application-crypto", - "sp-consensus", - "sp-consensus-slots", - "sp-inherents", - "sp-runtime", - "sp-std", - "sp-timestamp", -] - -[[package]] -name = "sp-consensus-babe" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-trait", - "merlin", - "parity-scale-codec", - "serde", - "sp-api", - "sp-application-crypto", - "sp-consensus", - "sp-consensus-slots", - "sp-consensus-vrf", - "sp-core", - "sp-inherents", - "sp-keystore", - "sp-runtime", - "sp-std", - "sp-timestamp", -] - -[[package]] -name = "sp-consensus-slots" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "parity-scale-codec", - "sp-arithmetic", - "sp-runtime", -] - -[[package]] -name = "sp-consensus-vrf" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "parity-scale-codec", - "schnorrkel", - "sp-core", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-core" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "base58", - "blake2-rfc", - "byteorder", - "dyn-clonable", - "ed25519-dalek", - "futures 0.3.13", - "hash-db", - "hash256-std-hasher", - "hex", - "impl-serde", - "lazy_static", - "libsecp256k1", - "log", - "max-encoded-len", - "merlin", - "num-traits", - "parity-scale-codec", - "parity-util-mem", - "parking_lot 0.11.1", - "primitive-types", - "rand 0.7.3", - "regex", - "schnorrkel", - "secrecy", - "serde", - "sha2 0.9.3", - "sp-debug-derive", - "sp-externalities", - "sp-runtime-interface", - "sp-std", - "sp-storage", - 
"substrate-bip39", - "thiserror", - "tiny-bip39", - "tiny-keccak", - "twox-hash", - "wasmi", - "zeroize", -] - -[[package]] -name = "sp-database" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "kvdb", - "parking_lot 0.11.1", -] - -[[package]] -name = "sp-debug-derive" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-externalities" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "environmental", - "parity-scale-codec", - "sp-std", - "sp-storage", -] - -[[package]] -name = "sp-finality-grandpa" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "finality-grandpa", - "log", - "parity-scale-codec", - "serde", - "sp-api", - "sp-application-crypto", - "sp-core", - "sp-keystore", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-inherents" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-trait", - "impl-trait-for-tuples", - "parity-scale-codec", - "sp-core", - "sp-runtime", - "sp-std", - "thiserror", -] - -[[package]] -name = "sp-io" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "futures 0.3.13", - "hash-db", - "libsecp256k1", - "log", - "parity-scale-codec", - "parking_lot 0.11.1", - "sp-core", - "sp-externalities", - "sp-keystore", - "sp-maybe-compressed-blob", - "sp-runtime-interface", - "sp-state-machine", - "sp-std", - "sp-tracing", - "sp-trie", - "sp-wasm-interface", - "tracing", - 
"tracing-core", -] - -[[package]] -name = "sp-keyring" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "lazy_static", - "sp-core", - "sp-runtime", - "strum", -] - -[[package]] -name = "sp-keystore" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-trait", - "derive_more", - "futures 0.3.13", - "merlin", - "parity-scale-codec", - "parking_lot 0.11.1", - "schnorrkel", - "serde", - "sp-core", - "sp-externalities", -] - -[[package]] -name = "sp-maybe-compressed-blob" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "ruzstd", - "zstd", -] - -[[package]] -name = "sp-offchain" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "sp-api", - "sp-core", - "sp-runtime", -] - -[[package]] -name = "sp-panic-handler" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "backtrace", -] - -[[package]] -name = "sp-rpc" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "rustc-hash", - "serde", - "sp-core", - "tracing-core", -] - -[[package]] -name = "sp-runtime" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "either", - "hash256-std-hasher", - "impl-trait-for-tuples", - "log", - "max-encoded-len", - "parity-scale-codec", - "parity-util-mem", - "paste 1.0.4", - "rand 0.7.3", - "serde", - "sp-application-crypto", - "sp-arithmetic", - "sp-core", - "sp-io", - "sp-std", -] - -[[package]] -name 
= "sp-runtime-interface" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "primitive-types", - "sp-externalities", - "sp-runtime-interface-proc-macro", - "sp-std", - "sp-storage", - "sp-tracing", - "sp-wasm-interface", - "static_assertions", -] - -[[package]] -name = "sp-runtime-interface-proc-macro" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "Inflector", - "proc-macro-crate 1.0.0", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-serializer" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = "sp-session" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "parity-scale-codec", - "sp-api", - "sp-core", - "sp-runtime", - "sp-staking", - "sp-std", -] - -[[package]] -name = "sp-staking" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "parity-scale-codec", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-state-machine" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "hash-db", - "log", - "num-traits", - "parity-scale-codec", - "parking_lot 0.11.1", - "rand 0.7.3", - "smallvec 1.6.1", - "sp-core", - "sp-externalities", - "sp-panic-handler", - "sp-std", - "sp-trie", - "thiserror", - "tracing", - "trie-db", - "trie-root", -] - -[[package]] -name = "sp-std" -version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" - -[[package]] -name = "sp-storage" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "impl-serde", - "parity-scale-codec", - "ref-cast", - "serde", - "sp-debug-derive", - "sp-std", -] - -[[package]] -name = "sp-tasks" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "log", - "sp-core", - "sp-externalities", - "sp-io", - "sp-runtime-interface", - "sp-std", -] - -[[package]] -name = "sp-timestamp" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-trait", - "futures-timer 3.0.2", - "log", - "parity-scale-codec", - "sp-api", - "sp-inherents", - "sp-runtime", - "sp-std", - "thiserror", - "wasm-timer", -] - -[[package]] -name = "sp-tracing" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "erased-serde", - "log", - "parity-scale-codec", - "parking_lot 0.10.2", - "serde", - "serde_json", - "slog", - "sp-std", - "tracing", - "tracing-core", - "tracing-subscriber", -] - -[[package]] -name = "sp-transaction-pool" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "derive_more", - "futures 0.3.13", - "log", - "parity-scale-codec", - "serde", - "sp-api", - "sp-blockchain", - "sp-runtime", - "thiserror", -] - -[[package]] -name = "sp-transaction-storage-proof" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-trait", - "log", - "parity-scale-codec", - "sp-core", - 
"sp-inherents", - "sp-runtime", - "sp-std", - "sp-trie", -] - -[[package]] -name = "sp-trie" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "hash-db", - "memory-db", - "parity-scale-codec", - "sp-core", - "sp-std", - "trie-db", - "trie-root", -] - -[[package]] -name = "sp-utils" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "futures 0.3.13", - "futures-core", - "futures-timer 3.0.2", - "lazy_static", - "prometheus", -] - -[[package]] -name = "sp-version" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "impl-serde", - "parity-scale-codec", - "serde", - "sp-runtime", - "sp-std", - "sp-version-proc-macro", -] - -[[package]] -name = "sp-version-proc-macro" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "parity-scale-codec", - "proc-macro-crate 1.0.0", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-wasm-interface" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "sp-std", - "wasmi", -] - -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - -[[package]] -name = "spinning_top" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bd0ab6b8c375d2d963503b90d3770010d95bc3b5f98036f948dee24bf4e8879" -dependencies = [ - "lock_api 0.4.2", -] - -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - -[[package]] -name = "standback" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" -dependencies = [ - "version_check", -] - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "statrs" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cce16f6de653e88beca7bd13780d08e09d4489dbca1f9210e041bc4852481382" -dependencies = [ - "rand 0.7.3", -] - -[[package]] -name = "stdweb" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" -dependencies = [ - "discard", - "rustc_version", - "stdweb-derive", - "stdweb-internal-macros", - "stdweb-internal-runtime", - "wasm-bindgen", -] - -[[package]] -name = "stdweb-derive" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" -dependencies = [ - "proc-macro2", - "quote", - "serde", - "serde_derive", - "syn", -] - -[[package]] -name = "stdweb-internal-macros" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" -dependencies = [ - "base-x", - "proc-macro2", - "quote", - "serde", - "serde_derive", - "serde_json", - "sha1", - "syn", -] - -[[package]] -name = "stdweb-internal-runtime" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" 
- -[[package]] -name = "storage-proof-fuzzer" -version = "0.1.0" -dependencies = [ - "bp-header-chain", - "bp-runtime", - "bp-test-utils", - "env_logger 0.8.3", - "finality-grandpa", - "frame-support", - "frame-system", - "hash-db", - "honggfuzz", - "log", - "parity-scale-codec", - "sp-core", - "sp-finality-grandpa", - "sp-io", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-trie", -] - -[[package]] -name = "stream-cipher" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c80e15f898d8d8f25db24c253ea615cc14acf418ff307822995814e7d42cfa89" -dependencies = [ - "block-cipher", - "generic-array 0.14.4", -] - -[[package]] -name = "string" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d24114bfcceb867ca7f71a0d3fe45d45619ec47a6fbfa98cb14e14250bfa5d6d" -dependencies = [ - "bytes 0.4.12", -] - -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - -[[package]] -name = "structopt" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5277acd7ee46e63e5168a80734c9f6ee81b1367a7d8772a2d765df2a3705d28c" -dependencies = [ - "clap", - "lazy_static", - "structopt-derive", -] - -[[package]] -name = "structopt-derive" -version = "0.4.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ba9cdfda491b814720b6b06e0cac513d922fc407582032e8706e9f137976f90" -dependencies = [ - "heck", - "proc-macro-error", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "strum" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7318c509b5ba57f18533982607f24070a55d353e90d4cae30c467cdb2ad5ac5c" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.20.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee8bc6b87a5112aeeab1f4a9f7ab634fe6cbefc4850006df31267f4cfb9e3149" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "substrate-bip39" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bed6646a0159b9935b5d045611560eeef842b78d7adc3ba36f5ca325a13a0236" -dependencies = [ - "hmac 0.7.1", - "pbkdf2 0.3.0", - "schnorrkel", - "sha2 0.8.2", - "zeroize", -] - -[[package]] -name = "substrate-build-script-utils" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "platforms", -] - -[[package]] -name = "substrate-frame-rpc-system" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "frame-system-rpc-runtime-api", - "futures 0.3.13", - "jsonrpc-core 15.1.0", - "jsonrpc-core-client", - "jsonrpc-derive", - "log", - "parity-scale-codec", - "sc-client-api", - "sc-rpc-api", - "serde", - "sp-api", - "sp-block-builder", - "sp-blockchain", - "sp-core", - "sp-runtime", - "sp-transaction-pool", -] - -[[package]] -name = "substrate-prometheus-endpoint" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "async-std", - "derive_more", - "futures-util", - "hyper 0.13.10", - "log", - "prometheus", - "tokio 0.2.25", -] - -[[package]] -name = "substrate-relay" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-std", - "async-trait", - "bp-header-chain", - "bp-kusama", - "bp-message-dispatch", - "bp-messages", - "bp-millau", - "bp-polkadot", - "bp-rialto", - "bp-rococo", - "bp-runtime", - "bp-westend", - "bp-wococo", - "bridge-runtime-common", - "finality-grandpa", - "finality-relay", - "frame-support", - "futures 0.3.13", - "headers-relay", - 
"hex", - "hex-literal 0.3.1", - "log", - "messages-relay", - "millau-runtime", - "num-format", - "num-traits", - "pallet-bridge-grandpa", - "pallet-bridge-messages", - "parity-scale-codec", - "paste 1.0.4", - "relay-kusama-client", - "relay-millau-client", - "relay-polkadot-client", - "relay-rialto-client", - "relay-rococo-client", - "relay-substrate-client", - "relay-utils", - "relay-westend-client", - "relay-wococo-client", - "rialto-runtime", - "sp-core", - "sp-finality-grandpa", - "sp-keyring", - "sp-runtime", - "sp-trie", - "sp-version", - "structopt", -] - -[[package]] -name = "substrate-wasm-builder" -version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d" -dependencies = [ - "ansi_term 0.12.1", - "atty", - "build-helper", - "cargo_metadata", - "sp-maybe-compressed-blob", - "tempfile", - "toml", - "walkdir", - "wasm-gc-api", -] - -[[package]] -name = "subtle" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" - -[[package]] -name = "subtle" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" - -[[package]] -name = "syn" -version = "1.0.68" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ce15dd3ed8aa2f8eeac4716d6ef5ab58b6b9256db41d7e1a0224c2788e8fd87" -dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", -] - -[[package]] -name = "synstructure" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "unicode-xid", -] - -[[package]] -name = "sysinfo" -version = "0.15.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"de94457a09609f33fec5e7fceaf907488967c6c7c75d64da6a7ce6ffdb8b5abd" -dependencies = [ - "cc", - "cfg-if 1.0.0", - "core-foundation-sys 0.8.2", - "doc-comment", - "libc", - "ntapi", - "once_cell", - "rayon", - "winapi 0.3.9", -] - -[[package]] -name = "take_mut" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" - -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "target-lexicon" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ae3b39281e4b14b8123bdbaddd472b7dfe215e444181f2f9d2443c2444f834" - -[[package]] -name = "tempfile" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "rand 0.8.3", - "redox_syscall 0.2.5", - "remove_dir_all", - "winapi 0.3.9", -] - -[[package]] -name = "termcolor" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - -[[package]] -name = "thiserror" -version = "1.0.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.24" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "thread_local" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" -dependencies = [ - "once_cell", -] - -[[package]] -name = "threadpool" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" -dependencies = [ - "num_cpus", -] - -[[package]] -name = "time" -version = "0.1.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" -dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi 0.3.9", -] - -[[package]] -name = "time" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1195b046942c221454c2539395f85413b33383a067449d78aab2b7b052a142f7" -dependencies = [ - "const_fn", - "libc", - "standback", - "stdweb", - "time-macros", - "version_check", - "winapi 0.3.9", -] - -[[package]] -name = "time-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" -dependencies = [ - "proc-macro-hack", - "time-macros-impl", -] - -[[package]] -name = "time-macros-impl" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "standback", - "syn", -] - -[[package]] -name = "tiny-bip39" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8" -dependencies = [ - 
"anyhow", - "hmac 0.8.1", - "once_cell", - "pbkdf2 0.4.0", - "rand 0.7.3", - "rustc-hash", - "sha2 0.9.3", - "thiserror", - "unicode-normalization", - "zeroize", -] - -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - -[[package]] -name = "tinyvec" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" - -[[package]] -name = "tokio" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "mio", - "num_cpus", - "tokio-codec", - "tokio-current-thread", - "tokio-executor", - "tokio-fs", - "tokio-io", - "tokio-reactor", - "tokio-sync", - "tokio-tcp", - "tokio-threadpool", - "tokio-timer", - "tokio-udp", - "tokio-uds", -] - -[[package]] -name = "tokio" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" -dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "iovec", - "lazy_static", - "libc", - "memchr", - "mio", - "mio-uds", - "num_cpus", - "pin-project-lite 0.1.11", - "signal-hook-registry", - "slab", - "winapi 0.3.9", -] - -[[package]] -name = "tokio-buf" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" -dependencies = [ - "bytes 0.4.12", - 
"either", - "futures 0.1.31", -] - -[[package]] -name = "tokio-codec" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "tokio-io", -] - -[[package]] -name = "tokio-current-thread" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" -dependencies = [ - "futures 0.1.31", - "tokio-executor", -] - -[[package]] -name = "tokio-executor" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671" -dependencies = [ - "crossbeam-utils 0.7.2", - "futures 0.1.31", -] - -[[package]] -name = "tokio-fs" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4" -dependencies = [ - "futures 0.1.31", - "tokio-io", - "tokio-threadpool", -] - -[[package]] -name = "tokio-io" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "log", -] - -[[package]] -name = "tokio-named-pipes" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d282d483052288b2308ba5ee795f5673b159c9bdf63c385a05609da782a5eae" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "mio", - "mio-named-pipes", - "tokio 0.1.22", -] - -[[package]] -name = "tokio-reactor" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351" -dependencies = [ - "crossbeam-utils 0.7.2", - "futures 0.1.31", - "lazy_static", - 
"log", - "mio", - "num_cpus", - "parking_lot 0.9.0", - "slab", - "tokio-executor", - "tokio-io", - "tokio-sync", -] - -[[package]] -name = "tokio-rustls" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" -dependencies = [ - "futures-core", - "rustls 0.18.1", - "tokio 0.2.25", - "webpki 0.21.4", -] - -[[package]] -name = "tokio-service" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" -dependencies = [ - "futures 0.1.31", -] - -[[package]] -name = "tokio-sync" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee" -dependencies = [ - "fnv", - "futures 0.1.31", -] - -[[package]] -name = "tokio-tcp" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "iovec", - "mio", - "tokio-io", - "tokio-reactor", -] - -[[package]] -name = "tokio-threadpool" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89" -dependencies = [ - "crossbeam-deque 0.7.3", - "crossbeam-queue", - "crossbeam-utils 0.7.2", - "futures 0.1.31", - "lazy_static", - "log", - "num_cpus", - "slab", - "tokio-executor", -] - -[[package]] -name = "tokio-timer" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296" -dependencies = [ - "crossbeam-utils 0.7.2", - "futures 0.1.31", - "slab", - "tokio-executor", -] - -[[package]] -name = "tokio-udp" -version = "0.1.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "log", - "mio", - "tokio-codec", - "tokio-io", - "tokio-reactor", -] - -[[package]] -name = "tokio-uds" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "iovec", - "libc", - "log", - "mio", - "mio-uds", - "tokio-codec", - "tokio-io", - "tokio-reactor", -] - -[[package]] -name = "tokio-util" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" -dependencies = [ - "bytes 0.5.6", - "futures-core", - "futures-sink", - "log", - "pin-project-lite 0.1.11", - "tokio 0.2.25", -] - -[[package]] -name = "toml" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" -dependencies = [ - "serde", -] - -[[package]] -name = "tower-service" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" - -[[package]] -name = "tracing" -version = "0.1.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" -dependencies = [ - "cfg-if 1.0.0", - "log", - "pin-project-lite 0.2.4", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a9bd1db7706f2373a190b0d067146caa39350c486f3d455b0e33b431f94c07" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = 
"tracing-core" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project 1.0.5", - "tracing", -] - -[[package]] -name = "tracing-log" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" -dependencies = [ - "lazy_static", - "log", - "tracing-core", -] - -[[package]] -name = "tracing-serde" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" -dependencies = [ - "serde", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa5553bf0883ba7c9cbe493b085c29926bd41b66afc31ff72cf17ff4fb60dcd5" -dependencies = [ - "ansi_term 0.12.1", - "chrono", - "lazy_static", - "matchers", - "regex", - "serde", - "serde_json", - "sharded-slab", - "smallvec 1.6.1", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", - "tracing-serde", -] - -[[package]] -name = "trie-db" -version = "0.22.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd81fe0c8bc2b528a51c9d2c31dae4483367a26a723a3c9a4a8120311d7774e3" -dependencies = [ - "hash-db", - "hashbrown", - "log", - "rustc-hex", - "smallvec 1.6.1", -] - -[[package]] -name = "trie-root" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "652931506d2c1244d7217a70b99f56718a7b4161b37f04e7cd868072a99f68cd" -dependencies = [ - "hash-db", -] - -[[package]] 
-name = "triehash" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1631b201eb031b563d2e85ca18ec8092508e262a3196ce9bd10a67ec87b9f5c" -dependencies = [ - "hash-db", - "rlp", -] - -[[package]] -name = "trust-dns-proto" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d57e219ba600dd96c2f6d82eb79645068e14edbc5c7e27514af40436b88150c" -dependencies = [ - "async-trait", - "cfg-if 1.0.0", - "data-encoding", - "enum-as-inner", - "futures-channel", - "futures-io", - "futures-util", - "idna 0.2.2", - "ipnet", - "lazy_static", - "log", - "rand 0.8.3", - "smallvec 1.6.1", - "thiserror", - "tinyvec", - "url 2.2.1", -] - -[[package]] -name = "trust-dns-resolver" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0437eea3a6da51acc1e946545ff53d5b8fb2611ff1c3bed58522dde100536ae" -dependencies = [ - "cfg-if 1.0.0", - "futures-util", - "ipconfig", - "lazy_static", - "log", - "lru-cache", - "parking_lot 0.11.1", - "resolv-conf", - "smallvec 1.6.1", - "thiserror", - "trust-dns-proto", -] - -[[package]] -name = "try-lock" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" - -[[package]] -name = "twox-hash" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59" -dependencies = [ - "cfg-if 0.1.10", - "rand 0.7.3", - "static_assertions", -] - -[[package]] -name = "typenum" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" - -[[package]] -name = "ucd-trie" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" - -[[package]] -name = "uint" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e11fe9a9348741cf134085ad57c249508345fe16411b3d7fb4ff2da2f1d6382e" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - -[[package]] -name = "unicode-bidi" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -dependencies = [ - "matches", -] - -[[package]] -name = "unicode-normalization" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-segmentation" -version = "1.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" - -[[package]] -name = "unicode-width" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" - -[[package]] -name = "unicode-xid" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" - -[[package]] -name = "universal-hash" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" -dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.0", -] - -[[package]] -name = "unsigned-varint" -version = 
"0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" - -[[package]] -name = "unsigned-varint" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35581ff83d4101e58b582e607120c7f5ffb17e632a980b1f38334d76b36908b2" -dependencies = [ - "asynchronous-codec 0.5.0", - "bytes 1.0.1", - "futures-io", - "futures-util", -] - -[[package]] -name = "unsigned-varint" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f8d425fafb8cd76bc3f22aace4af471d3156301d7508f2107e98fbeae10bc7f" -dependencies = [ - "asynchronous-codec 0.6.0", - "bytes 1.0.1", - "futures-io", - "futures-util", -] - -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - -[[package]] -name = "url" -version = "1.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" -dependencies = [ - "idna 0.1.5", - "matches", - "percent-encoding 1.0.1", -] - -[[package]] -name = "url" -version = "2.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" -dependencies = [ - "form_urlencoded", - "idna 0.2.2", - "matches", - "percent-encoding 2.1.0", -] - -[[package]] -name = "value-bag" -version = "1.0.0-alpha.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b676010e055c99033117c2343b33a40a30b91fecd6c49055ac9cd2d6c305ab1" -dependencies = [ - "ctor", -] - -[[package]] -name = "vcpkg" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" - -[[package]] -name = "vec-arena" 
-version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eafc1b9b2dfc6f5529177b62cf806484db55b32dc7c9658a118e11bbeb33061d" - -[[package]] -name = "vec_map" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - -[[package]] -name = "version_check" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" - -[[package]] -name = "void" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" - -[[package]] -name = "waker-fn" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" - -[[package]] -name = "walkdir" -version = "2.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" -dependencies = [ - "same-file", - "winapi 0.3.9", - "winapi-util", -] - -[[package]] -name = "want" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" -dependencies = [ - "futures 0.1.31", - "log", - "try-lock", -] - -[[package]] -name = "want" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" -dependencies = [ - "log", - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasi" -version = 
"0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - -[[package]] -name = "wasm-bindgen" -version = "0.2.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" -dependencies = [ - "cfg-if 1.0.0", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" -dependencies = [ - "bumpalo", - "lazy_static", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" -dependencies = [ - "cfg-if 1.0.0", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" - -[[package]] -name = "wasm-gc-api" -version = "0.1.11" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c32691b6c7e6c14e7f8fd55361a9088b507aa49620fcd06c09b3a1082186b9" -dependencies = [ - "log", - "parity-wasm 0.32.0", - "rustc-demangle", -] - -[[package]] -name = "wasm-timer" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" -dependencies = [ - "futures 0.3.13", - "js-sys", - "parking_lot 0.11.1", - "pin-utils", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - -[[package]] -name = "wasmi" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ee05bba3d1d994652079893941a2ef9324d2b58a63c31b40678fb7eddd7a5a" -dependencies = [ - "downcast-rs", - "libc", - "memory_units", - "num-rational", - "num-traits", - "parity-wasm 0.42.2", - "wasmi-validation", -] - -[[package]] -name = "wasmi-validation" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb8e860796d8be48efef530b60eebf84e74a88bce107374fffb0da97d504b8" -dependencies = [ - "parity-wasm 0.42.2", -] - -[[package]] -name = "wasmparser" -version = "0.78.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52144d4c78e5cf8b055ceab8e5fa22814ce4315d6002ad32cfd914f37c12fd65" - -[[package]] -name = "wasmtime" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b310b9d20fcf59385761d1ade7a3ef06aecc380e3d3172035b919eaf7465d9f7" -dependencies = [ - "anyhow", - "backtrace", - "bincode", - "cfg-if 1.0.0", - "cpp_demangle", - "indexmap", - "lazy_static", - "libc", - "log", - "paste 1.0.4", - "psm", - "region", - "rustc-demangle", - "serde", - "smallvec 1.6.1", - "target-lexicon", - "wasmparser", - "wasmtime-cache", - "wasmtime-environ", - "wasmtime-fiber", - "wasmtime-jit", - "wasmtime-profiling", - "wasmtime-runtime", - "wat", - "winapi 0.3.9", -] - -[[package]] -name = 
"wasmtime-cache" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d14d500d5c3dc5f5c097158feee123d64b3097f0d836a2a27dff9c761c73c843" -dependencies = [ - "anyhow", - "base64 0.13.0", - "bincode", - "directories-next", - "errno", - "file-per-thread-logger", - "libc", - "log", - "serde", - "sha2 0.9.3", - "toml", - "winapi 0.3.9", - "zstd", -] - -[[package]] -name = "wasmtime-cranelift" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c525b39f062eada7db3c1298287b96dcb6e472b9f6b22501300b28d9fa7582f6" -dependencies = [ - "cranelift-codegen", - "cranelift-entity", - "cranelift-frontend", - "cranelift-wasm", - "target-lexicon", - "wasmparser", - "wasmtime-environ", -] - -[[package]] -name = "wasmtime-debug" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d2a763e7a6fc734218e0e463196762a4f409c483063d81e0e85f96343b2e0a" -dependencies = [ - "anyhow", - "gimli 0.24.0", - "more-asserts", - "object 0.24.0", - "target-lexicon", - "thiserror", - "wasmparser", - "wasmtime-environ", -] - -[[package]] -name = "wasmtime-environ" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f64d0c2d881c31b0d65c1f2695e022d71eb60b9fbdd336aacca28208b58eac90" -dependencies = [ - "cfg-if 1.0.0", - "cranelift-codegen", - "cranelift-entity", - "cranelift-wasm", - "gimli 0.24.0", - "indexmap", - "log", - "more-asserts", - "serde", - "thiserror", - "wasmparser", -] - -[[package]] -name = "wasmtime-fiber" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a089d44cd7e2465d41a53b840a5b4fca1bf6d1ecfebc970eac9592b34ea5f0b3" -dependencies = [ - "cc", - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "wasmtime-jit" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4d4539ea734422b7c868107e2187d7746d8affbcaa71916d72639f53757ad707" -dependencies = [ - "addr2line 0.15.2", - "anyhow", - "cfg-if 1.0.0", - "cranelift-codegen", - "cranelift-entity", - "cranelift-frontend", - "cranelift-native", - "cranelift-wasm", - "gimli 0.24.0", - "log", - "more-asserts", - "object 0.24.0", - "rayon", - "region", - "serde", - "target-lexicon", - "thiserror", - "wasmparser", - "wasmtime-cranelift", - "wasmtime-debug", - "wasmtime-environ", - "wasmtime-obj", - "wasmtime-profiling", - "wasmtime-runtime", - "winapi 0.3.9", -] - -[[package]] -name = "wasmtime-obj" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1a8ff85246d091828e2225af521a6208ed28c997bb5c39eb697366dc2e2f2b" -dependencies = [ - "anyhow", - "more-asserts", - "object 0.24.0", - "target-lexicon", - "wasmtime-debug", - "wasmtime-environ", -] - -[[package]] -name = "wasmtime-profiling" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e24364d522dcd67c897c8fffc42e5bdfc57207bbb6d7eeade0da9d4a7d70105b" -dependencies = [ - "anyhow", - "cfg-if 1.0.0", - "gimli 0.24.0", - "lazy_static", - "libc", - "object 0.24.0", - "scroll", - "serde", - "target-lexicon", - "wasmtime-environ", - "wasmtime-runtime", -] - -[[package]] -name = "wasmtime-runtime" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c51e57976e8a19a18a18e002c6eb12e5769554204238e47ff155fda1809ef0f7" -dependencies = [ - "anyhow", - "backtrace", - "cc", - "cfg-if 1.0.0", - "indexmap", - "lazy_static", - "libc", - "log", - "mach", - "memoffset 0.6.1", - "more-asserts", - "rand 0.8.3", - "region", - "thiserror", - "wasmtime-environ", - "wasmtime-fiber", - "winapi 0.3.9", -] - -[[package]] -name = "wast" -version = "35.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a5800e9f86a1eae935e38bea11e60fd253f6d514d153fb39b3e5535a7b37b56" -dependencies = [ - 
"leb128", -] - -[[package]] -name = "wat" -version = "1.0.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ec280a739b69173e0ffd12c1658507996836ba4e992ed9bc1e5385a0bd72a02" -dependencies = [ - "wast", -] - -[[package]] -name = "web-sys" -version = "0.3.47" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c40dc691fc48003eba817c38da7113c15698142da971298003cac3ef175680b3" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "web3" -version = "0.15.0" -source = "git+https://github.com/tomusdrw/rust-web3.git?branch=td-ethabi#68dabc289bf9f5e59447d822c5da5b4c768175c6" -dependencies = [ - "arrayvec 0.5.2", - "derive_more", - "ethabi", - "ethereum-types", - "futures 0.3.13", - "futures-timer 3.0.2", - "hex", - "jsonrpc-core 17.0.0", - "log", - "parking_lot 0.11.1", - "pin-project 1.0.5", - "rlp", - "serde", - "serde_json", - "tiny-keccak", -] - -[[package]] -name = "webpki" -version = "0.21.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "webpki" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "webpki-roots" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82015b7e0b8bad8185994674a13a93306bea76cf5a16c5a181382fd3a5ec2376" -dependencies = [ - "webpki 0.21.4", -] - -[[package]] -name = "wepoll-sys" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcb14dea929042224824779fbc82d9fab8d2e6d3cbc0ac404de8edf489e77ff" -dependencies = [ - "cc", -] - -[[package]] -name = "which" -version = "3.1.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" -dependencies = [ - "libc", -] - -[[package]] -name = "which" -version = "4.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87c14ef7e1b8b8ecfc75d5eca37949410046e66f15d185c01d70824f1f8111ef" -dependencies = [ - "libc", - "thiserror", -] - -[[package]] -name = "widestring" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" - -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "winreg" 
-version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" -dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - -[[package]] -name = "wyz" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" - -[[package]] -name = "x25519-dalek" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc614d95359fd7afc321b66d2107ede58b246b844cf5d8a0adcca413e439f088" -dependencies = [ - "curve25519-dalek 3.0.2", - "rand_core 0.5.1", - "zeroize", -] - -[[package]] -name = "yaml-rust" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e66366e18dc58b46801afbf2ca7661a9f59cc8c5962c29892b6039b4f86fa992" - -[[package]] -name = "yamux" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7d9028f208dd5e63c614be69f115c1b53cacc1111437d4c765185856666c107" -dependencies = [ - "futures 0.3.13", - "log", - "nohash-hasher", - "parking_lot 0.11.1", - "rand 0.8.3", - "static_assertions", -] - -[[package]] -name = "zeroize" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81a974bcdd357f0dca4d41677db03436324d45a4c9ed2d0b873a5a360ce41c36" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3f369ddb18862aba61aa49bf31e74d29f0f162dec753063200e1dc084345d16" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] - -[[package]] 
-name = "zstd" -version = "0.6.1+zstd.1.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de55e77f798f205d8561b8fe2ef57abfb6e0ff2abe7fd3c089e119cdb5631a3" -dependencies = [ - "zstd-safe", -] - -[[package]] -name = "zstd-safe" -version = "3.0.1+zstd.1.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1387cabcd938127b30ce78c4bf00b30387dddf704e3f0881dbc4ff62b5566f8c" -dependencies = [ - "libc", - "zstd-sys", -] - -[[package]] -name = "zstd-sys" -version = "1.4.20+zstd.1.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd5b733d7cf2d9447e2c3e76a5589b4f5e5ae065c22a2bc0b023cbc331b6c8e" -dependencies = [ - "cc", - "libc", -] diff --git a/polkadot/bridges/Dockerfile b/polkadot/bridges/Dockerfile index cfd7fc456e50345b1b7b8c55247f7e8133980e7e..bc51f76ba99b811aa96d0ed8897da18f62e4a99d 100644 --- a/polkadot/bridges/Dockerfile +++ b/polkadot/bridges/Dockerfile @@ -13,7 +13,7 @@ WORKDIR /parity-bridges-common COPY . . -ARG PROJECT=ethereum-poa-relay +ARG PROJECT=substrate-relay RUN cargo build --release --verbose -p ${PROJECT} && \ strip ./target/release/${PROJECT} @@ -42,7 +42,7 @@ USER user WORKDIR /home/user -ARG PROJECT=ethereum-poa-relay +ARG PROJECT=substrate-relay COPY --chown=user:user --from=builder /parity-bridges-common/target/release/${PROJECT} ./ COPY --chown=user:user --from=builder /parity-bridges-common/deployments/local-scripts/bridge-entrypoint.sh ./ diff --git a/polkadot/bridges/README.md b/polkadot/bridges/README.md index b407f203b7427eb5923e770747c5c0b48cadeb82..ac3e49b94c6a6b9f8912d441d6e002b14fcd66c2 100644 --- a/polkadot/bridges/README.md +++ b/polkadot/bridges/README.md @@ -6,7 +6,7 @@ These components include Substrate pallets for syncing headers, passing arbitrar as libraries for building relayers to provide cross-chain communication capabilities. Three bridge nodes are also available. 
The nodes can be used to run test networks which bridge other -Substrate chains or Ethereum Proof-of-Authority chains. +Substrate chains. 🚧 The bridges are currently under construction - a hardhat is recommended beyond this point 🚧 @@ -38,6 +38,25 @@ cargo build --all cargo test --all ``` +Also you can build the repo with +[Parity CI Docker image](https://github.com/paritytech/scripts/tree/master/dockerfiles/bridges-ci): + +```bash +docker pull paritytech/bridges-ci:production +mkdir ~/cache +chown 1000:1000 ~/cache #processes in the container runs as "nonroot" user with UID 1000 +docker run --rm -it -w /shellhere/parity-bridges-common \ + -v /home/$(whoami)/cache/:/cache/ \ + -v "$(pwd)":/shellhere/parity-bridges-common \ + -e CARGO_HOME=/cache/cargo/ \ + -e SCCACHE_DIR=/cache/sccache/ \ + -e CARGO_TARGET_DIR=/cache/target/ paritytech/bridges-ci:production cargo build --all +#artifacts can be found in ~/cache/target +``` + +If you want to reproduce other steps of CI process you can use the following +[guide](https://github.com/paritytech/scripts#reproduce-ci-locally). + If you need more information about setting up your development environment Substrate's [Getting Started](https://substrate.dev/docs/en/knowledgebase/getting-started/) page is a good resource. @@ -85,7 +104,6 @@ the `relays` which are used to pass messages between chains. ├── diagrams // Pretty pictures of the project architecture │ └── ... ├── modules // Substrate Runtime Modules (a.k.a Pallets) -│ ├── ethereum // Ethereum PoA Header Sync Module │ ├── grandpa // On-Chain GRANDPA Light Client │ ├── messages // Cross Chain Message Passing │ ├── dispatch // Target Chain Message Execution @@ -102,10 +120,9 @@ the `relays` which are used to pass messages between chains. To run the Bridge you need to be able to connect the bridge relay node to the RPC interface of nodes on each side of the bridge (source and target chain). 
-There are 3 ways to run the bridge, described below: +There are 2 ways to run the bridge, described below: -- building & running from source, -- building or using Docker images for each individual component, +- building & running from source - running a Docker Compose setup (recommended). ### Using the Source @@ -119,88 +136,102 @@ cargo build -p millau-bridge-node cargo build -p substrate-relay ``` -### Running +### Running a Dev network -To run a simple dev network you'll can use the scripts located in -[the `deployments/local-scripts` folder](./deployments/local-scripts). Since the relayer connects to -both Substrate chains it must be run last. +We will launch a dev network to demonstrate how to relay a message between two Substrate based +chains (named Rialto and Millau). + +To do this we will need two nodes, two relayers which will relay headers, and two relayers which +will relay messages. + +#### Running from local scripts + +To run a simple dev network you can use the scripts located in the +[`deployments/local-scripts` folder](./deployments/local-scripts). + +First, we must run the two Substrate nodes. ```bash # In `parity-bridges-common` folder ./deployments/local-scripts/run-rialto-node.sh ./deployments/local-scripts/run-millau-node.sh +``` + +After the nodes are up we can run the header relayers. + +```bash ./deployments/local-scripts/relay-millau-to-rialto.sh +./deployments/local-scripts/relay-rialto-to-millau.sh ``` At this point you should see the relayer submitting headers from the Millau Substrate chain to the Rialto Substrate chain. -### Local Docker Setup - -To get up and running quickly you can use published Docker images for the bridge nodes and relayer. -The images are published on [Docker Hub](https://hub.docker.com/u/paritytech). +``` +# Header Relayer Logs +[Millau_to_Rialto_Sync] [date] DEBUG bridge Going to submit finality proof of Millau header #147 to Rialto +[...] [date] INFO bridge Synced 147 of 147 headers +[...] 
[date] DEBUG bridge Going to submit finality proof of Millau header #148 to Rialto +[...] [date] INFO bridge Synced 148 of 149 headers +``` -To run the dev network we first run the two bridge nodes: +Finally, we can run the message relayers. ```bash -docker run -p 30333:30333 -p 9933:9933 -p 9944:9944 \ - -it paritytech/rialto-bridge-node --dev --tmp \ - --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external - -docker run -p 30334:30333 -p 9934:9933 -p 9945:9944 \ - -it paritytech/millau-bridge-node --dev --tmp \ - --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external +./deployments/local-scripts/relay-messages-millau-to-rialto.sh +./deployments/local-scripts/relay-messages-rialto-to-millau.sh ``` -Notice that the `docker run` command will accept all the normal Substrate flags. For local -development you should at minimum run with the `--dev` flag or else no blocks will be produced. +You will also see the message lane relayers listening for new messages. -Then we need to initialize and run the relayer: +``` +# Message Relayer Logs +[Millau_to_Rialto_MessageLane_00000000] [date] DEBUG bridge Asking Millau::ReceivingConfirmationsDelivery about best message nonces +[...] [date] INFO bridge Synced Some(2) of Some(3) nonces in Millau::MessagesDelivery -> Rialto::MessagesDelivery race +[...] [date] DEBUG bridge Asking Millau::MessagesDelivery about message nonces +[...] [date] DEBUG bridge Received best nonces from Millau::ReceivingConfirmationsDelivery: TargetClientNonces { latest_nonce: 0, nonces_data: () } +[...] [date] DEBUG bridge Asking Millau::ReceivingConfirmationsDelivery about finalized message nonces +[...] [date] DEBUG bridge Received finalized nonces from Millau::ReceivingConfirmationsDelivery: TargetClientNonces { latest_nonce: 0, nonces_data: () } +[...] [date] DEBUG bridge Received nonces from Millau::MessagesDelivery: SourceClientNonces { new_nonces: {}, confirmed_nonce: Some(0) } +[...] 
[date] DEBUG bridge Asking Millau node about its state +[...] [date] DEBUG bridge Received state from Millau node: ClientState { best_self: HeaderId(1593, 0xacac***), best_finalized_self: HeaderId(1590, 0x0be81d...), best_finalized_peer_at_best_self: HeaderId(0, 0xdcdd89...) } +``` -```bash -docker run --network=host -it \ - paritytech/substrate-relay init-bridge RialtoToMillau \ - --target-host localhost \ - --target-port 9945 \ - --source-host localhost \ - --source-port 9944 \ - --target-signer //Alice +To send a message see the ["How to send a message" section](#how-to-send-a-message). -docker run --network=host -it \ - paritytech/substrate-relay relay-headers RialtoToMillau \ - --target-host localhost \ - --target-port 9945 \ - --source-host localhost \ - --source-port 9944 \ - --target-signer //Bob \ -``` +### Full Network Docker Compose Setup + +For a more sophisticated deployment which includes bidirectional header sync, message passing, +monitoring dashboards, etc. see the [Deployments README](./deployments/README.md). -You should now see the relayer submitting headers from the Millau chain to the Rialto chain. +You should note that you can find images for all the bridge components published on +[Docker Hub](https://hub.docker.com/u/paritytech). -If you don't want to use the published Docker images you can build images yourself. You can do this -by running the following commands at the top level of the repository. +To run a Rialto node for example, you can use the following command: ```bash -# In `parity-bridges-common` folder -docker build . -t local/rialto-bridge-node --build-arg PROJECT=rialto-bridge-node -docker build . -t local/millau-bridge-node --build-arg PROJECT=millau-bridge-node -docker build . 
-t local/substrate-relay --build-arg PROJECT=substrate-relay +docker run -p 30333:30333 -p 9933:9933 -p 9944:9944 \ + -it paritytech/rialto-bridge-node --dev --tmp \ + --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external ``` -_Note: Building the node images will take a long time, so make sure you have some coffee handy._ +### How to send a message -Once you have the images built you can use them in the previous commands by replacing -`paritytech/` with `local/` everywhere. +In this section we'll show you how to quickly send a bridge message, if you want to +interact with and test the bridge see more details in [send message](./docs/send-message.md) -### Full Network Docker Compose Setup - -For a more sophisticated deployment which includes bidirectional header sync, message passing, -monitoring dashboards, etc. see the [Deployments README](./deployments/README.md). +```bash +# In `parity-bridges-common` folder +./scripts/send-message-from-millau-rialto.sh remark +``` -### How to send a message +After sending a message you will see the following logs showing a message was successfully sent: -A straightforward way to interact with and test the bridge is sending messages. This is explained -in the [send message](./docs/send-message.md) document. +``` +INFO bridge Sending message to Rialto. Size: 286. Dispatch weight: 1038000. Fee: 275,002,568 +INFO bridge Signed Millau Call: 0x7904... +TRACE bridge Sent transaction to Millau node: 0x5e68... 
+``` ## Community diff --git a/polkadot/bridges/bin/millau/node/Cargo.toml b/polkadot/bridges/bin/millau/node/Cargo.toml index 8c6d32402ac24203b4275717cfa50d885e401258..c4438d0cef3ee610566efacce609ad2193b12bdd 100644 --- a/polkadot/bridges/bin/millau/node/Cargo.toml +++ b/polkadot/bridges/bin/millau/node/Cargo.toml @@ -10,23 +10,26 @@ repository = "https://github.com/paritytech/parity-bridges-common/" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -jsonrpc-core = "15.1.0" +jsonrpc-core = "18.0" structopt = "0.3.21" serde_json = "1.0.59" # Bridge dependencies -bp-messages = { path = "../../../primitives/messages" } -bp-millau= { path = "../../../primitives/chain-millau" } +bp-millau = { path = "../../../primitives/chain-millau" } bp-runtime = { path = "../../../primitives/runtime" } millau-runtime = { path = "../runtime" } pallet-bridge-messages = { path = "../../../modules/messages" } # Substrate Dependencies +beefy-gadget = { git = "https://github.com/paritytech/substrate", branch = "master" } +beefy-gadget-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } +beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } node-inspect = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-mmr-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["wasmtime"] } @@ -45,7 +48,6 @@ sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "mast sp-consensus-aura 
= { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" } substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -56,9 +58,6 @@ frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", bran [features] default = [] - -# TODO: https://github.com/paritytech/parity-bridges-common/issues/390 -# I've left the feature flag here to test our CI configuration runtime-benchmarks = [ - # "millau-runtime/runtime-benchmarks", + "millau-runtime/runtime-benchmarks", ] diff --git a/polkadot/bridges/bin/millau/node/src/chain_spec.rs b/polkadot/bridges/bin/millau/node/src/chain_spec.rs index 2c50897b965edfbc73a0550cfdb8ebaeebc11515..fbfca8692fcb49745f78f99c1f289dd4ec1d85c7 100644 --- a/polkadot/bridges/bin/millau/node/src/chain_spec.rs +++ b/polkadot/bridges/bin/millau/node/src/chain_spec.rs @@ -14,10 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . 
+use beefy_primitives::crypto::AuthorityId as BeefyId; use bp_millau::derive_account_from_rialto_id; use millau_runtime::{ - AccountId, AuraConfig, BalancesConfig, BridgeWestendGrandpaConfig, GenesisConfig, GrandpaConfig, SessionConfig, - SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY, + AccountId, AuraConfig, BalancesConfig, BeefyConfig, BridgeRialtoMessagesConfig, + BridgeWestendGrandpaConfig, GenesisConfig, GrandpaConfig, SessionConfig, SessionKeys, + Signature, SudoConfig, SystemConfig, WASM_BINARY, }; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{sr25519, Pair, Public}; @@ -56,10 +58,11 @@ where } /// Helper function to generate an authority key for Aura -pub fn get_authority_keys_from_seed(s: &str) -> (AccountId, AuraId, GrandpaId) { +pub fn get_authority_keys_from_seed(s: &str) -> (AccountId, AuraId, BeefyId, GrandpaId) { ( get_account_id_from_seed::(s), get_from_seed::(s), + get_from_seed::(s), get_from_seed::(s), ) } @@ -70,10 +73,7 @@ impl Alternative { let properties = Some( serde_json::json!({ "tokenDecimals": 9, - "tokenSymbol": "MLAU", - "bridgeIds": { - "Rialto": bp_runtime::RIALTO_CHAIN_ID, - } + "tokenSymbol": "MLAU" }) .as_object() .expect("Map given; qed") @@ -81,8 +81,8 @@ impl Alternative { ); match self { Alternative::Development => ChainSpec::from_genesis( - "Development", - "dev", + "Millau Development", + "millau_dev", sc_service::ChainType::Development, || { testnet_genesis( @@ -107,8 +107,8 @@ impl Alternative { None, ), Alternative::LocalTestnet => ChainSpec::from_genesis( - "Local Testnet", - "local_testnet", + "Millau Local", + "millau_local", sc_service::ChainType::Local, || { testnet_genesis( @@ -137,10 +137,12 @@ impl Alternative { get_account_id_from_seed::("Ferdie//stash"), get_account_id_from_seed::("George//stash"), get_account_id_from_seed::("Harry//stash"), - pallet_bridge_messages::Pallet::< - millau_runtime::Runtime, - pallet_bridge_messages::DefaultInstance, - 
>::relayer_fund_account_id(), + get_account_id_from_seed::("RialtoMessagesOwner"), + get_account_id_from_seed::("WithRialtoTokenSwap"), + pallet_bridge_messages::relayer_fund_account_id::< + bp_millau::AccountId, + bp_millau::AccountIdConverter, + >(), derive_account_from_rialto_id(bp_runtime::SourceAccount::Account( get_account_id_from_seed::("Alice"), )), @@ -173,12 +175,12 @@ impl Alternative { } } -fn session_keys(aura: AuraId, grandpa: GrandpaId) -> SessionKeys { - SessionKeys { aura, grandpa } +fn session_keys(aura: AuraId, beefy: BeefyId, grandpa: GrandpaId) -> SessionKeys { + SessionKeys { aura, beefy, grandpa } } fn testnet_genesis( - initial_authorities: Vec<(AccountId, AuraId, GrandpaId)>, + initial_authorities: Vec<(AccountId, AuraId, BeefyId, GrandpaId)>, root_key: AccountId, endowed_accounts: Vec, _enable_println: bool, @@ -186,22 +188,20 @@ fn testnet_genesis( GenesisConfig { system: SystemConfig { code: WASM_BINARY.expect("Millau development WASM not available").to_vec(), - changes_trie_config: Default::default(), }, balances: BalancesConfig { balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(), }, - aura: AuraConfig { - authorities: Vec::new(), - }, - grandpa: GrandpaConfig { - authorities: Vec::new(), - }, + aura: AuraConfig { authorities: Vec::new() }, + beefy: BeefyConfig { authorities: Vec::new() }, + grandpa: GrandpaConfig { authorities: Vec::new() }, sudo: SudoConfig { key: root_key }, session: SessionConfig { keys: initial_authorities .iter() - .map(|x| (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone()))) + .map(|x| { + (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone(), x.3.clone())) + }) .collect::>(), }, bridge_westend_grandpa: BridgeWestendGrandpaConfig { @@ -211,15 +211,17 @@ fn testnet_genesis( owner: Some(get_account_id_from_seed::("George")), ..Default::default() }, + bridge_rialto_messages: BridgeRialtoMessagesConfig { + owner: 
Some(get_account_id_from_seed::("RialtoMessagesOwner")), + ..Default::default() + }, } } #[test] fn derived_dave_account_is_as_expected() { let dave = get_account_id_from_seed::("Dave"); - let derived: AccountId = derive_account_from_rialto_id(bp_runtime::SourceAccount::Account(dave)); - assert_eq!( - derived.to_string(), - "5DNW6UVnb7TN6wX5KwXtDYR3Eccecbdzuw89HqjyNfkzce6J".to_string() - ); + let derived: AccountId = + derive_account_from_rialto_id(bp_runtime::SourceAccount::Account(dave)); + assert_eq!(derived.to_string(), "5DNW6UVnb7TN6wX5KwXtDYR3Eccecbdzuw89HqjyNfkzce6J".to_string()); } diff --git a/polkadot/bridges/bin/millau/node/src/cli.rs b/polkadot/bridges/bin/millau/node/src/cli.rs index 46323ed25c9ed2e39ebc6089b5bfa0e2ad29ddfd..086def633c59866d067ee11e891e93490e06e0d9 100644 --- a/polkadot/bridges/bin/millau/node/src/cli.rs +++ b/polkadot/bridges/bin/millau/node/src/cli.rs @@ -29,10 +29,10 @@ pub struct Cli { /// Possible subcommands of the main binary. #[derive(Debug, StructOpt)] pub enum Subcommand { - /// Key management cli utilities + /// Key management CLI utilities Key(sc_cli::KeySubcommand), - /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. + /// Verify a signature for a message, provided on `STDIN`, with a given (public or secret) key. Verify(sc_cli::VerifyCmd), /// Generate a seed that provides a vanity address. diff --git a/polkadot/bridges/bin/millau/node/src/command.rs b/polkadot/bridges/bin/millau/node/src/command.rs index d73f9b1ac9b2cc284550f4f07fa79a0c83fcdb5f..4dbf9575dfec7ebae6c94ef69b774fe39b71663b 100644 --- a/polkadot/bridges/bin/millau/node/src/command.rs +++ b/polkadot/bridges/bin/millau/node/src/command.rs @@ -14,11 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . 
-use crate::cli::{Cli, Subcommand}; -use crate::service; -use crate::service::new_partial; +use crate::{ + cli::{Cli, Subcommand}, + service, + service::new_partial, +}; use millau_runtime::{Block, RuntimeApi}; -use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli}; +use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; impl SubstrateCli for Cli { @@ -70,24 +72,23 @@ impl SubstrateCli for Cli { pub fn run() -> sc_cli::Result<()> { let cli = Cli::from_args(); // make sure to set correct crypto version. - sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::Custom( + sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::custom( millau_runtime::SS58Prefix::get() as u16, )); match &cli.subcommand { - Some(Subcommand::Benchmark(cmd)) => { + Some(Subcommand::Benchmark(cmd)) => if cfg!(feature = "runtime-benchmarks") { let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run::(config)) + runner.sync_run(|config| cmd.run::(config)) } else { println!( "Benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." ); Ok(()) - } - } + }, Some(Subcommand::Key(cmd)) => cmd.run(&cli), Some(Subcommand::Sign(cmd)) => cmd.run(), Some(Subcommand::Verify(cmd)) => cmd.run(), @@ -95,78 +96,58 @@ pub fn run() -> sc_cli::Result<()> { Some(Subcommand::BuildSpec(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) - } + }, Some(Subcommand::CheckBlock(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - import_queue, - .. - } = new_partial(&config)?; + let PartialComponents { client, task_manager, import_queue, .. 
} = + new_partial(&config)?; Ok((cmd.run(client, import_queue), task_manager)) }) - } + }, Some(Subcommand::ExportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { - client, task_manager, .. - } = new_partial(&config)?; + let PartialComponents { client, task_manager, .. } = new_partial(&config)?; Ok((cmd.run(client, config.database), task_manager)) }) - } + }, Some(Subcommand::ExportState(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { - client, task_manager, .. - } = new_partial(&config)?; + let PartialComponents { client, task_manager, .. } = new_partial(&config)?; Ok((cmd.run(client, config.chain_spec), task_manager)) }) - } + }, Some(Subcommand::ImportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - import_queue, - .. - } = new_partial(&config)?; + let PartialComponents { client, task_manager, import_queue, .. } = + new_partial(&config)?; Ok((cmd.run(client, import_queue), task_manager)) }) - } + }, Some(Subcommand::PurgeChain(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run(config.database)) - } + }, Some(Subcommand::Revert(cmd)) => { let runner = cli.create_runner(cmd)?; runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - backend, - .. - } = new_partial(&config)?; + let PartialComponents { client, task_manager, backend, .. 
} = new_partial(&config)?; Ok((cmd.run(client, backend), task_manager)) }) - } + }, Some(Subcommand::Inspect(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run::(config)) - } + runner + .sync_run(|config| cmd.run::(config)) + }, None => { let runner = cli.create_runner(&cli.run)?; runner.run_node_until_exit(|config| async move { - match config.role { - Role::Light => service::new_light(config), - _ => service::new_full(config), - } - .map_err(sc_cli::Error::Service) + service::new_full(config).map_err(sc_cli::Error::Service) }) - } + }, } } diff --git a/polkadot/bridges/bin/millau/node/src/service.rs b/polkadot/bridges/bin/millau/node/src/service.rs index 599fba1c374fb13ed72e864fb41a90aee387fb40..b01c0bfca9069d750e563b385d5c9b6f270bbdf3 100644 --- a/polkadot/bridges/bin/millau/node/src/service.rs +++ b/polkadot/bridges/bin/millau/node/src/service.rs @@ -21,18 +21,19 @@ // ===================================================================================== // UPDATE GUIDE: // 1) replace everything with node-template/src/service.rs contents (found in main Substrate repo); -// 2) the only thing to keep from old code, is `rpc_extensions_builder` - we use our own custom RPCs; -// 3) fix compilation errors; -// 4) test :) +// 2) from old code keep `rpc_extensions_builder` - we use our own custom RPCs; +// 3) from old code keep the Beefy gadget; +// 4) fix compilation errors; +// 5) test :) // ===================================================================================== // ===================================================================================== // ===================================================================================== use millau_runtime::{self, opaque::Block, RuntimeApi}; -use sc_client_api::{ExecutorProvider, RemoteBackend}; +use sc_client_api::ExecutorProvider; use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; pub use sc_executor::NativeElseWasmExecutor; - +use 
sc_finality_grandpa::SharedVoterState; use sc_keystore::LocalKeystore; use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; @@ -44,7 +45,12 @@ use std::{sync::Arc, time::Duration}; pub struct ExecutorDispatch; impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { + /// Only enable the benchmarking host functions when we actually want to benchmark. + #[cfg(feature = "runtime-benchmarks")] type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + /// Otherwise we only use the default Substrate host functions. + #[cfg(not(feature = "runtime-benchmarks"))] + type ExtendHostFunctions = (); fn dispatch(method: &str, data: &[u8]) -> Option> { millau_runtime::api::dispatch(method, data) @@ -55,11 +61,11 @@ impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { } } -type FullClient = sc_service::TFullClient>; +type FullClient = + sc_service::TFullClient>; type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; -#[allow(clippy::type_complexity)] pub fn new_partial( config: &Configuration, ) -> Result< @@ -70,7 +76,12 @@ pub fn new_partial( sc_consensus::DefaultImportQueue, sc_transaction_pool::FullPool, ( - sc_finality_grandpa::GrandpaBlockImport, + sc_finality_grandpa::GrandpaBlockImport< + FullBackend, + Block, + FullClient, + FullSelectChain, + >, sc_finality_grandpa::LinkHalf, Option, ), @@ -78,7 +89,7 @@ pub fn new_partial( ServiceError, > { if config.keystore_remote.is_some() { - return Err(ServiceError::Other("Remote Keystores are not supported.".to_string())); + return Err(ServiceError::Other(format!("Remote Keystores are not supported."))) } let telemetry = config @@ -92,14 +103,22 @@ pub fn new_partial( }) .transpose()?; - let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( - config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - )?; + let executor = 
NativeElseWasmExecutor::::new( + config.wasm_method, + config.default_heap_pages, + config.max_runtime_instances, + ); + + let (client, backend, keystore_container, task_manager) = + sc_service::new_full_parts::( + &config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + )?; let client = Arc::new(client); let telemetry = telemetry.map(|(worker, telemetry)| { - task_manager.spawn_handle().spawn("telemetry", worker.run()); + task_manager.spawn_handle().spawn("telemetry", None, worker.run()); telemetry }); @@ -122,26 +141,30 @@ pub fn new_partial( let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration(); - let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { - block_import: grandpa_block_import.clone(), - justification_import: Some(Box::new(grandpa_block_import.clone())), - client: client.clone(), - create_inherent_data_providers: move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + let import_queue = + sc_consensus_aura::import_queue::(ImportQueueParams { + block_import: grandpa_block_import.clone(), + justification_import: Some(Box::new(grandpa_block_import.clone())), + client: client.clone(), + create_inherent_data_providers: move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( - *timestamp, - slot_duration, - ); - - Ok((timestamp, slot)) - }, - spawner: &task_manager.spawn_essential_handle(), - can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), - registry: config.prometheus_registry(), - check_for_equivocation: Default::default(), - telemetry: telemetry.as_ref().map(|x| x.handle()), - })?; + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + slot_duration, + ); + + Ok((timestamp, slot)) + }, + spawner: 
&task_manager.spawn_essential_handle(), + can_author_with: sp_consensus::CanAuthorWithNativeVersion::new( + client.executor().clone(), + ), + registry: config.prometheus_registry(), + check_for_equivocation: Default::default(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + })?; Ok(sc_service::PartialComponents { client, @@ -155,7 +178,7 @@ pub fn new_partial( }) } -fn remote_keystore(_url: &str) -> Result, &'static str> { +fn remote_keystore(_url: &String) -> Result, &'static str> { // FIXME: here would the concrete keystore be built, // must return a concrete type (NOT `LocalKeystore`) that // implements `CryptoStore` and `SyncCryptoStore` @@ -178,32 +201,40 @@ pub fn new_full(mut config: Configuration) -> Result if let Some(url) = &config.keystore_remote { match remote_keystore(url) { Ok(k) => keystore_container.set_remote_keystore(k), - Err(e) => { + Err(e) => return Err(ServiceError::Other(format!( "Error hooking up remote keystore for {}: {}", url, e - ))) - } + ))), }; } - config - .network - .extra_sets - .push(sc_finality_grandpa::grandpa_peers_set_config()); + config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config()); + config.network.extra_sets.push(beefy_gadget::beefy_peers_set_config()); + let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new( + backend.clone(), + grandpa_link.shared_authority_set().clone(), + Vec::default(), + )); - let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - on_demand: None, - block_announce_validator_builder: None, - })?; + let (network, system_rpc_tx, network_starter) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + 
import_queue, + block_announce_validator_builder: None, + warp_sync: Some(warp_sync), + })?; if config.offchain_worker.enabled { - sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone()); + sc_service::build_offchain_workers( + &config, + task_manager.spawn_handle(), + client.clone(), + network.clone(), + ); } let role = config.role.clone(); @@ -212,7 +243,9 @@ pub fn new_full(mut config: Configuration) -> Result let name = config.network.node_name.clone(); let enable_grandpa = !config.disable_grandpa; let prometheus_registry = config.prometheus_registry().cloned(); - let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty(); + let shared_voter_state = SharedVoterState::empty(); + let (signed_commitment_sender, signed_commitment_stream) = + beefy_gadget::notification::BeefySignedCommitmentStream::channel(); let rpc_extensions_builder = { use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider; @@ -230,10 +263,12 @@ pub fn new_full(mut config: Configuration) -> Result let shared_authority_set = grandpa_link.shared_authority_set().clone(); let shared_voter_state = shared_voter_state.clone(); - let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend, Some(shared_authority_set.clone())); + let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service( + backend, + Some(shared_authority_set.clone()), + ); - Box::new(move |_, subscription_executor| { + Box::new(move |_, subscription_executor: sc_rpc::SubscriptionTaskExecutor| { let mut io = jsonrpc_core::IoHandler::default(); io.extend_with(SystemApi::to_delegate(FullSystem::new( client.clone(), @@ -247,10 +282,19 @@ pub fn new_full(mut config: Configuration) -> Result shared_authority_set.clone(), shared_voter_state.clone(), justification_stream.clone(), - subscription_executor, + subscription_executor.clone(), finality_proof_provider.clone(), ))); - io + 
io.extend_with(beefy_gadget_rpc::BeefyApi::to_delegate( + beefy_gadget_rpc::BeefyRpcHandler::new( + signed_commitment_stream.clone(), + subscription_executor, + ), + )); + io.extend_with(pallet_mmr_rpc::MmrApi::to_delegate(pallet_mmr_rpc::Mmr::new( + client.clone(), + ))); + Ok(io) }) }; @@ -261,9 +305,7 @@ pub fn new_full(mut config: Configuration) -> Result task_manager: &mut task_manager, transaction_pool: transaction_pool.clone(), rpc_extensions_builder, - on_demand: None, - remote_blockchain: None, - backend, + backend: backend.clone(), system_rpc_tx, config, telemetry: telemetry.as_mut(), @@ -278,51 +320,71 @@ pub fn new_full(mut config: Configuration) -> Result telemetry.as_ref().map(|x| x.handle()), ); - let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); + let can_author_with = + sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); let slot_duration = sc_consensus_aura::slot_duration(&*client)?; let raw_slot_duration = slot_duration.slot_duration(); - let aura = sc_consensus_aura::start_aura::(StartAuraParams { - slot_duration, - client, - select_chain, - block_import, - proposer_factory, - create_inherent_data_providers: move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( - *timestamp, - raw_slot_duration, - ); - - Ok((timestamp, slot)) + let aura = sc_consensus_aura::start_aura::( + StartAuraParams { + slot_duration, + client: client.clone(), + select_chain, + block_import, + proposer_factory, + create_inherent_data_providers: move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + raw_slot_duration, + ); + + Ok((timestamp, slot)) + }, + force_authoring, + backoff_authoring_blocks, + keystore: 
keystore_container.sync_keystore(), + can_author_with, + sync_oracle: network.clone(), + justification_sync_link: network.clone(), + block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), + max_block_proposal_slot_portion: None, + telemetry: telemetry.as_ref().map(|x| x.handle()), }, - force_authoring, - backoff_authoring_blocks, - keystore: keystore_container.sync_keystore(), - can_author_with, - sync_oracle: network.clone(), - justification_sync_link: network.clone(), - block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), - max_block_proposal_slot_portion: None, - telemetry: telemetry.as_ref().map(|x| x.handle()), - })?; + )?; // the AURA authoring task is considered essential, i.e. if it // fails we take down the service with it. - task_manager.spawn_essential_handle().spawn_blocking("aura", aura); + task_manager + .spawn_essential_handle() + .spawn_blocking("aura", Some("block-authoring"), aura); } // if the node isn't actively participating in consensus then it doesn't // need a keystore, regardless of which protocol we use below. - let keystore = if role.is_authority() { - Some(keystore_container.sync_keystore()) - } else { - None + let keystore = + if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; + + let beefy_params = beefy_gadget::BeefyParams { + client, + backend, + key_store: keystore.clone(), + network: network.clone(), + signed_commitment_sender, + min_block_delta: 4, + prometheus_registry: prometheus_registry.clone(), }; + // Start the BEEFY bridge gadget. + task_manager.spawn_essential_handle().spawn_blocking( + "beefy-gadget", + None, + beefy_gadget::start_beefy_gadget::<_, _, _, _>(beefy_params), + ); + let grandpa_config = sc_finality_grandpa::Config { // FIXME #1578 make this available through chainspec gossip_duration: Duration::from_millis(333), @@ -353,133 +415,13 @@ pub fn new_full(mut config: Configuration) -> Result // the GRANDPA voter task is considered infallible, i.e. 
// if it fails we take down the service with it. - task_manager - .spawn_essential_handle() - .spawn_blocking("grandpa-voter", sc_finality_grandpa::run_grandpa_voter(grandpa_config)?); - } - - network_starter.start_network(); - Ok(task_manager) -} - -/// Builds a new service for a light client. -pub fn new_light(mut config: Configuration) -> Result { - let telemetry = config - .telemetry_endpoints - .clone() - .filter(|x| !x.is_empty()) - .map(|endpoints| -> Result<_, sc_telemetry::Error> { - let worker = TelemetryWorker::new(16)?; - let telemetry = worker.handle().new_telemetry(endpoints); - Ok((worker, telemetry)) - }) - .transpose()?; - - let (client, backend, keystore_container, mut task_manager, on_demand) = - sc_service::new_light_parts::( - &config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - )?; - - let mut telemetry = telemetry.map(|(worker, telemetry)| { - task_manager.spawn_handle().spawn("telemetry", worker.run()); - telemetry - }); - - config - .network - .extra_sets - .push(sc_finality_grandpa::grandpa_peers_set_config()); - - let select_chain = sc_consensus::LongestChain::new(backend.clone()); - - let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( - config.transaction_pool.clone(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), - on_demand.clone(), - )); - - let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( - client.clone(), - &(client.clone() as Arc<_>), - select_chain, - telemetry.as_ref().map(|x| x.handle()), - )?; - - let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration(); - - let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { - block_import: grandpa_block_import.clone(), - justification_import: Some(Box::new(grandpa_block_import)), - client: client.clone(), - create_inherent_data_providers: move |_, ()| async move { - let timestamp = 
sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( - *timestamp, - slot_duration, - ); - - Ok((timestamp, slot)) - }, - spawner: &task_manager.spawn_essential_handle(), - can_author_with: sp_consensus::NeverCanAuthor, - registry: config.prometheus_registry(), - check_for_equivocation: Default::default(), - telemetry: telemetry.as_ref().map(|x| x.handle()), - })?; - - let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - on_demand: Some(on_demand.clone()), - block_announce_validator_builder: None, - })?; - - if config.offchain_worker.enabled { - sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone()); - } - - let enable_grandpa = !config.disable_grandpa; - if enable_grandpa { - let name = config.network.node_name.clone(); - - let config = sc_finality_grandpa::Config { - gossip_duration: std::time::Duration::from_millis(333), - justification_period: 512, - name: Some(name), - observer_enabled: false, - keystore: None, - local_role: config.role.clone(), - telemetry: telemetry.as_ref().map(|x| x.handle()), - }; - - task_manager.spawn_handle().spawn_blocking( - "grandpa-observer", - sc_finality_grandpa::run_grandpa_observer(config, grandpa_link, network.clone())?, + task_manager.spawn_essential_handle().spawn_blocking( + "grandpa-voter", + None, + sc_finality_grandpa::run_grandpa_voter(grandpa_config)?, ); } - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - remote_blockchain: Some(backend.remote_blockchain()), - transaction_pool, - task_manager: &mut task_manager, - on_demand: Some(on_demand), - rpc_extensions_builder: Box::new(|_, _| ()), - config, - client, - keystore: keystore_container.sync_keystore(), - 
backend, - network, - system_rpc_tx, - telemetry: telemetry.as_mut(), - })?; - network_starter.start_network(); Ok(task_manager) } diff --git a/polkadot/bridges/bin/millau/runtime/Cargo.toml b/polkadot/bridges/bin/millau/runtime/Cargo.toml index 367c1c3eef70421e89a9bab3858002a5f53a40ea..13195b95194ba787f82746181a530ce498754578 100644 --- a/polkadot/bridges/bin/millau/runtime/Cargo.toml +++ b/polkadot/bridges/bin/millau/runtime/Cargo.toml @@ -8,9 +8,10 @@ repository = "https://github.com/paritytech/parity-bridges-common/" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } hex-literal = "0.3" -serde = { version = "1.0.124", optional = true, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +serde = { version = "1.0", optional = true, features = ["derive"] } # Bridge dependencies @@ -24,36 +25,43 @@ bridge-runtime-common = { path = "../../runtime-common", default-features = fals pallet-bridge-dispatch = { path = "../../../modules/dispatch", default-features = false } pallet-bridge-grandpa = { path = "../../../modules/grandpa", default-features = false } pallet-bridge-messages = { path = "../../../modules/messages", default-features = false } +pallet-bridge-token-swap = { path = "../../../modules/token-swap", default-features = false } pallet-shift-session-manager = { path = "../../../modules/shift-session-manager", default-features = false } # Substrate Dependencies -frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = 
"master" , default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-offchain = { git = 
"https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-beefy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-beefy-mmr = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-grandpa = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-mmr = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-mmr-primitives = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-session = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [build-dependencies] substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -61,6 +69,7 @@ substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", bran [features] default = ["std"] std = [ + "beefy-primitives/std", "bp-header-chain/std", "bp-messages/std", "bp-millau/std", @@ -75,10 +84,14 @@ std = [ "frame-system/std", "pallet-aura/std", "pallet-balances/std", + "pallet-beefy/std", + "pallet-beefy-mmr/std", "pallet-bridge-dispatch/std", "pallet-bridge-grandpa/std", "pallet-bridge-messages/std", + "pallet-bridge-token-swap/std", "pallet-grandpa/std", + "pallet-mmr/std", "pallet-randomness-collective-flip/std", "pallet-session/std", "pallet-shift-session-manager/std", @@ -86,6 +99,7 @@ std = [ "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", + "scale-info/std", "serde", "sp-api/std", "sp-block-builder/std", @@ -101,6 +115,9 @@ std = [ "sp-trie/std", "sp-version/std", ] -# TODO: https://github.com/paritytech/parity-bridges-common/issues/390 -# I've left the feature flag here to test our CI configuration -runtime-benchmarks = [] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "pallet-bridge-token-swap/runtime-benchmarks", +] diff --git a/polkadot/bridges/bin/millau/runtime/src/lib.rs b/polkadot/bridges/bin/millau/runtime/src/lib.rs index 
9488fe0cb42805111d53a6b9e59167ad92298b93..d230e7aadd8e290aeb48381ff41fe44263c19544 100644 --- a/polkadot/bridges/bin/millau/runtime/src/lib.rs +++ b/polkadot/bridges/bin/millau/runtime/src/lib.rs @@ -34,18 +34,25 @@ pub mod rialto_messages; use crate::rialto_messages::{ToRialtoMessagePayload, WithRialtoMessageBridge}; -use bridge_runtime_common::messages::{source::estimate_message_dispatch_and_delivery_fee, MessageBridge}; -use codec::Decode; -use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList}; -use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; +use beefy_primitives::{crypto::AuthorityId as BeefyId, mmr::MmrLeafVersion, ValidatorSet}; +use bridge_runtime_common::messages::{ + source::estimate_message_dispatch_and_delivery_fee, MessageBridge, +}; +use pallet_grandpa::{ + fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, +}; +use pallet_mmr_primitives::{ + DataOrHash, EncodableOpaqueLeaf, Error as MmrError, LeafDataProvider, Proof as MmrProof, +}; +use pallet_transaction_payment::{FeeDetails, Multiplier, RuntimeDispatchInfo}; use sp_api::impl_runtime_apis; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::traits::{Block as BlockT, IdentityLookup, NumberFor, OpaqueKeys}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, + traits::{Block as BlockT, IdentityLookup, Keccak256, NumberFor, OpaqueKeys}, transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, MultiSignature, MultiSigner, + ApplyExtrinsicResult, FixedPointNumber, MultiSignature, MultiSigner, Perquintill, }; use sp_std::prelude::*; #[cfg(feature = "std")] @@ -62,8 +69,7 @@ pub use frame_support::{ pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; -pub use pallet_bridge_grandpa::Call as BridgeGrandpaRialtoCall; -pub use pallet_bridge_grandpa::Call as 
BridgeGrandpaWestendCall; +pub use pallet_bridge_grandpa::Call as BridgeGrandpaCall; pub use pallet_bridge_messages::Call as MessagesCall; pub use pallet_sudo::Call as SudoCall; pub use pallet_timestamp::Call as TimestampCall; @@ -90,7 +96,7 @@ pub type AccountIndex = u32; pub type Balance = bp_millau::Balance; /// Index of a transaction in the chain. -pub type Index = u32; +pub type Index = bp_millau::Index; /// A hash of some data used by the chain. pub type Hash = bp_millau::Hash; @@ -98,9 +104,6 @@ pub type Hash = bp_millau::Hash; /// Hashing algorithm used by the chain. pub type Hashing = bp_millau::Hasher; -/// Digest item type. -pub type DigestItem = generic::DigestItem; - /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know /// the specifics of the runtime. They can then be made to be agnostic over specific formats /// of data like extrinsics, allowing for them to continue syncing the network through upgrades @@ -121,6 +124,7 @@ pub mod opaque { impl_opaque_keys! { pub struct SessionKeys { pub aura: Aura, + pub beefy: Beefy, pub grandpa: Grandpa, } } @@ -139,10 +143,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { /// The version information used to identify this runtime when compiled natively. #[cfg(feature = "std")] pub fn native_version() -> NativeVersion { - NativeVersion { - runtime_version: VERSION, - can_author_with: Default::default(), - } + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } } parameter_types! { @@ -214,10 +215,16 @@ parameter_types! 
{ impl pallet_aura::Config for Runtime { type AuthorityId = AuraId; type MaxAuthorities = MaxAuthorities; + type DisabledValidators = (); } + +impl pallet_beefy::Config for Runtime { + type BeefyId = BeefyId; +} + impl pallet_bridge_dispatch::Config for Runtime { type Event = Event; - type MessageId = (bp_messages::LaneId, bp_messages::MessageNonce); + type BridgeMessageId = (bp_messages::LaneId, bp_messages::MessageNonce); type Call = Call; type CallFilter = frame_support::traits::Everything; type EncodedCall = crate::rialto_messages::FromRialtoEncodedCall; @@ -231,12 +238,50 @@ impl pallet_grandpa::Config for Runtime { type Event = Event; type Call = Call; type KeyOwnerProofSystem = (); - type KeyOwnerProof = >::Proof; - type KeyOwnerIdentification = - >::IdentificationTuple; + type KeyOwnerProof = + >::Proof; + type KeyOwnerIdentification = >::IdentificationTuple; type HandleEquivocation = (); // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; +} + +type MmrHash = ::Output; + +impl pallet_mmr::Config for Runtime { + const INDEXING_PREFIX: &'static [u8] = b"mmr"; + type Hashing = Keccak256; + type Hash = MmrHash; + type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest; + type WeightInfo = (); + type LeafData = pallet_beefy_mmr::Pallet; +} + +parameter_types! { + /// Version of the produced MMR leaf. + /// + /// The version consists of two parts; + /// - `major` (3 bits) + /// - `minor` (5 bits) + /// + /// `major` should be updated only if decoding the previous MMR Leaf format from the payload + /// is not possible (i.e. backward incompatible change). + /// `minor` should be updated if fields are added to the previous MMR Leaf, which given SCALE + /// encoding does not prevent old leafs from being decoded. + /// + /// Hence we expect `major` to be changed really rarely (think never). + /// See [`MmrLeafVersion`] type documentation for more details. 
+ pub LeafVersion: MmrLeafVersion = MmrLeafVersion::new(0, 0); +} + +impl pallet_beefy_mmr::Config for Runtime { + type LeafVersion = LeafVersion; + type BeefyAuthorityToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum; + type ParachainHeads = (); } parameter_types! { @@ -244,7 +289,7 @@ parameter_types! { } impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the Unix epoch. + /// A timestamp: milliseconds since the UNIX epoch. type Moment = u64; type OnTimestampSet = Aura; type MinimumPeriod = MinimumPeriod; @@ -278,13 +323,25 @@ impl pallet_balances::Config for Runtime { parameter_types! { pub const TransactionBaseFee: Balance = 0; pub const TransactionByteFee: Balance = 1; + pub const OperationalFeeMultiplier: u8 = 5; + // values for following parameters are copied from polkadot repo, but it is fine + // not to sync them - we're not going to make Rialto a full copy of one of Polkadot-like chains + pub const TargetBlockFullness: Perquintill = Perquintill::from_percent(25); + pub AdjustmentVariable: Multiplier = Multiplier::saturating_from_rational(3, 100_000); + pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000u128); } impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; type TransactionByteFee = TransactionByteFee; - type WeightToFee = IdentityFee; - type FeeMultiplierUpdate = (); + type OperationalFeeMultiplier = OperationalFeeMultiplier; + type WeightToFee = bp_millau::WeightToFee; + type FeeMultiplierUpdate = pallet_transaction_payment::TargetedFeeAdjustment< + Runtime, + TargetBlockFullness, + AdjustmentVariable, + MinimumMultiplier, + >; } impl pallet_sudo::Config for Runtime { @@ -357,10 +414,11 @@ parameter_types! 
{ pub const GetDeliveryConfirmationTransactionFee: Balance = bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT as _; pub const RootAccountForPayments: Option = None; + pub const RialtoChainId: bp_runtime::ChainId = bp_runtime::RIALTO_CHAIN_ID; } /// Instance of the messages pallet used to relay messages to/from Rialto chain. -pub type WithRialtoMessagesInstance = pallet_bridge_messages::DefaultInstance; +pub type WithRialtoMessagesInstance = (); impl pallet_bridge_messages::Config for Runtime { type Event = Event; @@ -382,16 +440,45 @@ impl pallet_bridge_messages::Config for Runtime { type TargetHeaderChain = crate::rialto_messages::Rialto; type LaneMessageVerifier = crate::rialto_messages::ToRialtoMessageVerifier; - type MessageDeliveryAndDispatchPayment = pallet_bridge_messages::instant_payments::InstantCurrencyPayments< - Runtime, - pallet_balances::Pallet, - GetDeliveryConfirmationTransactionFee, - RootAccountForPayments, - >; - type OnDeliveryConfirmed = (); + type MessageDeliveryAndDispatchPayment = + pallet_bridge_messages::instant_payments::InstantCurrencyPayments< + Runtime, + (), + pallet_balances::Pallet, + GetDeliveryConfirmationTransactionFee, + RootAccountForPayments, + >; + type OnMessageAccepted = (); + type OnDeliveryConfirmed = + pallet_bridge_token_swap::Pallet; type SourceHeaderChain = crate::rialto_messages::Rialto; type MessageDispatch = crate::rialto_messages::FromRialtoMessageDispatch; + type BridgedChainId = RialtoChainId; +} + +parameter_types! { + pub const TokenSwapMessagesLane: bp_messages::LaneId = *b"swap"; +} + +/// Instance of the with-Rialto token swap pallet. 
+pub type WithRialtoTokenSwapInstance = (); + +impl pallet_bridge_token_swap::Config for Runtime { + type Event = Event; + type WeightInfo = (); + + type BridgedChainId = RialtoChainId; + type OutboundMessageLaneId = TokenSwapMessagesLane; + #[cfg(not(feature = "runtime-benchmarks"))] + type MessagesBridge = pallet_bridge_messages::Pallet; + #[cfg(feature = "runtime-benchmarks")] + type MessagesBridge = bp_messages::source_chain::NoopMessagesBridge; + type ThisCurrency = pallet_balances::Pallet; + type FromSwapToThisAccountIdConverter = bp_rialto::AccountIdConverter; + + type BridgedChain = bp_rialto::Rialto; + type FromBridgedToThisAccountIdConverter = bp_millau::AccountIdConverter; } construct_runtime!( @@ -400,20 +487,35 @@ construct_runtime!( NodeBlock = opaque::Block, UncheckedExtrinsic = UncheckedExtrinsic { - BridgeRialtoMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event}, - BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event}, - BridgeRialtoGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage}, - BridgeWestendGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Config, Storage}, System: frame_system::{Pallet, Call, Config, Storage, Event}, - RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage}, - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Sudo: pallet_sudo::{Pallet, Call, Config, Storage, Event}, + + // Must be before session. Aura: pallet_aura::{Pallet, Config}, - Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event}, + + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, - Sudo: pallet_sudo::{Pallet, Call, Config, Storage, Event}, + + // Consensus support. 
Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event}, ShiftSessionManager: pallet_shift_session_manager::{Pallet}, + RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage}, + + // BEEFY Bridges support. + Beefy: pallet_beefy::{Pallet, Storage, Config}, + Mmr: pallet_mmr::{Pallet, Storage}, + MmrLeaf: pallet_beefy_mmr::{Pallet, Storage}, + + // Rialto bridge modules. + BridgeRialtoGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage}, + BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event}, + BridgeRialtoMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event, Config}, + BridgeRialtoTokenSwap: pallet_bridge_token_swap::{Pallet, Call, Storage, Event}, + + // Westend bridge modules. + BridgeWestendGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Config, Storage}, } ); @@ -449,7 +551,7 @@ pub type Executive = frame_executive::Executive< Block, frame_system::ChainContext, Runtime, - AllPalletsWithSystem, + AllPallets, >; impl_runtime_apis! { @@ -522,7 +624,7 @@ impl_runtime_apis! { } fn authorities() -> Vec { - Aura::authorities() + Aura::authorities().to_vec() } } @@ -550,7 +652,50 @@ impl_runtime_apis! 
{ } } + impl beefy_primitives::BeefyApi for Runtime { + fn validator_set() -> ValidatorSet { + Beefy::validator_set() + } + } + + impl pallet_mmr_primitives::MmrApi for Runtime { + fn generate_proof(leaf_index: u64) + -> Result<(EncodableOpaqueLeaf, MmrProof), MmrError> + { + Mmr::generate_proof(leaf_index) + .map(|(leaf, proof)| (EncodableOpaqueLeaf::from_leaf(&leaf), proof)) + } + + fn verify_proof(leaf: EncodableOpaqueLeaf, proof: MmrProof) + -> Result<(), MmrError> + { + pub type Leaf = < + ::LeafData as LeafDataProvider + >::LeafData; + + let leaf: Leaf = leaf + .into_opaque_leaf() + .try_decode() + .ok_or(MmrError::Verify)?; + Mmr::verify_leaf(leaf, proof) + } + + fn verify_proof_stateless( + root: MmrHash, + leaf: EncodableOpaqueLeaf, + proof: MmrProof + ) -> Result<(), MmrError> { + type MmrHashing = ::Hashing; + let node = DataOrHash::Data(leaf.into_opaque_leaf()); + pallet_mmr::verify_leaf_proof::(root, node, proof) + } + } + impl fg_primitives::GrandpaApi for Runtime { + fn current_set_id() -> fg_primitives::SetId { + Grandpa::current_set_id() + } + fn grandpa_authorities() -> GrandpaAuthorityList { Grandpa::grandpa_authorities() } @@ -619,20 +764,11 @@ impl_runtime_apis! { begin: bp_messages::MessageNonce, end: bp_messages::MessageNonce, ) -> Vec> { - (begin..=end).filter_map(|nonce| { - let message_data = BridgeRialtoMessages::outbound_message_data(lane, nonce)?; - let decoded_payload = rialto_messages::ToRialtoMessagePayload::decode( - &mut &message_data.payload[..] 
- ).ok()?; - Some(bp_messages::MessageDetails { - nonce, - dispatch_weight: decoded_payload.weight, - size: message_data.payload.len() as _, - delivery_and_dispatch_fee: message_data.fee, - dispatch_fee_payment: decoded_payload.dispatch_fee_payment, - }) - }) - .collect() + bridge_runtime_common::messages_api::outbound_message_details::< + Runtime, + WithRialtoMessagesInstance, + WithRialtoMessageBridge, + >(lane, begin, end) } fn latest_received_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { @@ -657,6 +793,67 @@ impl_runtime_apis! { BridgeRialtoMessages::inbound_unrewarded_relayers_state(lane) } } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{list_benchmark, Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + + let mut list = Vec::::new(); + + list_benchmark!(list, extra, pallet_bridge_token_swap, BridgeRialtoTokenSwap); + + let storage_info = AllPalletsWithSystem::storage_info(); + + return (list, storage_info) + } + + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig, + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{Benchmarking, BenchmarkBatch, TrackedStorageKey, add_benchmark}; + + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + // Caller 0 Account + 
hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da946c154ffd9992e395af90b5b13cc6f295c77033fce8a9045824a6690bbf99c6db269502f0a8d1d2a008542d5690a0749").to_vec().into(), + ]; + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + + use pallet_bridge_token_swap::benchmarking::Config as TokenSwapConfig; + + impl TokenSwapConfig for Runtime { + fn initialize_environment() { + let relayers_fund_account = pallet_bridge_messages::relayer_fund_account_id::< + bp_millau::AccountId, + bp_millau::AccountIdConverter, + >(); + pallet_balances::Pallet::::make_free_balance_be( + &relayers_fund_account, + Balance::MAX / 100, + ); + } + } + + add_benchmark!(params, batches, pallet_bridge_token_swap, BridgeRialtoTokenSwap); + + if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } + Ok(batches) + } + } } /// Rialto account ownership digest from Millau. @@ -698,6 +895,7 @@ mod tests { bp_millau::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT, bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, bp_millau::PAY_INBOUND_DISPATCH_FEE_WEIGHT, + DbWeight::get(), ); let max_incoming_message_proof_size = bp_rialto::EXTRA_STORAGE_PROOF_SIZE.saturating_add( @@ -707,21 +905,31 @@ mod tests { bp_millau::max_extrinsic_size(), bp_millau::max_extrinsic_weight(), max_incoming_message_proof_size, - messages::target::maximal_incoming_message_dispatch_weight(bp_millau::max_extrinsic_weight()), + messages::target::maximal_incoming_message_dispatch_weight( + bp_millau::max_extrinsic_weight(), + ), ); - let max_incoming_inbound_lane_data_proof_size = bp_messages::InboundLaneData::<()>::encoded_size_hint( - bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, - bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _, - bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE as _, - ) - .unwrap_or(u32::MAX); + let max_incoming_inbound_lane_data_proof_size = + bp_messages::InboundLaneData::<()>::encoded_size_hint( + 
bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, + bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _, + bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE as _, + ) + .unwrap_or(u32::MAX); pallet_bridge_messages::ensure_able_to_receive_confirmation::( bp_millau::max_extrinsic_size(), bp_millau::max_extrinsic_weight(), max_incoming_inbound_lane_data_proof_size, bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, + DbWeight::get(), ); } + + #[test] + fn call_size() { + const MAX_CALL_SIZE: usize = 230; // value from polkadot-runtime tests + assert!(core::mem::size_of::() <= MAX_CALL_SIZE); + } } diff --git a/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs b/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs index 12af2c328521b97bbc5e47de6325e2996e006bcd..6d9677c45cf91be170b362c2a3c24807abd7029a 100644 --- a/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs +++ b/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs @@ -31,25 +31,34 @@ use frame_support::{ weights::{DispatchClass, Weight}, RuntimeDebug, }; -use sp_runtime::{traits::Zero, FixedPointNumber, FixedU128}; +use scale_info::TypeInfo; +use sp_runtime::{traits::Saturating, FixedPointNumber, FixedU128}; use sp_std::{convert::TryFrom, ops::RangeInclusive}; /// Initial value of `RialtoToMillauConversionRate` parameter. -pub const INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE: FixedU128 = FixedU128::from_inner(FixedU128::DIV); +pub const INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE: FixedU128 = + FixedU128::from_inner(FixedU128::DIV); +/// Initial value of `RialtoFeeMultiplier` parameter. +pub const INITIAL_RIALTO_FEE_MULTIPLIER: FixedU128 = FixedU128::from_inner(FixedU128::DIV); parameter_types! { /// Rialto to Millau conversion rate. Initially we treat both tokens as equal. pub storage RialtoToMillauConversionRate: FixedU128 = INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE; + /// Fee multiplier value at Rialto chain. 
+ pub storage RialtoFeeMultiplier: FixedU128 = INITIAL_RIALTO_FEE_MULTIPLIER; } /// Message payload for Millau -> Rialto messages. -pub type ToRialtoMessagePayload = messages::source::FromThisChainMessagePayload; +pub type ToRialtoMessagePayload = + messages::source::FromThisChainMessagePayload; /// Message verifier for Millau -> Rialto messages. -pub type ToRialtoMessageVerifier = messages::source::FromThisChainMessageVerifier; +pub type ToRialtoMessageVerifier = + messages::source::FromThisChainMessageVerifier; /// Message payload for Rialto -> Millau messages. -pub type FromRialtoMessagePayload = messages::target::FromBridgedChainMessagePayload; +pub type FromRialtoMessagePayload = + messages::target::FromBridgedChainMessagePayload; /// Encoded Millau Call as it comes from Rialto. pub type FromRialtoEncodedCall = messages::target::FromBridgedChainEncodedMessageCall; @@ -58,14 +67,15 @@ pub type FromRialtoEncodedCall = messages::target::FromBridgedChainEncodedMessag type FromRialtoMessagesProof = messages::target::FromBridgedChainMessagesProof; /// Messages delivery proof for Millau -> Rialto messages. -type ToRialtoMessagesDeliveryProof = messages::source::FromBridgedChainMessagesDeliveryProof; +type ToRialtoMessagesDeliveryProof = + messages::source::FromBridgedChainMessagesDeliveryProof; /// Call-dispatch based message dispatch for Rialto -> Millau messages. pub type FromRialtoMessageDispatch = messages::target::FromBridgedChainMessageDispatch< WithRialtoMessageBridge, crate::Runtime, pallet_balances::Pallet, - pallet_bridge_dispatch::DefaultInstance, + (), >; /// Millau <-> Rialto message bridge. 
@@ -76,14 +86,16 @@ impl MessageBridge for WithRialtoMessageBridge { const RELAYER_FEE_PERCENT: u32 = 10; const THIS_CHAIN_ID: ChainId = MILLAU_CHAIN_ID; const BRIDGED_CHAIN_ID: ChainId = RIALTO_CHAIN_ID; + const BRIDGED_MESSAGES_PALLET_NAME: &'static str = bp_rialto::WITH_MILLAU_MESSAGES_PALLET_NAME; type ThisChain = Millau; type BridgedChain = Rialto; - type BridgedMessagesInstance = crate::WithRialtoMessagesInstance; fn bridged_balance_to_this_balance(bridged_balance: bp_rialto::Balance) -> bp_millau::Balance { - bp_millau::Balance::try_from(RialtoToMillauConversionRate::get().saturating_mul_int(bridged_balance)) - .unwrap_or(bp_millau::Balance::MAX) + bp_millau::Balance::try_from( + RialtoToMillauConversionRate::get().saturating_mul_int(bridged_balance), + ) + .unwrap_or(bp_millau::Balance::MAX) } } @@ -104,7 +116,9 @@ impl messages::ThisChainWithMessages for Millau { type Call = crate::Call; fn is_outbound_lane_enabled(lane: &LaneId) -> bool { - *lane == [0, 0, 0, 0] || *lane == [0, 0, 0, 1] + *lane == [0, 0, 0, 0] || + *lane == [0, 0, 0, 1] || + *lane == crate::TokenSwapMessagesLane::get() } fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { @@ -128,11 +142,15 @@ impl messages::ThisChainWithMessages for Millau { } fn transaction_payment(transaction: MessageTransaction) -> bp_millau::Balance { + // `transaction` may represent transaction from the future, when multiplier value will + // be larger, so let's use slightly increased value + let multiplier = FixedU128::saturating_from_rational(110, 100) + .saturating_mul(pallet_transaction_payment::Pallet::::next_fee_multiplier()); // in our testnets, both per-byte fee and weight-to-fee are 1:1 messages::transaction_payment( bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, 1, - FixedU128::zero(), + multiplier, |weight| weight as _, transaction, ) @@ -159,12 +177,15 @@ impl messages::BridgedChainWithMessages for Rialto { fn message_weight_limits(_message_payload: &[u8]) -> 
RangeInclusive { // we don't want to relay too large messages + keep reserve for future upgrades - let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(bp_rialto::max_extrinsic_weight()); + let upper_limit = messages::target::maximal_incoming_message_dispatch_weight( + bp_rialto::max_extrinsic_weight(), + ); - // we're charging for payload bytes in `WithRialtoMessageBridge::transaction_payment` function + // we're charging for payload bytes in `WithRialtoMessageBridge::transaction_payment` + // function // - // this bridge may be used to deliver all kind of messages, so we're not making any assumptions about - // minimal dispatch weight here + // this bridge may be used to deliver all kind of messages, so we're not making any + // assumptions about minimal dispatch weight here 0..=upper_limit } @@ -195,11 +216,14 @@ impl messages::BridgedChainWithMessages for Rialto { } fn transaction_payment(transaction: MessageTransaction) -> bp_rialto::Balance { + // we don't have a direct access to the value of multiplier at Rialto chain + // => it is a messages module parameter + let multiplier = RialtoFeeMultiplier::get(); // in our testnets, both per-byte fee and weight-to-fee are 1:1 messages::transaction_payment( bp_rialto::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, 1, - FixedU128::zero(), + multiplier, |weight| weight as _, transaction, ) @@ -221,9 +245,11 @@ impl TargetHeaderChain for Rialto fn verify_messages_delivery_proof( proof: Self::MessagesDeliveryProof, ) -> Result<(LaneId, InboundLaneData), Self::Error> { - messages::source::verify_messages_delivery_proof::( - proof, - ) + messages::source::verify_messages_delivery_proof::< + WithRialtoMessageBridge, + Runtime, + crate::RialtoGrandpaInstance, + >(proof) } } @@ -240,15 +266,16 @@ impl SourceHeaderChain for Rialto { proof: Self::MessagesProof, messages_count: u32, ) -> Result>, Self::Error> { - messages::target::verify_messages_proof::( - proof, - messages_count, - ) + 
messages::target::verify_messages_proof::< + WithRialtoMessageBridge, + Runtime, + crate::RialtoGrandpaInstance, + >(proof, messages_count) } } /// Millau -> Rialto message lane pallet parameters. -#[derive(RuntimeDebug, Clone, Encode, Decode, PartialEq, Eq)] +#[derive(RuntimeDebug, Clone, Encode, Decode, PartialEq, Eq, TypeInfo)] pub enum MillauToRialtoMessagesParameter { /// The conversion formula we use is: `MillauTokens = RialtoTokens * conversion_rate`. RialtoToMillauConversionRate(FixedU128), @@ -257,9 +284,8 @@ pub enum MillauToRialtoMessagesParameter { impl MessagesParameter for MillauToRialtoMessagesParameter { fn save(&self) { match *self { - MillauToRialtoMessagesParameter::RialtoToMillauConversionRate(ref conversion_rate) => { - RialtoToMillauConversionRate::set(conversion_rate) - } + MillauToRialtoMessagesParameter::RialtoToMillauConversionRate(ref conversion_rate) => + RialtoToMillauConversionRate::set(conversion_rate), } } } diff --git a/polkadot/bridges/bin/rialto-parachain/node/Cargo.toml b/polkadot/bridges/bin/rialto-parachain/node/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..8adc998e47ee38b1eeebb0e8b280cd01ddf3d056 --- /dev/null +++ b/polkadot/bridges/bin/rialto-parachain/node/Cargo.toml @@ -0,0 +1,89 @@ +[package] +name = "rialto-parachain-collator" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/parity-bridges-common/" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[build-dependencies] +substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[[bin]] +name = 'rialto-parachain-collator' + +[features] +default = [] +runtime-benchmarks = ['rialto-parachain-runtime/runtime-benchmarks'] + +[dependencies] +derive_more = '0.99.2' +log = '0.4.14' +codec = { package = 'parity-scale-codec', version = '2.0.0' } +structopt = '0.3.8' +serde = { version = 
'1.0', features = ['derive'] } +hex-literal = '0.3.1' + +# RPC related Dependencies +jsonrpc-core = '18.0' + +# Local Dependencies +rialto-parachain-runtime = { path = '../runtime' } + +# Substrate Dependencies +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } + +pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } + +substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" } +substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } + +## Substrate Client Dependencies +sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-rpc-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", features = ['wasmtime'] } +sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" } + +## 
Substrate Primitive Dependencies +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } + +# Cumulus dependencies +cumulus-client-consensus-aura = { git = "https://github.com/paritytech/cumulus", branch = "master" } +cumulus-client-consensus-common = { git = "https://github.com/paritytech/cumulus", branch = "master" } +cumulus-client-collator = { git = "https://github.com/paritytech/cumulus", branch = "master" } +cumulus-client-cli = { git = "https://github.com/paritytech/cumulus", branch = "master" } +cumulus-client-network = { git = "https://github.com/paritytech/cumulus", branch = "master" } +cumulus-client-service = { git = "https://github.com/paritytech/cumulus", branch = "master" } +cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "master" } +cumulus-primitives-parachain-inherent = { git = "https://github.com/paritytech/cumulus", branch = "master" } + +# Polkadot 
dependencies +polkadot-cli = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-service = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-test-service = { git = "https://github.com/paritytech/polkadot", branch = "master" } \ No newline at end of file diff --git a/polkadot/bridges/relays/exchange/src/lib.rs b/polkadot/bridges/bin/rialto-parachain/node/build.rs similarity index 63% rename from polkadot/bridges/relays/exchange/src/lib.rs rename to polkadot/bridges/bin/rialto-parachain/node/build.rs index 370f085b4bf7c2edb164122ceb6a6aa22889981f..8ba8a31e9a79fdf45e93c7efb49470e207f90049 100644 --- a/polkadot/bridges/relays/exchange/src/lib.rs +++ b/polkadot/bridges/bin/rialto-parachain/node/build.rs @@ -14,13 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Relaying [`currency-exchange`](../pallet_bridge_currency_exchange/index.html) application -//! specific data. Currency exchange application allows exchanging tokens between bridged chains. -//! This module provides entrypoints for crafting and submitting (single and multiple) -//! proof-of-exchange-at-source-chain transaction(s) to target chain. 
+use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; -#![warn(missing_docs)] - -pub mod exchange; -pub mod exchange_loop; -pub mod exchange_loop_metrics; +fn main() { + generate_cargo_keys(); + rerun_if_git_head_changed(); +} diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/chain_spec.rs b/polkadot/bridges/bin/rialto-parachain/node/src/chain_spec.rs new file mode 100644 index 0000000000000000000000000000000000000000..52012423fb7169ca0f2aadff7e2d571eb91ce0e1 --- /dev/null +++ b/polkadot/bridges/bin/rialto-parachain/node/src/chain_spec.rs @@ -0,0 +1,164 @@ +// Copyright 2020-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use cumulus_primitives_core::ParaId; +use rialto_parachain_runtime::{AccountId, AuraId, Signature}; +use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup}; +use sc_service::ChainType; +use serde::{Deserialize, Serialize}; +use sp_core::{sr25519, Pair, Public}; +use sp_runtime::traits::{IdentifyAccount, Verify}; + +/// Specialized `ChainSpec` for the normal parachain runtime. 
+pub type ChainSpec = + sc_service::GenericChainSpec; + +/// Helper function to generate a crypto pair from seed +pub fn get_from_seed(seed: &str) -> ::Public { + TPublic::Pair::from_string(&format!("//{}", seed), None) + .expect("static values are valid; qed") + .public() +} + +/// The extensions for the [`ChainSpec`]. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)] +#[serde(deny_unknown_fields)] +pub struct Extensions { + /// The relay chain of the Parachain. + pub relay_chain: String, + /// The id of the Parachain. + pub para_id: u32, +} + +impl Extensions { + /// Try to get the extension from the given `ChainSpec`. + pub fn try_get(chain_spec: &dyn sc_service::ChainSpec) -> Option<&Self> { + sc_chain_spec::get_extension(chain_spec.extensions()) + } +} + +type AccountPublic = ::Signer; + +/// Helper function to generate an account ID from seed +pub fn get_account_id_from_seed(seed: &str) -> AccountId +where + AccountPublic: From<::Public>, +{ + AccountPublic::from(get_from_seed::(seed)).into_account() +} + +pub fn development_config(id: ParaId) -> ChainSpec { + // Give your base currency a unit name and decimal places + let mut properties = sc_chain_spec::Properties::new(); + properties.insert("tokenSymbol".into(), "UNIT".into()); + properties.insert("tokenDecimals".into(), 12.into()); + + ChainSpec::from_genesis( + // Name + "Development", + // ID + "dev", + ChainType::Local, + move || { + testnet_genesis( + get_account_id_from_seed::("Alice"), + vec![get_from_seed::("Alice"), get_from_seed::("Bob")], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + id, + ) + }, + vec![], + None, + None, + None, + Extensions { + relay_chain: "rococo-local".into(), // You MUST set this to the correct network! 
+ para_id: id.into(), + }, + ) +} + +pub fn local_testnet_config(id: ParaId) -> ChainSpec { + // Give your base currency a unit name and decimal places + let mut properties = sc_chain_spec::Properties::new(); + properties.insert("tokenSymbol".into(), "UNIT".into()); + properties.insert("tokenDecimals".into(), 12.into()); + + ChainSpec::from_genesis( + // Name + "Local Testnet", + // ID + "local_testnet", + ChainType::Local, + move || { + testnet_genesis( + get_account_id_from_seed::("Alice"), + vec![get_from_seed::("Alice"), get_from_seed::("Bob")], + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + id, + ) + }, + Vec::new(), + None, + None, + None, + Extensions { + relay_chain: "rococo-local".into(), // You MUST set this to the correct network! 
+ para_id: id.into(), + }, + ) +} + +fn testnet_genesis( + root_key: AccountId, + initial_authorities: Vec, + endowed_accounts: Vec, + id: ParaId, +) -> rialto_parachain_runtime::GenesisConfig { + rialto_parachain_runtime::GenesisConfig { + system: rialto_parachain_runtime::SystemConfig { + code: rialto_parachain_runtime::WASM_BINARY + .expect("WASM binary was not build, please build it!") + .to_vec(), + }, + balances: rialto_parachain_runtime::BalancesConfig { + balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), + }, + sudo: rialto_parachain_runtime::SudoConfig { key: root_key }, + parachain_info: rialto_parachain_runtime::ParachainInfoConfig { parachain_id: id }, + aura: rialto_parachain_runtime::AuraConfig { authorities: initial_authorities }, + aura_ext: Default::default(), + // parachain_system: Default::default(), + } +} diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/cli.rs b/polkadot/bridges/bin/rialto-parachain/node/src/cli.rs new file mode 100644 index 0000000000000000000000000000000000000000..78c05f90c88001c2f5d413a6481beee4a8cb0185 --- /dev/null +++ b/polkadot/bridges/bin/rialto-parachain/node/src/cli.rs @@ -0,0 +1,140 @@ +// Copyright 2020-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +use crate::chain_spec; +use std::path::PathBuf; +use structopt::StructOpt; + +/// Sub-commands supported by the collator. +#[derive(Debug, StructOpt)] +pub enum Subcommand { + /// Export the genesis state of the parachain. + #[structopt(name = "export-genesis-state")] + ExportGenesisState(ExportGenesisStateCommand), + + /// Export the genesis wasm of the parachain. + #[structopt(name = "export-genesis-wasm")] + ExportGenesisWasm(ExportGenesisWasmCommand), + + /// Build a chain specification. + BuildSpec(sc_cli::BuildSpecCmd), + + /// Validate blocks. + CheckBlock(sc_cli::CheckBlockCmd), + + /// Export blocks. + ExportBlocks(sc_cli::ExportBlocksCmd), + + /// Export the state of a given block into a chain spec. + ExportState(sc_cli::ExportStateCmd), + + /// Import blocks. + ImportBlocks(sc_cli::ImportBlocksCmd), + + /// Remove the whole chain. + PurgeChain(cumulus_client_cli::PurgeChainCmd), + + /// Revert the chain to a previous state. + Revert(sc_cli::RevertCmd), + + /// The custom benchmark subcommand benchmarking runtime pallets. + #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] + Benchmark(frame_benchmarking_cli::BenchmarkCmd), +} + +/// Command for exporting the genesis state of the parachain +#[derive(Debug, StructOpt)] +pub struct ExportGenesisStateCommand { + /// Output file name or stdout if unspecified. + #[structopt(parse(from_os_str))] + pub output: Option, + + /// Id of the parachain this state is for. + /// + /// Default: 100 + #[structopt(long, conflicts_with = "chain")] + pub parachain_id: Option, + + /// Write output in binary. Default is to write in hex. + #[structopt(short, long)] + pub raw: bool, + + /// The name of the chain for that the genesis state should be exported. + #[structopt(long, conflicts_with = "parachain-id")] + pub chain: Option, +} + +/// Command for exporting the genesis wasm file. +#[derive(Debug, StructOpt)] +pub struct ExportGenesisWasmCommand { + /// Output file name or stdout if unspecified.
+ #[structopt(parse(from_os_str))] + pub output: Option, + + /// Write output in binary. Default is to write in hex. + #[structopt(short, long)] + pub raw: bool, + + /// The name of the chain for that the genesis wasm file should be exported. + #[structopt(long)] + pub chain: Option, +} + +#[derive(Debug, StructOpt)] +#[structopt(settings = &[ + structopt::clap::AppSettings::GlobalVersion, + structopt::clap::AppSettings::ArgsNegateSubcommands, + structopt::clap::AppSettings::SubcommandsNegateReqs, +])] +pub struct Cli { + #[structopt(subcommand)] + pub subcommand: Option, + + #[structopt(long)] + pub parachain_id: Option, + + #[structopt(flatten)] + pub run: cumulus_client_cli::RunCmd, + + /// Relaychain arguments + #[structopt(raw = true)] + pub relaychain_args: Vec, +} + +#[derive(Debug)] +pub struct RelayChainCli { + /// The actual relay chain cli object. + pub base: polkadot_cli::RunCmd, + + /// Optional chain id that should be passed to the relay chain. + pub chain_id: Option, + + /// The base path that should be used by the relay chain. + pub base_path: Option, +} + +impl RelayChainCli { + /// Parse the relay chain CLI parameters using the para chain `Configuration`. 
+ pub fn new<'a>( + para_config: &sc_service::Configuration, + relay_chain_args: impl Iterator, + ) -> Self { + let extension = chain_spec::Extensions::try_get(&*para_config.chain_spec); + let chain_id = extension.map(|e| e.relay_chain.clone()); + let base_path = para_config.base_path.as_ref().map(|x| x.path().join("rialto-bridge-node")); + Self { base_path, chain_id, base: polkadot_cli::RunCmd::from_iter(relay_chain_args) } + } +} diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/command.rs b/polkadot/bridges/bin/rialto-parachain/node/src/command.rs new file mode 100644 index 0000000000000000000000000000000000000000..e4f52cc026a7e7c9ebbf33276f71719a9500561e --- /dev/null +++ b/polkadot/bridges/bin/rialto-parachain/node/src/command.rs @@ -0,0 +1,424 @@ +// Copyright 2020-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +use crate::{ + chain_spec, + cli::{Cli, RelayChainCli, Subcommand}, + service::{new_partial, ParachainRuntimeExecutor}, +}; +use codec::Encode; +use cumulus_client_service::genesis::generate_genesis_block; +use cumulus_primitives_core::ParaId; +use log::info; +use polkadot_parachain::primitives::AccountIdConversion; +use rialto_parachain_runtime::{Block, RuntimeApi}; +use sc_cli::{ + ChainSpec, CliConfiguration, DefaultConfigurationValues, ImportParams, KeystoreParams, + NetworkParams, Result, RuntimeVersion, SharedParams, SubstrateCli, +}; +use sc_service::config::{BasePath, PrometheusConfig}; +use sp_core::hexdisplay::HexDisplay; +use sp_runtime::traits::Block as BlockT; +use std::{io::Write, net::SocketAddr}; + +fn load_spec( + id: &str, + para_id: ParaId, +) -> std::result::Result, String> { + Ok(match id { + "dev" => Box::new(chain_spec::development_config(para_id)), + "" | "local" => Box::new(chain_spec::local_testnet_config(para_id)), + path => Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), + }) +} + +impl SubstrateCli for Cli { + fn impl_name() -> String { + "Parachain Collator Template".into() + } + + fn impl_version() -> String { + env!("SUBSTRATE_CLI_IMPL_VERSION").into() + } + + fn description() -> String { + format!( + "Parachain Collator Template\n\nThe command-line arguments provided first will be \ + passed to the parachain node, while the arguments provided after -- will be passed \ + to the relaychain node.\n\n\ + {} [parachain-args] -- [relaychain-args]", + Self::executable_name() + ) + } + + fn author() -> String { + env!("CARGO_PKG_AUTHORS").into() + } + + fn support_url() -> String { + "https://github.com/substrate-developer-hub/substrate-parachain-template/issues/new".into() + } + + fn copyright_start_year() -> i32 { + 2017 + } + + fn load_spec(&self, id: &str) -> std::result::Result, String> { + load_spec(id, self.parachain_id.unwrap_or(2000).into()) + } + + fn native_runtime_version(_: &Box) -> &'static 
RuntimeVersion { + &rialto_parachain_runtime::VERSION + } +} + +impl SubstrateCli for RelayChainCli { + fn impl_name() -> String { + "Parachain Collator Template".into() + } + + fn impl_version() -> String { + env!("SUBSTRATE_CLI_IMPL_VERSION").into() + } + + fn description() -> String { + "Parachain Collator Template\n\nThe command-line arguments provided first will be \ + passed to the parachain node, while the arguments provided after -- will be passed \ + to the relaychain node.\n\n\ + parachain-collator [parachain-args] -- [relaychain-args]" + .into() + } + + fn author() -> String { + env!("CARGO_PKG_AUTHORS").into() + } + + fn support_url() -> String { + "https://github.com/substrate-developer-hub/substrate-parachain-template/issues/new".into() + } + + fn copyright_start_year() -> i32 { + 2017 + } + + fn load_spec(&self, id: &str) -> std::result::Result, String> { + polkadot_cli::Cli::from_iter([RelayChainCli::executable_name()].iter()).load_spec(id) + } + + fn native_runtime_version(chain_spec: &Box) -> &'static RuntimeVersion { + polkadot_cli::Cli::native_runtime_version(chain_spec) + } +} + +fn extract_genesis_wasm(chain_spec: &dyn sc_service::ChainSpec) -> Result> { + let mut storage = chain_spec.build_storage()?; + + storage + .top + .remove(sp_core::storage::well_known_keys::CODE) + .ok_or_else(|| "Could not find wasm file in genesis state!".into()) +} + +macro_rules! construct_async_run { + (|$components:ident, $cli:ident, $cmd:ident, $config:ident| $( $code:tt )* ) => {{ + let runner = $cli.create_runner($cmd)?; + runner.async_run(|$config| { + let $components = new_partial::< + RuntimeApi, + ParachainRuntimeExecutor, + _ + >( + &$config, + crate::service::parachain_build_import_queue, + )?; + let task_manager = $components.task_manager; + { $( $code )* }.map(|v| (v, task_manager)) + }) + }} +} + +/// Parse command line arguments into service configuration. 
+pub fn run() -> Result<()> { + let cli = Cli::from_args(); + sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::custom( + rialto_parachain_runtime::SS58Prefix::get() as u16, + )); + + match &cli.subcommand { + Some(Subcommand::BuildSpec(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) + }, + Some(Subcommand::CheckBlock(cmd)) => { + construct_async_run!(|components, cli, cmd, config| { + Ok(cmd.run(components.client, components.import_queue)) + }) + }, + Some(Subcommand::ExportBlocks(cmd)) => { + construct_async_run!(|components, cli, cmd, config| Ok( + cmd.run(components.client, config.database) + )) + }, + Some(Subcommand::ExportState(cmd)) => { + construct_async_run!(|components, cli, cmd, config| Ok( + cmd.run(components.client, config.chain_spec) + )) + }, + Some(Subcommand::ImportBlocks(cmd)) => { + construct_async_run!(|components, cli, cmd, config| { + Ok(cmd.run(components.client, components.import_queue)) + }) + }, + Some(Subcommand::PurgeChain(cmd)) => { + let runner = cli.create_runner(cmd)?; + + runner.sync_run(|config| { + let polkadot_cli = RelayChainCli::new( + &config, + [RelayChainCli::executable_name()].iter().chain(cli.relaychain_args.iter()), + ); + + let polkadot_config = SubstrateCli::create_configuration( + &polkadot_cli, + &polkadot_cli, + config.tokio_handle.clone(), + ) + .map_err(|err| format!("Relay chain argument error: {}", err))?; + + cmd.run(config, polkadot_config) + }) + }, + Some(Subcommand::Revert(cmd)) => { + construct_async_run!(|components, cli, cmd, config| Ok( + cmd.run(components.client, components.backend) + )) + }, + Some(Subcommand::ExportGenesisState(params)) => { + let mut builder = sc_cli::LoggerBuilder::new(""); + builder.with_profiling(sc_tracing::TracingReceiver::Log, ""); + let _ = builder.init(); + + let block: Block = generate_genesis_block(&load_spec( + ¶ms.chain.clone().unwrap_or_default(), + 
params.parachain_id.expect("Missing ParaId").into(), + )?)?; + let raw_header = block.header().encode(); + let output_buf = if params.raw { + raw_header + } else { + format!("0x{:?}", HexDisplay::from(&block.header().encode())).into_bytes() + }; + + if let Some(output) = ¶ms.output { + std::fs::write(output, output_buf)?; + } else { + std::io::stdout().write_all(&output_buf)?; + } + + Ok(()) + }, + Some(Subcommand::ExportGenesisWasm(params)) => { + let mut builder = sc_cli::LoggerBuilder::new(""); + builder.with_profiling(sc_tracing::TracingReceiver::Log, ""); + let _ = builder.init(); + + let raw_wasm_blob = + extract_genesis_wasm(&*cli.load_spec(¶ms.chain.clone().unwrap_or_default())?)?; + let output_buf = if params.raw { + raw_wasm_blob + } else { + format!("0x{:?}", HexDisplay::from(&raw_wasm_blob)).into_bytes() + }; + + if let Some(output) = ¶ms.output { + std::fs::write(output, output_buf)?; + } else { + std::io::stdout().write_all(&output_buf)?; + } + + Ok(()) + }, + Some(Subcommand::Benchmark(cmd)) => + if cfg!(feature = "runtime-benchmarks") { + let runner = cli.create_runner(cmd)?; + + runner.sync_run(|config| cmd.run::(config)) + } else { + Err("Benchmarking wasn't enabled when building the node. \ + You can enable it with `--features runtime-benchmarks`." 
+ .into()) + }, + None => { + let runner = cli.create_runner(&cli.run.normalize())?; + + runner.run_node_until_exit(|config| async move { + let para_id = + chain_spec::Extensions::try_get(&*config.chain_spec).map(|e| e.para_id); + + let polkadot_cli = RelayChainCli::new( + &config, + [RelayChainCli::executable_name()].iter().chain(cli.relaychain_args.iter()), + ); + + let id = ParaId::from(cli.parachain_id.or(para_id).expect("Missing ParaId")); + + let parachain_account = + AccountIdConversion::::into_account(&id); + + let block: Block = + generate_genesis_block(&config.chain_spec).map_err(|e| format!("{:?}", e))?; + let genesis_state = format!("0x{:?}", HexDisplay::from(&block.header().encode())); + + let polkadot_config = SubstrateCli::create_configuration( + &polkadot_cli, + &polkadot_cli, + config.tokio_handle.clone(), + ) + .map_err(|err| format!("Relay chain argument error: {}", err))?; + + info!("Parachain id: {:?}", id); + info!("Parachain Account: {}", parachain_account); + info!("Parachain genesis state: {}", genesis_state); + info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" }); + + crate::service::start_node(config, polkadot_config, id) + .await + .map(|r| r.0) + .map_err(Into::into) + }) + }, + } +} + +impl DefaultConfigurationValues for RelayChainCli { + fn p2p_listen_port() -> u16 { + 30334 + } + + fn rpc_ws_listen_port() -> u16 { + 9945 + } + + fn rpc_http_listen_port() -> u16 { + 9934 + } + + fn prometheus_listen_port() -> u16 { + 9616 + } +} + +impl CliConfiguration for RelayChainCli { + fn shared_params(&self) -> &SharedParams { + self.base.base.shared_params() + } + + fn import_params(&self) -> Option<&ImportParams> { + self.base.base.import_params() + } + + fn network_params(&self) -> Option<&NetworkParams> { + self.base.base.network_params() + } + + fn keystore_params(&self) -> Option<&KeystoreParams> { + self.base.base.keystore_params() + } + + fn base_path(&self) -> Result> { + Ok(self + .shared_params() + 
.base_path() + .or_else(|| self.base_path.clone().map(Into::into))) + } + + fn rpc_http(&self, default_listen_port: u16) -> Result> { + self.base.base.rpc_http(default_listen_port) + } + + fn rpc_ipc(&self) -> Result> { + self.base.base.rpc_ipc() + } + + fn rpc_ws(&self, default_listen_port: u16) -> Result> { + self.base.base.rpc_ws(default_listen_port) + } + + fn prometheus_config(&self, default_listen_port: u16) -> Result> { + self.base.base.prometheus_config(default_listen_port) + } + + fn init(&self) -> Result<()> { + unreachable!("PolkadotCli is never initialized; qed"); + } + + fn chain_id(&self, is_dev: bool) -> Result { + let chain_id = self.base.base.chain_id(is_dev)?; + + Ok(if chain_id.is_empty() { self.chain_id.clone().unwrap_or_default() } else { chain_id }) + } + + fn role(&self, is_dev: bool) -> Result { + self.base.base.role(is_dev) + } + + fn transaction_pool(&self) -> Result { + self.base.base.transaction_pool() + } + + fn state_cache_child_ratio(&self) -> Result> { + self.base.base.state_cache_child_ratio() + } + + fn rpc_methods(&self) -> Result { + self.base.base.rpc_methods() + } + + fn rpc_ws_max_connections(&self) -> Result> { + self.base.base.rpc_ws_max_connections() + } + + fn rpc_cors(&self, is_dev: bool) -> Result>> { + self.base.base.rpc_cors(is_dev) + } + + fn default_heap_pages(&self) -> Result> { + self.base.base.default_heap_pages() + } + + fn force_authoring(&self) -> Result { + self.base.base.force_authoring() + } + + fn disable_grandpa(&self) -> Result { + self.base.base.disable_grandpa() + } + + fn max_runtime_instances(&self) -> Result> { + self.base.base.max_runtime_instances() + } + + fn announce_block(&self) -> Result { + self.base.base.announce_block() + } + + fn telemetry_endpoints( + &self, + chain_spec: &Box, + ) -> Result> { + self.base.base.telemetry_endpoints(chain_spec) + } +} diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/lib.rs b/polkadot/bridges/bin/rialto-parachain/node/src/lib.rs new file mode 
100644 index 0000000000000000000000000000000000000000..3ec291596b71946ca6a24347dac29598bdb2ca0b --- /dev/null +++ b/polkadot/bridges/bin/rialto-parachain/node/src/lib.rs @@ -0,0 +1,18 @@ +// Copyright 2020-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +pub mod chain_spec; +pub mod service; diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/main.rs b/polkadot/bridges/bin/rialto-parachain/node/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..2b4e0b438d1a9a30524e6755bc14634cef65056b --- /dev/null +++ b/polkadot/bridges/bin/rialto-parachain/node/src/main.rs @@ -0,0 +1,29 @@ +// Copyright 2020-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Substrate Parachain Node Template CLI + +#![warn(missing_docs)] + +mod chain_spec; +#[macro_use] +mod service; +mod cli; +mod command; + +fn main() -> sc_cli::Result<()> { + command::run() +} diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/service.rs b/polkadot/bridges/bin/rialto-parachain/node/src/service.rs new file mode 100644 index 0000000000000000000000000000000000000000..bd3afca307449a3f09243a6af9aa4f6982b2eeee --- /dev/null +++ b/polkadot/bridges/bin/rialto-parachain/node/src/service.rs @@ -0,0 +1,493 @@ +// Copyright 2020-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +// std +use std::sync::Arc; + +// Local Runtime Types +use rialto_parachain_runtime::RuntimeApi; + +// Cumulus Imports +use cumulus_client_consensus_aura::{ + build_aura_consensus, BuildAuraConsensusParams, SlotProportion, +}; +use cumulus_client_consensus_common::ParachainConsensus; +use cumulus_client_network::build_block_announce_validator; +use cumulus_client_service::{ + prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams, +}; +use cumulus_primitives_core::ParaId; + +// Substrate Imports +use sc_client_api::ExecutorProvider; +use sc_executor::{NativeElseWasmExecutor, NativeExecutionDispatch}; +use sc_network::NetworkService; +use sc_service::{Configuration, PartialComponents, Role, TFullBackend, TFullClient, TaskManager}; +use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle}; +use sp_api::ConstructRuntimeApi; +use sp_consensus::SlotData; +use sp_keystore::SyncCryptoStorePtr; +use sp_runtime::traits::BlakeTwo256; +use substrate_prometheus_endpoint::Registry; + +// Runtime type overrides +type BlockNumber = u32; +type Header = sp_runtime::generic::Header; +pub type Block = sp_runtime::generic::Block; +type Hash = sp_core::H256; + +pub type ParachainRuntimeExecutor = ExecutorDispatch; + +// Our native executor instance. +pub struct ExecutorDispatch; + +impl NativeExecutionDispatch for ExecutorDispatch { + type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + + fn dispatch(method: &str, data: &[u8]) -> Option> { + rialto_parachain_runtime::api::dispatch(method, data) + } + + fn native_version() -> sc_executor::NativeVersion { + rialto_parachain_runtime::native_version() + } +} + +/// Starts a `ServiceBuilder` for a full service. +/// +/// Use this macro if you don't actually need the full service, but just the builder in order to +/// be able to perform chain operations. 
+#[allow(clippy::type_complexity)] +pub fn new_partial( + config: &Configuration, + build_import_queue: BIQ, +) -> Result< + PartialComponents< + TFullClient>, + TFullBackend, + (), + sc_consensus::DefaultImportQueue< + Block, + TFullClient>, + >, + sc_transaction_pool::FullPool< + Block, + TFullClient>, + >, + (Option, Option), + >, + sc_service::Error, +> +where + RuntimeApi: ConstructRuntimeApi>> + + Send + + Sync + + 'static, + RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue + + sp_api::Metadata + + sp_session::SessionKeys + + sp_api::ApiExt< + Block, + StateBackend = sc_client_api::StateBackendFor, Block>, + > + sp_offchain::OffchainWorkerApi + + sp_block_builder::BlockBuilder, + sc_client_api::StateBackendFor, Block>: sp_api::StateBackend, + Executor: NativeExecutionDispatch + 'static, + BIQ: FnOnce( + Arc>>, + &Configuration, + Option, + &TaskManager, + ) -> Result< + sc_consensus::DefaultImportQueue< + Block, + TFullClient>, + >, + sc_service::Error, + >, +{ + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let executor = sc_executor::NativeElseWasmExecutor::::new( + config.wasm_method, + config.default_heap_pages, + config.max_runtime_instances, + ); + + let (client, backend, keystore_container, task_manager) = + sc_service::new_full_parts::( + config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + )?; + let client = Arc::new(client); + + let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle()); + + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", None, worker.run()); + telemetry + }); + + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + 
config.transaction_pool.clone(), + config.role.is_authority().into(), + config.prometheus_registry(), + task_manager.spawn_essential_handle(), + client.clone(), + ); + + let import_queue = build_import_queue( + client.clone(), + config, + telemetry.as_ref().map(|telemetry| telemetry.handle()), + &task_manager, + )?; + + let params = PartialComponents { + backend, + client, + import_queue, + keystore_container, + task_manager, + transaction_pool, + select_chain: (), + other: (telemetry, telemetry_worker_handle), + }; + + Ok(params) +} + +/// Start a node with the given parachain `Configuration` and relay chain `Configuration`. +/// +/// This is the actual implementation that is abstract over the executor and the runtime api. +#[sc_tracing::logging::prefix_logs_with("Parachain")] +async fn start_node_impl( + parachain_config: Configuration, + polkadot_config: Configuration, + id: ParaId, + rpc_ext_builder: RB, + build_import_queue: BIQ, + build_consensus: BIC, +) -> sc_service::error::Result<( + TaskManager, + Arc>>, +)> +where + RuntimeApi: ConstructRuntimeApi>> + + Send + + Sync + + 'static, + RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue + + sp_api::Metadata + + sp_session::SessionKeys + + sp_api::ApiExt< + Block, + StateBackend = sc_client_api::StateBackendFor, Block>, + > + sp_offchain::OffchainWorkerApi + + sp_block_builder::BlockBuilder + + cumulus_primitives_core::CollectCollationInfo, + sc_client_api::StateBackendFor, Block>: sp_api::StateBackend, + Executor: NativeExecutionDispatch + 'static, + RB: Fn( + Arc>>, + ) -> jsonrpc_core::IoHandler + + Send + + 'static, + BIQ: FnOnce( + Arc>>, + &Configuration, + Option, + &TaskManager, + ) -> Result< + sc_consensus::DefaultImportQueue< + Block, + TFullClient>, + >, + sc_service::Error, + >, + BIC: FnOnce( + Arc>>, + Option<&Registry>, + Option, + &TaskManager, + &polkadot_service::NewFull, + Arc< + sc_transaction_pool::FullPool< + Block, + TFullClient>, + >, + >, + Arc>, + 
SyncCryptoStorePtr, + bool, + ) -> Result>, sc_service::Error>, +{ + if matches!(parachain_config.role, Role::Light) { + return Err("Light client not supported!".into()) + } + + let parachain_config = prepare_node_config(parachain_config); + + let params = new_partial::(¶chain_config, build_import_queue)?; + let (mut telemetry, telemetry_worker_handle) = params.other; + + let relay_chain_full_node = + cumulus_client_service::build_polkadot_full_node(polkadot_config, telemetry_worker_handle) + .map_err(|e| match e { + polkadot_service::Error::Sub(x) => x, + s => format!("{}", s).into(), + })?; + + let client = params.client.clone(); + let backend = params.backend.clone(); + let block_announce_validator = build_block_announce_validator( + relay_chain_full_node.client.clone(), + id, + Box::new(relay_chain_full_node.network.clone()), + relay_chain_full_node.backend.clone(), + ); + + let force_authoring = parachain_config.force_authoring; + let validator = parachain_config.role.is_authority(); + let prometheus_registry = parachain_config.prometheus_registry().cloned(); + let transaction_pool = params.transaction_pool.clone(); + let mut task_manager = params.task_manager; + let import_queue = cumulus_client_service::SharedImportQueue::new(params.import_queue); + let (network, system_rpc_tx, start_network) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: ¶chain_config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue: import_queue.clone(), + block_announce_validator_builder: Some(Box::new(|_| block_announce_validator)), + warp_sync: None, + })?; + + let rpc_client = client.clone(); + let rpc_extensions_builder = Box::new(move |_, _| Ok(rpc_ext_builder(rpc_client.clone()))); + + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + rpc_extensions_builder, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + task_manager: &mut task_manager, + 
config: parachain_config, + keystore: params.keystore_container.sync_keystore(), + backend: backend.clone(), + network: network.clone(), + system_rpc_tx, + telemetry: telemetry.as_mut(), + })?; + + let announce_block = { + let network = network.clone(); + Arc::new(move |hash, data| network.announce_block(hash, data)) + }; + + if validator { + let parachain_consensus = build_consensus( + client.clone(), + prometheus_registry.as_ref(), + telemetry.as_ref().map(|t| t.handle()), + &task_manager, + &relay_chain_full_node, + transaction_pool, + network, + params.keystore_container.sync_keystore(), + force_authoring, + )?; + + let spawner = task_manager.spawn_handle(); + + let params = StartCollatorParams { + para_id: id, + block_status: client.clone(), + announce_block, + client: client.clone(), + task_manager: &mut task_manager, + relay_chain_full_node, + spawner, + parachain_consensus, + import_queue, + }; + + start_collator(params).await?; + } else { + let params = StartFullNodeParams { + client: client.clone(), + announce_block, + task_manager: &mut task_manager, + para_id: id, + relay_chain_full_node, + }; + + start_full_node(params)?; + } + + start_network.start_network(); + + Ok((task_manager, client)) +} + +/// Build the import queue for the the parachain runtime. 
+#[allow(clippy::type_complexity)] +pub fn parachain_build_import_queue( + client: Arc>>, + config: &Configuration, + telemetry: Option, + task_manager: &TaskManager, +) -> Result< + sc_consensus::DefaultImportQueue< + Block, + TFullClient>, + >, + sc_service::Error, +> { + let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; + + cumulus_client_consensus_aura::import_queue::< + sp_consensus_aura::sr25519::AuthorityPair, + _, + _, + _, + _, + _, + _, + >(cumulus_client_consensus_aura::ImportQueueParams { + block_import: client.clone(), + client: client.clone(), + create_inherent_data_providers: move |_, _| async move { + let time = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( + *time, + slot_duration.slot_duration(), + ); + + Ok((time, slot)) + }, + registry: config.prometheus_registry(), + can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), + spawner: &task_manager.spawn_essential_handle(), + telemetry, + }) + .map_err(Into::into) +} + +/// Start a normal parachain node. 
+pub async fn start_node( + parachain_config: Configuration, + polkadot_config: Configuration, + id: ParaId, +) -> sc_service::error::Result<( + TaskManager, + Arc>>, +)> { + start_node_impl::( + parachain_config, + polkadot_config, + id, + |_| Default::default(), + parachain_build_import_queue, + |client, + prometheus_registry, + telemetry, + task_manager, + relay_chain_node, + transaction_pool, + sync_oracle, + keystore, + force_authoring| { + let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; + + let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry, + telemetry.clone(), + ); + + let relay_chain_backend = relay_chain_node.backend.clone(); + let relay_chain_client = relay_chain_node.client.clone(); + Ok(build_aura_consensus::< + sp_consensus_aura::sr25519::AuthorityPair, + _, + _, + _, + _, + _, + _, + _, + _, + _, + >(BuildAuraConsensusParams { + proposer_factory, + create_inherent_data_providers: move |_, (relay_parent, validation_data)| { + let parachain_inherent = + cumulus_primitives_parachain_inherent::ParachainInherentData::create_at_with_client( + relay_parent, + &relay_chain_client, + &*relay_chain_backend, + &validation_data, + id, + ); + async move { + let time = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( + *time, + slot_duration.slot_duration(), + ); + + let parachain_inherent = parachain_inherent.ok_or_else(|| { + Box::::from( + "Failed to create parachain inherent", + ) + })?; + Ok((time, slot, parachain_inherent)) + } + }, + block_import: client.clone(), + relay_chain_client: relay_chain_node.client.clone(), + relay_chain_backend: relay_chain_node.backend.clone(), + para_client: client, + backoff_authoring_blocks: Option::<()>::None, + sync_oracle, + keystore, + force_authoring, + slot_duration, + 
// We got around 500ms for proposing + block_proposal_slot_portion: SlotProportion::new(1f32 / 24f32), + telemetry, + max_block_proposal_slot_portion: None, + })) + }, + ) + .await +} diff --git a/polkadot/bridges/bin/rialto-parachain/runtime/Cargo.toml b/polkadot/bridges/bin/rialto-parachain/runtime/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..20ce70aba8f6b22f219005a9337dceb90f44c0ca --- /dev/null +++ b/polkadot/bridges/bin/rialto-parachain/runtime/Cargo.toml @@ -0,0 +1,122 @@ +[package] +name = "rialto-parachain-runtime" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/parity-bridges-common/" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[build-dependencies] +substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[dependencies] +codec = { package = 'parity-scale-codec', version = '2.0.0', default-features = false, features = ['derive']} +log = { version = "0.4.14", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +serde = { version = '1.0', optional = true, features = ['derive'] } + +# Bridge depedencies + +bp-rialto-parachain = { path = "../../../primitives/chain-rialto-parachain", default-features = false } + +# Substrate Dependencies +## Substrate Primitive Dependencies +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master", 
default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +## Substrate FRAME Dependencies +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } +frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +## Substrate Pallet Dependencies +pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } 
+pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +# Cumulus Dependencies +cumulus-pallet-aura-ext = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } +cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } +cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } +cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } +cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } +cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } +cumulus-primitives-timestamp = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } +cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } +parachain-info = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } + +# Polkadot Dependencies +polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false } +xcm = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false } +xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false } 
+xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false } +pallet-xcm = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false } + +[features] +default = ['std'] +runtime-benchmarks = [ + 'sp-runtime/runtime-benchmarks', + 'frame-benchmarking', + 'frame-support/runtime-benchmarks', + 'frame-system-benchmarking', + 'frame-system/runtime-benchmarks', + 'pallet-balances/runtime-benchmarks', + 'pallet-timestamp/runtime-benchmarks', +] +std = [ + "bp-rialto-parachain/std", + "codec/std", + "log/std", + "scale-info/std", + "serde", + "sp-api/std", + "sp-std/std", + "sp-io/std", + "sp-core/std", + "sp-runtime/std", + "sp-version/std", + "sp-offchain/std", + "sp-session/std", + "sp-block-builder/std", + "sp-transaction-pool/std", + "sp-inherents/std", + "frame-support/std", + "frame-executive/std", + "frame-system/std", + "pallet-balances/std", + "pallet-randomness-collective-flip/std", + "pallet-timestamp/std", + "pallet-sudo/std", + "pallet-transaction-payment/std", + "parachain-info/std", + "cumulus-pallet-aura-ext/std", + "cumulus-pallet-parachain-system/std", + "cumulus-pallet-xcmp-queue/std", + "cumulus-pallet-xcm/std", + "cumulus-primitives-core/std", + "cumulus-primitives-timestamp/std", + "cumulus-primitives-utility/std", + "xcm/std", + "xcm-builder/std", + "xcm-executor/std", + "pallet-aura/std", + "sp-consensus-aura/std", +] diff --git a/polkadot/bridges/bin/rialto-parachain/runtime/build.rs b/polkadot/bridges/bin/rialto-parachain/runtime/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..65095bd1b7e9e002f74bdfafc5c05e2554846ebd --- /dev/null +++ b/polkadot/bridges/bin/rialto-parachain/runtime/build.rs @@ -0,0 +1,25 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use substrate_wasm_builder::WasmBuilder; + +fn main() { + WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .build() +} diff --git a/polkadot/bridges/bin/rialto-parachain/runtime/src/lib.rs b/polkadot/bridges/bin/rialto-parachain/runtime/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..5b71674b7fe9b91cb34860aff15ed899415f3baf --- /dev/null +++ b/polkadot/bridges/bin/rialto-parachain/runtime/src/lib.rs @@ -0,0 +1,646 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! The Rialto parachain runtime. This can be compiled with `#[no_std]`, ready for Wasm. +//! 
+//! Originally a copy of runtime from https://github.com/substrate-developer-hub/substrate-parachain-template. + +#![cfg_attr(not(feature = "std"), no_std)] +// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. +#![recursion_limit = "256"] + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +use sp_api::impl_runtime_apis; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + traits::{AccountIdLookup, Block as BlockT}, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, +}; + +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; + +// A few exports that help ease life for downstream crates. +pub use frame_support::{ + construct_runtime, match_type, parameter_types, + traits::{Everything, IsInVec, Randomness}, + weights::{ + constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, + DispatchClass, IdentityFee, Weight, + }, + StorageValue, +}; +pub use frame_system::Call as SystemCall; +pub use pallet_balances::Call as BalancesCall; +pub use pallet_timestamp::Call as TimestampCall; +pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; +pub use sp_runtime::{MultiAddress, Perbill, Permill}; + +pub use bp_rialto_parachain::{ + AccountId, Balance, BlockLength, BlockNumber, BlockWeights, Hash, Hasher as Hashing, Header, + Index, Signature, MAXIMUM_BLOCK_WEIGHT, +}; + +// Polkadot & XCM imports +use pallet_xcm::XcmPassthrough; +use polkadot_parachain::primitives::Sibling; +use xcm::latest::prelude::*; +use xcm_builder::{ + AccountId32Aliases, AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, CurrencyAdapter, + EnsureXcmOrigin, FixedWeightBounds, IsConcrete, LocationInverter, 
NativeAsset, + ParentAsSuperuser, ParentIsDefault, RelayChainAsNative, SiblingParachainAsNative, + SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, + SovereignSignedViaLocation, TakeWeightCredit, UsingComponents, +}; +use xcm_executor::{Config, XcmExecutor}; + +/// The address format for describing accounts. +pub type Address = MultiAddress; +/// Block type as expected by this runtime. +pub type Block = generic::Block; +/// A Block signed with a Justification +pub type SignedBlock = generic::SignedBlock; +/// BlockId type as expected by this runtime. +pub type BlockId = generic::BlockId; +/// The SignedExtension to the basic transaction logic. +pub type SignedExtra = ( + frame_system::CheckSpecVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, +); +/// Unchecked extrinsic type as expected by this runtime. +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +/// Extrinsic type that has already been checked. +pub type CheckedExtrinsic = generic::CheckedExtrinsic; +/// Executive: handles dispatch to the various modules. +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllPallets, +>; + +impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + } +} + +/// This runtime version. +#[sp_version::runtime_version] +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("template-parachain"), + impl_name: create_runtime_str!("template-parachain"), + authoring_version: 1, + spec_version: 1, + impl_version: 0, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, +}; + +/// This determines the average expected block time that we are targeting. +/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. 
+/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked +/// up by `pallet_aura` to implement `fn slot_duration()`. +/// +/// Change this to adjust the block time. +pub const MILLISECS_PER_BLOCK: u64 = 12000; + +pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; + +pub const EPOCH_DURATION_IN_BLOCKS: u32 = 10 * MINUTES; + +// Time is measured by number of blocks. +pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); +pub const HOURS: BlockNumber = MINUTES * 60; +pub const DAYS: BlockNumber = HOURS * 24; + +// Unit = the base number of indivisible units for balances +pub const UNIT: Balance = 1_000_000_000_000; +pub const MILLIUNIT: Balance = 1_000_000_000; +pub const MICROUNIT: Balance = 1_000_000; + +// 1 in 4 blocks (on average, not counting collisions) will be primary babe blocks. +pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } +} + +parameter_types! { + pub const BlockHashCount: BlockNumber = 250; + pub const Version: RuntimeVersion = VERSION; + pub const SS58Prefix: u8 = 48; +} + +// Configure FRAME pallets to include in runtime. + +impl frame_system::Config for Runtime { + /// The identifier used to distinguish between accounts. + type AccountId = AccountId; + /// The aggregated dispatch type that is available for extrinsics. + type Call = Call; + /// The lookup mechanism to get account ID from whatever is passed in dispatchers. + type Lookup = AccountIdLookup; + /// The index type for storing how many extrinsics an account has signed. + type Index = Index; + /// The index type for blocks. + type BlockNumber = BlockNumber; + /// The type for hashing blocks and tries. + type Hash = Hash; + /// The hashing algorithm used. + type Hashing = Hashing; + /// The header type. 
+ type Header = generic::Header; + /// The ubiquitous event type. + type Event = Event; + /// The ubiquitous origin type. + type Origin = Origin; + /// Maximum number of block number to block hash mappings to keep (oldest pruned first). + type BlockHashCount = BlockHashCount; + /// Runtime version. + type Version = Version; + /// Converts a module to an index of this module in the runtime. + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + /// What to do if a new account is created. + type OnNewAccount = (); + /// What to do if an account is fully reaped from the system. + type OnKilledAccount = (); + /// The weight of database operations that the runtime can invoke. + type DbWeight = (); + /// The basic call filter to use in dispatchable. + type BaseCallFilter = Everything; + /// Weight information for the extrinsics of this pallet. + type SystemWeightInfo = (); + /// Block & extrinsics weights: base values and limits. + type BlockWeights = BlockWeights; + /// The maximum length of a block (in bytes). + type BlockLength = BlockLength; + /// This is used as an identifier of the chain. 42 is the generic substrate prefix. + type SS58Prefix = SS58Prefix; + /// The action to take on a Runtime Upgrade + type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; +} + +parameter_types! { + pub const MinimumPeriod: u64 = SLOT_DURATION / 2; +} + +impl pallet_timestamp::Config for Runtime { + /// A timestamp: milliseconds since the Unix epoch. + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); +} + +parameter_types! 
{ + pub const ExistentialDeposit: u128 = MILLIUNIT; + pub const TransferFee: u128 = MILLIUNIT; + pub const CreationFee: u128 = MILLIUNIT; + pub const TransactionByteFee: u128 = MICROUNIT; + pub const OperationalFeeMultiplier: u8 = 5; + pub const MaxLocks: u32 = 50; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for Runtime { + /// The type for recording an account's balance. + type Balance = Balance; + /// The ubiquitous event type. + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = pallet_balances::weights::SubstrateWeight; + type MaxLocks = MaxLocks; + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; +} + +impl pallet_transaction_payment::Config for Runtime { + type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; + type TransactionByteFee = TransactionByteFee; + type OperationalFeeMultiplier = OperationalFeeMultiplier; + type WeightToFee = IdentityFee; + type FeeMultiplierUpdate = (); +} + +impl pallet_sudo::Config for Runtime { + type Call = Call; + type Event = Event; +} + +parameter_types! { + pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT / 4; + pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT / 4; +} + +impl cumulus_pallet_parachain_system::Config for Runtime { + type Event = Event; + type OnValidationData = (); + type SelfParaId = parachain_info::Pallet; + type OutboundXcmpMessageSource = XcmpQueue; + type DmpMessageHandler = DmpQueue; + type ReservedDmpWeight = ReservedDmpWeight; + type XcmpMessageHandler = XcmpQueue; + type ReservedXcmpWeight = ReservedXcmpWeight; +} + +impl parachain_info::Config for Runtime {} + +impl cumulus_pallet_aura_ext::Config for Runtime {} + +impl pallet_randomness_collective_flip::Config for Runtime {} + +parameter_types! 
{ + pub const RelayLocation: MultiLocation = MultiLocation::parent(); + pub const RelayNetwork: NetworkId = NetworkId::Polkadot; + pub RelayOrigin: Origin = cumulus_pallet_xcm::Origin::Relay.into(); + pub Ancestry: MultiLocation = Parachain(ParachainInfo::parachain_id().into()).into(); +} + +/// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used +/// when determining ownership of accounts for asset transacting and when attempting to use XCM +/// `Transact` in order to determine the dispatch Origin. +pub type LocationToAccountId = ( + // The parent (Relay-chain) origin converts to the default `AccountId`. + ParentIsDefault, + // Sibling parachain origins convert to AccountId via the `ParaId::into`. + SiblingParachainConvertsVia, + // Straight up local `AccountId32` origins just alias directly to `AccountId`. + AccountId32Aliases, +); + +/// Means for transacting assets on this chain. +pub type LocalAssetTransactor = CurrencyAdapter< + // Use this currency: + Balances, + // Use this currency when it is a fungible asset matching the given location or name: + IsConcrete, + // Do a simple punn to convert an AccountId32 MultiLocation into a native chain account ID: + LocationToAccountId, + // Our chain's account ID type (we can't get away without mentioning it explicitly): + AccountId, + // We don't track any teleports. + (), +>; + +/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, +/// ready for dispatching a transaction with XCM `Transact`. There is an `OriginKind` which can +/// biases the kind of local `Origin` it will become. +pub type XcmOriginToTransactDispatchOrigin = ( + // Sovereign account converter; this attempts to derive an `AccountId` from the origin location + // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for + // foreign chains who want to have a local sovereign account on this chain which they control. 
+ SovereignSignedViaLocation, + // Native converter for Relay-chain (Parent) location; will converts to a `Relay` origin when + // recognised. + RelayChainAsNative, + // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when + // recognised. + SiblingParachainAsNative, + // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a + // transaction from the Root origin. + ParentAsSuperuser, + // Native signed account converter; this just converts an `AccountId32` origin into a normal + // `Origin::Signed` origin of the same 32-byte value. + SignedAccountId32AsNative, + // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. + XcmPassthrough, +); + +parameter_types! { + // One XCM operation is 1_000_000 weight - almost certainly a conservative estimate. + pub UnitWeightCost: Weight = 1_000_000; + // One UNIT buys 1 second of weight. + pub const WeightPrice: (MultiLocation, u128) = (MultiLocation::parent(), UNIT); + pub const MaxInstructions: u32 = 100; + pub const MaxAuthorities: u32 = 100_000; +} + +match_type! { + pub type ParentOrParentsUnitPlurality: impl Contains = { + MultiLocation { parents: 1, interior: Here } | + MultiLocation { parents: 1, interior: X1(Plurality { id: BodyId::Unit, .. }) } + }; +} + +pub type Barrier = ( + TakeWeightCredit, + AllowTopLevelPaidExecutionFrom, + AllowUnpaidExecutionFrom, + // ^^^ Parent & its unit plurality gets free execution +); + +pub struct XcmConfig; +impl Config for XcmConfig { + type Call = Call; + type XcmSender = XcmRouter; + // How to withdraw and deposit an asset. 
+ type AssetTransactor = LocalAssetTransactor; + type OriginConverter = XcmOriginToTransactDispatchOrigin; + type IsReserve = NativeAsset; + type IsTeleporter = NativeAsset; // <- should be enough to allow teleportation of UNIT + type LocationInverter = LocationInverter; + type Barrier = Barrier; + type Weigher = FixedWeightBounds; + type Trader = UsingComponents, RelayLocation, AccountId, Balances, ()>; + type ResponseHandler = PolkadotXcm; + type AssetTrap = PolkadotXcm; + type AssetClaims = PolkadotXcm; + type SubscriptionService = PolkadotXcm; +} + +/// No local origins on this chain are allowed to dispatch XCM sends/executions. +pub type LocalOriginToLocation = SignedToAccountId32; + +/// The means for routing XCM messages which are not for local execution into the right message +/// queues. +pub type XcmRouter = ( + // Two routers - use UMP to communicate with the relay chain: + cumulus_primitives_utility::ParentAsUmp, + // ..and XCMP to communicate with the sibling chains. + XcmpQueue, +); + +impl pallet_xcm::Config for Runtime { + type Event = Event; + type SendXcmOrigin = EnsureXcmOrigin; + type XcmRouter = XcmRouter; + type ExecuteXcmOrigin = EnsureXcmOrigin; + type XcmExecuteFilter = Everything; + type XcmExecutor = XcmExecutor; + type XcmTeleportFilter = Everything; + type XcmReserveTransferFilter = Everything; + type Weigher = FixedWeightBounds; + type LocationInverter = LocationInverter; + type Origin = Origin; + type Call = Call; + const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; + type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; +} + +impl cumulus_pallet_xcm::Config for Runtime { + type Event = Event; + type XcmExecutor = XcmExecutor; +} + +impl cumulus_pallet_xcmp_queue::Config for Runtime { + type Event = Event; + type XcmExecutor = XcmExecutor; + type ChannelInfo = ParachainSystem; + type VersionWrapper = (); +} + +impl cumulus_pallet_dmp_queue::Config for Runtime { + type Event = Event; + type XcmExecutor = XcmExecutor; + type 
ExecuteOverweightOrigin = frame_system::EnsureRoot; +} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; + type DisabledValidators = (); + type MaxAuthorities = MaxAuthorities; +} + +// /// Configure the pallet template in pallets/template. +// impl template::Config for Runtime { +// type Event = Event; +// } + +// Create the runtime by composing the FRAME pallets that were previously configured. +construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = generic::Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Storage, Config, Event}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Sudo: pallet_sudo::{Pallet, Call, Storage, Config, Event}, + RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, + + ParachainSystem: cumulus_pallet_parachain_system::{Pallet, Call, Storage, Inherent, Event} = 20, + ParachainInfo: parachain_info::{Pallet, Storage, Config} = 21, + + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event} = 30, + + Aura: pallet_aura::{Pallet, Config}, + AuraExt: cumulus_pallet_aura_ext::{Pallet, Config}, + + // XCM helpers. + XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 50, + PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin} = 51, + CumulusXcm: cumulus_pallet_xcm::{Pallet, Call, Event, Origin} = 52, + DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 53, + + // //Template + // TemplatePallet: template::{Pallet, Call, Storage, Event}, + } +); + +impl_runtime_apis! 
{ + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + OpaqueMetadata::new(Runtime::metadata().into()) + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic( + extrinsic: ::Extrinsic, + ) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + block_hash: ::Hash, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx, block_hash) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_session::SessionKeys for Runtime { + fn decode_session_keys( + encoded: Vec, + ) -> Option, KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + } + + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities().to_vec() + } + } + + impl cumulus_primitives_core::CollectCollationInfo for Runtime { + fn collect_collation_info() -> cumulus_primitives_core::CollationInfo { + 
ParachainSystem::collect_collation_info() + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Index { + System::account_nonce(account) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { + fn query_info( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details( + uxt: ::Extrinsic, + len: u32, + ) -> pallet_transaction_payment::FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; + + use frame_system_benchmarking::Pallet as SystemBench; + impl frame_system_benchmarking::Config for Runtime {} + + let whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Total Issuance + hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + ]; + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + + add_benchmark!(params, batches, frame_system, SystemBench::); + add_benchmark!(params, batches, pallet_balances, Balances); + add_benchmark!(params, batches, pallet_timestamp, Timestamp); + + if batches.is_empty() { 
return Err("Benchmark not found for this pallet.".into()) } + Ok(batches) + } + } +} + +struct CheckInherents; + +impl cumulus_pallet_parachain_system::CheckInherents for CheckInherents { + fn check_inherents( + block: &Block, + relay_state_proof: &cumulus_pallet_parachain_system::RelayChainStateProof, + ) -> sp_inherents::CheckInherentsResult { + let relay_chain_slot = relay_state_proof + .read_slot() + .expect("Could not read the relay chain slot from the proof"); + + let inherent_data = + cumulus_primitives_timestamp::InherentDataProvider::from_relay_chain_slot_and_duration( + relay_chain_slot, + sp_std::time::Duration::from_secs(6), + ) + .create_inherent_data() + .expect("Could not create the timestamp inherent data"); + + inherent_data.check_extrinsics(block) + } +} + +cumulus_pallet_parachain_system::register_validate_block!( + Runtime = Runtime, + BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, + CheckInherents = CheckInherents, +); diff --git a/polkadot/bridges/bin/rialto/node/Cargo.toml b/polkadot/bridges/bin/rialto/node/Cargo.toml index 1c9ec8b5bb0abc2ce43eb079753b2dc7a593c365..2795f2eecaecc00cb4532123ccae3eb7c2216a3c 100644 --- a/polkadot/bridges/bin/rialto/node/Cargo.toml +++ b/polkadot/bridges/bin/rialto/node/Cargo.toml @@ -10,13 +10,17 @@ repository = "https://github.com/paritytech/parity-bridges-common/" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -jsonrpc-core = "15.1.0" +futures = "0.3" +jsonrpc-core = "18.0" +kvdb = "0.10" +kvdb-rocksdb = "0.12" +lru = "0.7" structopt = "0.3.21" serde_json = "1.0.59" +thiserror = "1.0" # Bridge dependencies -bp-messages = { path = "../../../primitives/messages" } bp-runtime = { path = "../../../primitives/runtime" } bp-rialto = { path = "../../../primitives/chain-rialto" } pallet-bridge-messages = { path = "../../../modules/messages" } @@ -24,32 +28,86 @@ rialto-runtime = { path = "../runtime" } # Substrate Dependencies - +beefy-gadget = { git = 
"https://github.com/paritytech/substrate", branch = "master" } +beefy-gadget-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } +beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" } node-inspect = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-mmr-primitives = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-mmr-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["wasmtime"] } sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-consensus-uncles = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-finality-grandpa = { git = 
"https://github.com/paritytech/substrate", branch = "master" } sc-finality-grandpa-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } +#sc-finality-grandpa-warp-sync = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", 
branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" } substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" } +substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } + +# Polkadot Dependencies + +polkadot-client = { git = "https://github.com/paritytech/polkadot", branch = "master" } + +# Polkadot (parachain) Dependencies + +polkadot-approval-distribution = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-availability-bitfield-distribution = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-availability-distribution = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-availability-recovery = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-collator-protocol = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-dispute-distribution = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-gossip-support = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-network-bridge = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-node-collation-generation = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-node-core-approval-voting = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-node-core-av-store = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-node-core-backing = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-node-core-bitfield-signing = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-node-core-candidate-validation = { git = 
"https://github.com/paritytech/polkadot", branch = "master" } +polkadot-node-core-chain-api = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-node-core-chain-selection = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-node-core-parachains-inherent = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-node-core-provisioner = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-node-core-pvf = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-node-core-runtime-api = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-node-core-dispute-coordinator = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-node-network-protocol = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-node-subsystem-util = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-overseer = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-statement-distribution = { git = "https://github.com/paritytech/polkadot", branch = "master" } [build-dependencies] substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/bin/rialto/node/src/chain_spec.rs b/polkadot/bridges/bin/rialto/node/src/chain_spec.rs index 4174cda24487ee4016cce39968a32c94c6987610..fb18a35a6af0ebd43fc48e7db0d0302876095850 100644 --- a/polkadot/bridges/bin/rialto/node/src/chain_spec.rs +++ b/polkadot/bridges/bin/rialto/node/src/chain_spec.rs @@ -14,13 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . 
+use beefy_primitives::crypto::AuthorityId as BeefyId; use bp_rialto::derive_account_from_millau_id; +use polkadot_primitives::v1::{AssignmentId, ValidatorId}; use rialto_runtime::{ - AccountId, AuraConfig, BalancesConfig, BridgeKovanConfig, BridgeRialtoPoaConfig, GenesisConfig, GrandpaConfig, - SessionConfig, SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY, + AccountId, BabeConfig, BalancesConfig, BeefyConfig, BridgeMillauMessagesConfig, + ConfigurationConfig, GenesisConfig, GrandpaConfig, SessionConfig, SessionKeys, Signature, + SudoConfig, SystemConfig, WASM_BINARY, }; use serde_json::json; -use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use sp_consensus_babe::AuthorityId as BabeId; use sp_core::{sr25519, Pair, Public}; use sp_finality_grandpa::AuthorityId as GrandpaId; use sp_runtime::traits::{IdentifyAccount, Verify}; @@ -56,12 +60,18 @@ where AccountPublic::from(get_from_seed::(seed)).into_account() } -/// Helper function to generate an authority key for Aura -pub fn get_authority_keys_from_seed(s: &str) -> (AccountId, AuraId, GrandpaId) { +/// Helper function to generate authority keys. 
+pub fn get_authority_keys_from_seed( + s: &str, +) -> (AccountId, BabeId, BeefyId, GrandpaId, ValidatorId, AssignmentId, AuthorityDiscoveryId) { ( get_account_id_from_seed::(s), - get_from_seed::(s), + get_from_seed::(s), + get_from_seed::(s), get_from_seed::(s), + get_from_seed::(s), + get_from_seed::(s), + get_from_seed::(s), ) } @@ -71,10 +81,7 @@ impl Alternative { let properties = Some( json!({ "tokenDecimals": 9, - "tokenSymbol": "RLT", - "bridgeIds": { - "Millau": bp_runtime::MILLAU_CHAIN_ID, - } + "tokenSymbol": "RLT" }) .as_object() .expect("Map given; qed") @@ -82,8 +89,8 @@ impl Alternative { ); match self { Alternative::Development => ChainSpec::from_genesis( - "Development", - "dev", + "Rialto Development", + "rialto_dev", sc_service::ChainType::Development, || { testnet_genesis( @@ -108,8 +115,8 @@ impl Alternative { None, ), Alternative::LocalTestnet => ChainSpec::from_genesis( - "Local Testnet", - "local_testnet", + "Rialto Local", + "rialto_local", sc_service::ChainType::Local, || { testnet_genesis( @@ -138,10 +145,12 @@ impl Alternative { get_account_id_from_seed::("Ferdie//stash"), get_account_id_from_seed::("George//stash"), get_account_id_from_seed::("Harry//stash"), - pallet_bridge_messages::Pallet::< - rialto_runtime::Runtime, - pallet_bridge_messages::DefaultInstance, - >::relayer_fund_account_id(), + get_account_id_from_seed::("MillauMessagesOwner"), + get_account_id_from_seed::("WithMillauTokenSwap"), + pallet_bridge_messages::relayer_fund_account_id::< + bp_rialto::AccountId, + bp_rialto::AccountIdConverter, + >(), derive_account_from_millau_id(bp_runtime::SourceAccount::Account( get_account_id_from_seed::("Alice"), )), @@ -174,12 +183,27 @@ impl Alternative { } } -fn session_keys(aura: AuraId, grandpa: GrandpaId) -> SessionKeys { - SessionKeys { aura, grandpa } +fn session_keys( + babe: BabeId, + beefy: BeefyId, + grandpa: GrandpaId, + para_validator: ValidatorId, + para_assignment: AssignmentId, + authority_discovery: 
AuthorityDiscoveryId, +) -> SessionKeys { + SessionKeys { babe, beefy, grandpa, para_validator, para_assignment, authority_discovery } } fn testnet_genesis( - initial_authorities: Vec<(AccountId, AuraId, GrandpaId)>, + initial_authorities: Vec<( + AccountId, + BabeId, + BeefyId, + GrandpaId, + ValidatorId, + AssignmentId, + AuthorityDiscoveryId, + )>, root_key: AccountId, endowed_accounts: Vec, _enable_println: bool, @@ -187,51 +211,93 @@ fn testnet_genesis( GenesisConfig { system: SystemConfig { code: WASM_BINARY.expect("Rialto development WASM not available").to_vec(), - changes_trie_config: Default::default(), }, balances: BalancesConfig { balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(), }, - aura: AuraConfig { - authorities: Vec::new(), - }, - bridge_rialto_poa: load_rialto_poa_bridge_config(), - bridge_kovan: load_kovan_bridge_config(), - grandpa: GrandpaConfig { + babe: BabeConfig { authorities: Vec::new(), + epoch_config: Some(rialto_runtime::BABE_GENESIS_EPOCH_CONFIG), }, + beefy: BeefyConfig { authorities: Vec::new() }, + grandpa: GrandpaConfig { authorities: Vec::new() }, sudo: SudoConfig { key: root_key }, session: SessionConfig { keys: initial_authorities .iter() - .map(|x| (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone()))) + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + session_keys( + x.1.clone(), + x.2.clone(), + x.3.clone(), + x.4.clone(), + x.5.clone(), + x.6.clone(), + ), + ) + }) .collect::>(), }, - } -} - -fn load_rialto_poa_bridge_config() -> BridgeRialtoPoaConfig { - BridgeRialtoPoaConfig { - initial_header: rialto_runtime::rialto_poa::genesis_header(), - initial_difficulty: 0.into(), - initial_validators: rialto_runtime::rialto_poa::genesis_validators(), - } -} - -fn load_kovan_bridge_config() -> BridgeKovanConfig { - BridgeKovanConfig { - initial_header: rialto_runtime::kovan::genesis_header(), - initial_difficulty: 0.into(), - initial_validators: rialto_runtime::kovan::genesis_validators(), + 
authority_discovery: Default::default(), + hrmp: Default::default(), + // this configuration is exact copy of configuration from Polkadot repo + // (see /node/service/src/chain_spec.rs:default_parachains_host_configuration) + configuration: ConfigurationConfig { + config: polkadot_runtime_parachains::configuration::HostConfiguration { + validation_upgrade_frequency: 1u32, + validation_upgrade_delay: 1, + code_retention_period: 1200, + max_code_size: polkadot_primitives::v1::MAX_CODE_SIZE, + max_pov_size: polkadot_primitives::v1::MAX_POV_SIZE, + max_head_data_size: 32 * 1024, + group_rotation_frequency: 20, + chain_availability_period: 4, + thread_availability_period: 4, + max_upward_queue_count: 8, + max_upward_queue_size: 1024 * 1024, + max_downward_message_size: 1024, + // this is approximatelly 4ms. + // + // Same as `4 * frame_support::weights::WEIGHT_PER_MILLIS`. We don't bother with + // an import since that's a made up number and should be replaced with a constant + // obtained by benchmarking anyway. 
+ ump_service_total_weight: 4 * 1_000_000_000, + max_upward_message_size: 1024 * 1024, + max_upward_message_num_per_candidate: 5, + hrmp_sender_deposit: 0, + hrmp_recipient_deposit: 0, + hrmp_channel_max_capacity: 8, + hrmp_channel_max_total_size: 8 * 1024, + hrmp_max_parachain_inbound_channels: 4, + hrmp_max_parathread_inbound_channels: 4, + hrmp_channel_max_message_size: 1024 * 1024, + hrmp_max_parachain_outbound_channels: 4, + hrmp_max_parathread_outbound_channels: 4, + hrmp_max_message_num_per_candidate: 5, + dispute_period: 6, + no_show_slots: 2, + n_delay_tranches: 25, + needed_approvals: 2, + relay_vrf_modulo_samples: 2, + zeroth_delay_tranche_width: 0, + ..Default::default() + }, + }, + paras: Default::default(), + bridge_millau_messages: BridgeMillauMessagesConfig { + owner: Some(get_account_id_from_seed::("MillauMessagesOwner")), + ..Default::default() + }, } } #[test] fn derived_dave_account_is_as_expected() { let dave = get_account_id_from_seed::("Dave"); - let derived: AccountId = derive_account_from_millau_id(bp_runtime::SourceAccount::Account(dave)); - assert_eq!( - derived.to_string(), - "5HZhdv53gSJmWWtD8XR5Ypu4PgbT5JNWwGw2mkE75cN61w9t".to_string() - ); + let derived: AccountId = + derive_account_from_millau_id(bp_runtime::SourceAccount::Account(dave)); + assert_eq!(derived.to_string(), "5HZhdv53gSJmWWtD8XR5Ypu4PgbT5JNWwGw2mkE75cN61w9t".to_string()); } diff --git a/polkadot/bridges/bin/rialto/node/src/cli.rs b/polkadot/bridges/bin/rialto/node/src/cli.rs index 46323ed25c9ed2e39ebc6089b5bfa0e2ad29ddfd..3f85a69a713fe5125f2fe8d402c8fb1d9608b107 100644 --- a/polkadot/bridges/bin/rialto/node/src/cli.rs +++ b/polkadot/bridges/bin/rialto/node/src/cli.rs @@ -29,10 +29,10 @@ pub struct Cli { /// Possible subcommands of the main binary. 
#[derive(Debug, StructOpt)] pub enum Subcommand { - /// Key management cli utilities + /// Key management CLI utilities Key(sc_cli::KeySubcommand), - /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. + /// Verify a signature for a message, provided on `STDIN`, with a given (public or secret) key. Verify(sc_cli::VerifyCmd), /// Generate a seed that provides a vanity address. @@ -67,4 +67,19 @@ pub enum Subcommand { /// Benchmark runtime pallets. Benchmark(frame_benchmarking_cli::BenchmarkCmd), + + /// FOR INTERNAL USE: analog of the "prepare-worker" command of the polkadot binary. + #[structopt(name = "prepare-worker", setting = structopt::clap::AppSettings::Hidden)] + PvfPrepareWorker(ValidationWorkerCommand), + + /// FOR INTERNAL USE: analog of the "execute-worker" command of the polkadot binary. + #[structopt(name = "execute-worker", setting = structopt::clap::AppSettings::Hidden)] + PvfExecuteWorker(ValidationWorkerCommand), +} + +/// Validation worker command. +#[derive(Debug, StructOpt)] +pub struct ValidationWorkerCommand { + /// The path to the validation host's socket. + pub socket_path: String, } diff --git a/polkadot/bridges/bin/rialto/node/src/command.rs b/polkadot/bridges/bin/rialto/node/src/command.rs index a9930c57417ec0e68cf46f42f6090f18103ec9f8..7be615a57760c56d450f794d775b25df2089b24a 100644 --- a/polkadot/bridges/bin/rialto/node/src/command.rs +++ b/polkadot/bridges/bin/rialto/node/src/command.rs @@ -14,9 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . 
-use crate::cli::{Cli, Subcommand}; -use crate::service; -use crate::service::new_partial; +use crate::{ + cli::{Cli, Subcommand}, + service::new_partial, +}; use rialto_runtime::{Block, RuntimeApi}; use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli}; use sc_service::PartialComponents; @@ -69,24 +70,23 @@ impl SubstrateCli for Cli { /// Parse and run command line arguments pub fn run() -> sc_cli::Result<()> { let cli = Cli::from_args(); - sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::Custom( + sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::custom( rialto_runtime::SS58Prefix::get() as u16, )); match &cli.subcommand { - Some(Subcommand::Benchmark(cmd)) => { + Some(Subcommand::Benchmark(cmd)) => if cfg!(feature = "runtime-benchmarks") { let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run::(config)) + runner.sync_run(|config| cmd.run::(config)) } else { println!( "Benchmarking wasn't enabled when building the node. \ You can enable it with `--features runtime-benchmarks`." ); Ok(()) - } - } + }, Some(Subcommand::Key(cmd)) => cmd.run(&cli), Some(Subcommand::Sign(cmd)) => cmd.run(), Some(Subcommand::Verify(cmd)) => cmd.run(), @@ -94,79 +94,99 @@ pub fn run() -> sc_cli::Result<()> { Some(Subcommand::BuildSpec(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) - } + }, Some(Subcommand::CheckBlock(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - import_queue, - .. - } = new_partial(&config)?; + runner.async_run(|mut config| { + let PartialComponents { client, task_manager, import_queue, .. 
} = + new_partial(&mut config).map_err(service_error)?; Ok((cmd.run(client, import_queue), task_manager)) }) - } + }, Some(Subcommand::ExportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, task_manager, .. - } = new_partial(&config)?; + runner.async_run(|mut config| { + let PartialComponents { client, task_manager, .. } = + new_partial(&mut config).map_err(service_error)?; Ok((cmd.run(client, config.database), task_manager)) }) - } + }, Some(Subcommand::ExportState(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, task_manager, .. - } = new_partial(&config)?; + runner.async_run(|mut config| { + let PartialComponents { client, task_manager, .. } = + new_partial(&mut config).map_err(service_error)?; Ok((cmd.run(client, config.chain_spec), task_manager)) }) - } + }, Some(Subcommand::ImportBlocks(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - import_queue, - .. - } = new_partial(&config)?; + runner.async_run(|mut config| { + let PartialComponents { client, task_manager, import_queue, .. } = + new_partial(&mut config).map_err(service_error)?; Ok((cmd.run(client, import_queue), task_manager)) }) - } + }, Some(Subcommand::PurgeChain(cmd)) => { let runner = cli.create_runner(cmd)?; runner.sync_run(|config| cmd.run(config.database)) - } + }, Some(Subcommand::Revert(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - backend, - .. - } = new_partial(&config)?; + runner.async_run(|mut config| { + let PartialComponents { client, task_manager, backend, .. 
} = + new_partial(&mut config).map_err(service_error)?; Ok((cmd.run(client, backend), task_manager)) }) - } + }, Some(Subcommand::Inspect(cmd)) => { let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run::(config)) - } + runner.sync_run(|config| { + cmd.run::(config) + }) + }, + Some(Subcommand::PvfPrepareWorker(cmd)) => { + let mut builder = sc_cli::LoggerBuilder::new(""); + builder.with_colors(false); + let _ = builder.init(); + + polkadot_node_core_pvf::prepare_worker_entrypoint(&cmd.socket_path); + Ok(()) + }, + Some(crate::cli::Subcommand::PvfExecuteWorker(cmd)) => { + let mut builder = sc_cli::LoggerBuilder::new(""); + builder.with_colors(false); + let _ = builder.init(); + + polkadot_node_core_pvf::execute_worker_entrypoint(&cmd.socket_path); + Ok(()) + }, None => { let runner = cli.create_runner(&cli.run)?; - runner - .run_node_until_exit(|config| async move { - match config.role { - Role::Light => service::new_light(config), - _ => service::new_full(config), - } - }) - .map_err(sc_cli::Error::Service) - } + + // some parameters that are used by polkadot nodes, but that are not used by our binary + // let jaeger_agent = None; + // let grandpa_pause = None; + // let no_beefy = true; + // let telemetry_worker_handler = None; + // let is_collator = crate::service::IsCollator::No; + let overseer_gen = crate::overseer::RealOverseerGen; + runner.run_node_until_exit(|config| async move { + match config.role { + Role::Light => Err(sc_cli::Error::Service(sc_service::Error::Other( + "Light client is not supported by this node".into(), + ))), + _ => crate::service::build_full(config, overseer_gen) + .map(|full| full.task_manager) + .map_err(service_error), + } + }) + }, } } + +// We don't want to change 'service.rs' too much to ease future updates => it'll keep using +// its own error enum like original polkadot service does. 
+fn service_error(err: crate::service::Error) -> sc_cli::Error { + sc_cli::Error::Application(Box::new(err)) +} diff --git a/polkadot/bridges/bin/rialto/node/src/main.rs b/polkadot/bridges/bin/rialto/node/src/main.rs index f319d1437a98fa41f19e8bd39266eef74787d3fb..824814224e548418d402e542eec8e33755b949e4 100644 --- a/polkadot/bridges/bin/rialto/node/src/main.rs +++ b/polkadot/bridges/bin/rialto/node/src/main.rs @@ -23,6 +23,8 @@ mod chain_spec; mod service; mod cli; mod command; +mod overseer; +mod parachains_db; /// Run the Rialto Node fn main() -> sc_cli::Result<()> { diff --git a/polkadot/bridges/bin/rialto/node/src/overseer.rs b/polkadot/bridges/bin/rialto/node/src/overseer.rs new file mode 100644 index 0000000000000000000000000000000000000000..9a7025e77c9b400b39d117c75d8db6254cd9db85 --- /dev/null +++ b/polkadot/bridges/bin/rialto/node/src/overseer.rs @@ -0,0 +1,316 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! This is almost 1:1 copy of `node/service/src/overseer.rs` file from Polkadot repository. +//! The only exception is that we don't support db upgrades => no `upgrade.rs` module. 
+ +// this warning comes from `polkadot_overseer::AllSubsystems` type +#![allow(clippy::type_complexity)] + +use crate::service::{AuthorityDiscoveryApi, Error}; +use rialto_runtime::{opaque::Block, Hash}; + +use lru::LruCache; +use polkadot_availability_distribution::IncomingRequestReceivers; +use polkadot_node_core_approval_voting::Config as ApprovalVotingConfig; +use polkadot_node_core_av_store::Config as AvailabilityConfig; +use polkadot_node_core_candidate_validation::Config as CandidateValidationConfig; +use polkadot_node_core_chain_selection::Config as ChainSelectionConfig; +use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig; +use polkadot_node_network_protocol::request_response::{v1 as request_v1, IncomingRequestReceiver}; +use polkadot_overseer::{ + metrics::Metrics as OverseerMetrics, BlockInfo, MetricsTrait, Overseer, OverseerBuilder, + OverseerConnector, OverseerHandle, +}; +use polkadot_primitives::v1::ParachainHost; +use sc_authority_discovery::Service as AuthorityDiscoveryService; +use sc_client_api::AuxStore; +use sc_keystore::LocalKeystore; +use sp_api::ProvideRuntimeApi; +use sp_blockchain::HeaderBackend; +use sp_consensus_babe::BabeApi; +use sp_core::traits::SpawnNamed; +use std::sync::Arc; +use substrate_prometheus_endpoint::Registry; + +pub use polkadot_approval_distribution::ApprovalDistribution as ApprovalDistributionSubsystem; +pub use polkadot_availability_bitfield_distribution::BitfieldDistribution as BitfieldDistributionSubsystem; +pub use polkadot_availability_distribution::AvailabilityDistributionSubsystem; +pub use polkadot_availability_recovery::AvailabilityRecoverySubsystem; +pub use polkadot_collator_protocol::{CollatorProtocolSubsystem, ProtocolSide}; +pub use polkadot_dispute_distribution::DisputeDistributionSubsystem; +pub use polkadot_gossip_support::GossipSupport as GossipSupportSubsystem; +pub use polkadot_network_bridge::NetworkBridge as NetworkBridgeSubsystem; +pub use 
polkadot_node_collation_generation::CollationGenerationSubsystem; +pub use polkadot_node_core_approval_voting::ApprovalVotingSubsystem; +pub use polkadot_node_core_av_store::AvailabilityStoreSubsystem; +pub use polkadot_node_core_backing::CandidateBackingSubsystem; +pub use polkadot_node_core_bitfield_signing::BitfieldSigningSubsystem; +pub use polkadot_node_core_candidate_validation::CandidateValidationSubsystem; +pub use polkadot_node_core_chain_api::ChainApiSubsystem; +pub use polkadot_node_core_chain_selection::ChainSelectionSubsystem; +pub use polkadot_node_core_dispute_coordinator::DisputeCoordinatorSubsystem; +pub use polkadot_node_core_provisioner::ProvisionerSubsystem; +pub use polkadot_node_core_runtime_api::RuntimeApiSubsystem; +pub use polkadot_statement_distribution::StatementDistribution as StatementDistributionSubsystem; + +/// Arguments passed for overseer construction. +pub struct OverseerGenArgs<'a, Spawner, RuntimeClient> +where + RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, + RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + Spawner: 'static + SpawnNamed + Clone + Unpin, +{ + /// Set of initial relay chain leaves to track. + pub leaves: Vec, + /// The keystore to use for e.g. validator keys. + pub keystore: Arc, + /// Runtime client generic, providing the `ProvideRuntimeApi` trait besides others. + pub runtime_client: Arc, + /// The underlying key value store for the parachains. + pub parachains_db: Arc, + /// Underlying network service implementation. + pub network_service: Arc>, + /// Underlying authority discovery service. 
+ pub authority_discovery_service: AuthorityDiscoveryService, + /// POV request receiver + pub pov_req_receiver: IncomingRequestReceiver, + pub chunk_req_receiver: IncomingRequestReceiver, + pub collation_req_receiver: IncomingRequestReceiver, + pub available_data_req_receiver: + IncomingRequestReceiver, + pub statement_req_receiver: IncomingRequestReceiver, + pub dispute_req_receiver: IncomingRequestReceiver, + /// Prometheus registry, commonly used for production systems, less so for test. + pub registry: Option<&'a Registry>, + /// Task spawner to be used throughout the overseer and the APIs it provides. + pub spawner: Spawner, + /// Configuration for the approval voting subsystem. + pub approval_voting_config: ApprovalVotingConfig, + /// Configuration for the availability store subsystem. + pub availability_config: AvailabilityConfig, + /// Configuration for the candidate validation subsystem. + pub candidate_validation_config: CandidateValidationConfig, + /// Configuration for the chain selection subsystem. + pub chain_selection_config: ChainSelectionConfig, + /// Configuration for the dispute coordinator subsystem. + pub dispute_coordinator_config: DisputeCoordinatorConfig, +} + +/// Obtain a prepared `OverseerBuilder`, that is initialized +/// with all default values. 
+pub fn prepared_overseer_builder( + OverseerGenArgs { + leaves, + keystore, + runtime_client, + parachains_db, + network_service, + authority_discovery_service, + pov_req_receiver, + chunk_req_receiver, + collation_req_receiver: _, + available_data_req_receiver, + statement_req_receiver, + dispute_req_receiver, + registry, + spawner, + approval_voting_config, + availability_config, + candidate_validation_config, + chain_selection_config, + dispute_coordinator_config, + }: OverseerGenArgs<'_, Spawner, RuntimeClient>, +) -> Result< + OverseerBuilder< + Spawner, + Arc, + CandidateValidationSubsystem, + CandidateBackingSubsystem, + StatementDistributionSubsystem, + AvailabilityDistributionSubsystem, + AvailabilityRecoverySubsystem, + BitfieldSigningSubsystem, + BitfieldDistributionSubsystem, + ProvisionerSubsystem, + RuntimeApiSubsystem, + AvailabilityStoreSubsystem, + NetworkBridgeSubsystem< + Arc>, + AuthorityDiscoveryService, + >, + ChainApiSubsystem, + CollationGenerationSubsystem, + CollatorProtocolSubsystem, + ApprovalDistributionSubsystem, + ApprovalVotingSubsystem, + GossipSupportSubsystem, + DisputeCoordinatorSubsystem, + DisputeDistributionSubsystem, + ChainSelectionSubsystem, + >, + Error, +> +where + RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, + RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + Spawner: 'static + SpawnNamed + Clone + Unpin, +{ + use polkadot_node_subsystem_util::metrics::Metrics; + use std::iter::FromIterator; + + let metrics = ::register(registry)?; + + let builder = Overseer::builder() + .availability_distribution(AvailabilityDistributionSubsystem::new( + keystore.clone(), + IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver }, + Metrics::register(registry)?, + )) + .availability_recovery(AvailabilityRecoverySubsystem::with_chunks_only( + available_data_req_receiver, + Metrics::register(registry)?, + )) + .availability_store(AvailabilityStoreSubsystem::new( + 
parachains_db.clone(), + availability_config, + Metrics::register(registry)?, + )) + .bitfield_distribution(BitfieldDistributionSubsystem::new(Metrics::register(registry)?)) + .bitfield_signing(BitfieldSigningSubsystem::new( + spawner.clone(), + keystore.clone(), + Metrics::register(registry)?, + )) + .candidate_backing(CandidateBackingSubsystem::new( + spawner.clone(), + keystore.clone(), + Metrics::register(registry)?, + )) + .candidate_validation(CandidateValidationSubsystem::with_config( + candidate_validation_config, + Metrics::register(registry)?, // candidate-validation metrics + Metrics::register(registry)?, // validation host metrics + )) + .chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?)) + .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) + .collator_protocol(CollatorProtocolSubsystem::new(ProtocolSide::Validator { + keystore: keystore.clone(), + eviction_policy: Default::default(), + metrics: Metrics::register(registry)?, + })) + .network_bridge(NetworkBridgeSubsystem::new( + network_service.clone(), + authority_discovery_service.clone(), + Box::new(network_service.clone()), + Metrics::register(registry)?, + )) + .provisioner(ProvisionerSubsystem::new(spawner.clone(), (), Metrics::register(registry)?)) + .runtime_api(RuntimeApiSubsystem::new( + runtime_client.clone(), + Metrics::register(registry)?, + spawner.clone(), + )) + .statement_distribution(StatementDistributionSubsystem::new( + keystore.clone(), + statement_req_receiver, + Metrics::register(registry)?, + )) + .approval_distribution(ApprovalDistributionSubsystem::new(Metrics::register(registry)?)) + .approval_voting(ApprovalVotingSubsystem::with_config( + approval_voting_config, + parachains_db.clone(), + keystore.clone(), + Box::new(network_service), + Metrics::register(registry)?, + )) + .gossip_support(GossipSupportSubsystem::new( + keystore.clone(), + authority_discovery_service.clone(), + )) + 
.dispute_coordinator(DisputeCoordinatorSubsystem::new( + parachains_db.clone(), + dispute_coordinator_config, + keystore.clone(), + Metrics::register(registry)?, + )) + .dispute_distribution(DisputeDistributionSubsystem::new( + keystore, + dispute_req_receiver, + authority_discovery_service, + Metrics::register(registry)?, + )) + .chain_selection(ChainSelectionSubsystem::new(chain_selection_config, parachains_db)) + .leaves(Vec::from_iter( + leaves + .into_iter() + .map(|BlockInfo { hash, parent_hash: _, number }| (hash, number)), + )) + .activation_external_listeners(Default::default()) + .span_per_active_leaf(Default::default()) + .active_leaves(Default::default()) + .supports_parachains(runtime_client) + .known_leaves(LruCache::new(KNOWN_LEAVES_CACHE_SIZE)) + .metrics(metrics) + .spawner(spawner); + Ok(builder) +} + +/// Trait for the `fn` generating the overseer. +/// +/// Default behavior is to create an unmodified overseer, as `RealOverseerGen` +/// would do. +pub trait OverseerGen { + /// Overwrite the full generation of the overseer, including the subsystems. + fn generate( + &self, + connector: OverseerConnector, + args: OverseerGenArgs<'_, Spawner, RuntimeClient>, + ) -> Result<(Overseer>, OverseerHandle), Error> + where + RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, + RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + Spawner: 'static + SpawnNamed + Clone + Unpin, + { + let gen = RealOverseerGen; + RealOverseerGen::generate::(&gen, connector, args) + } + // It would be nice to make `create_subsystems` part of this trait, + // but the amount of generic arguments that would be required as + // as consequence make this rather annoying to implement and use. +} + +use polkadot_overseer::KNOWN_LEAVES_CACHE_SIZE; + +/// The regular set of subsystems. 
+pub struct RealOverseerGen; + +impl OverseerGen for RealOverseerGen { + fn generate( + &self, + connector: OverseerConnector, + args: OverseerGenArgs<'_, Spawner, RuntimeClient>, + ) -> Result<(Overseer>, OverseerHandle), Error> + where + RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, + RuntimeClient::Api: ParachainHost + BabeApi + AuthorityDiscoveryApi, + Spawner: 'static + SpawnNamed + Clone + Unpin, + { + prepared_overseer_builder(args)? + .build_with_connector(connector) + .map_err(|e| e.into()) + } +} diff --git a/polkadot/bridges/bin/rialto/node/src/parachains_db.rs b/polkadot/bridges/bin/rialto/node/src/parachains_db.rs new file mode 100644 index 0000000000000000000000000000000000000000..bf2052043c98797e5f2e594b75ada58397f4d109 --- /dev/null +++ b/polkadot/bridges/bin/rialto/node/src/parachains_db.rs @@ -0,0 +1,104 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! This is almost 1:1 copy of `node/service/parachains_db/mod.rs` file from Polkadot repository. +//! The only exception is that we don't support db upgrades => no `upgrade.rs` module. 
+ +use kvdb::KeyValueDB; +use std::{io, path::PathBuf, sync::Arc}; + +mod columns { + pub const NUM_COLUMNS: u32 = 5; + + pub const COL_AVAILABILITY_DATA: u32 = 0; + pub const COL_AVAILABILITY_META: u32 = 1; + pub const COL_APPROVAL_DATA: u32 = 2; + pub const COL_CHAIN_SELECTION_DATA: u32 = 3; + pub const COL_DISPUTE_COORDINATOR_DATA: u32 = 4; +} + +/// Columns used by different subsystems. +#[derive(Debug, Clone)] +pub struct ColumnsConfig { + /// The column used by the av-store for data. + pub col_availability_data: u32, + /// The column used by the av-store for meta information. + pub col_availability_meta: u32, + /// The column used by approval voting for data. + pub col_approval_data: u32, + /// The column used by chain selection for data. + pub col_chain_selection_data: u32, + /// The column used by dispute coordinator for data. + pub col_dispute_coordinator_data: u32, +} + +/// The real columns used by the parachains DB. +pub const REAL_COLUMNS: ColumnsConfig = ColumnsConfig { + col_availability_data: columns::COL_AVAILABILITY_DATA, + col_availability_meta: columns::COL_AVAILABILITY_META, + col_approval_data: columns::COL_APPROVAL_DATA, + col_chain_selection_data: columns::COL_CHAIN_SELECTION_DATA, + col_dispute_coordinator_data: columns::COL_DISPUTE_COORDINATOR_DATA, +}; + +/// The cache size for each column, in megabytes. +#[derive(Debug, Clone)] +pub struct CacheSizes { + /// Cache used by availability data. + pub availability_data: usize, + /// Cache used by availability meta. + pub availability_meta: usize, + /// Cache used by approval data. + pub approval_data: usize, +} + +impl Default for CacheSizes { + fn default() -> Self { + CacheSizes { availability_data: 25, availability_meta: 1, approval_data: 5 } + } +} + +fn other_io_error(err: String) -> io::Error { + io::Error::new(io::ErrorKind::Other, err) +} + +/// Open the database on disk, creating it if it doesn't exist. 
+pub fn open_creating(root: PathBuf, cache_sizes: CacheSizes) -> io::Result> { + use kvdb_rocksdb::{Database, DatabaseConfig}; + + let path = root.join("parachains").join("db"); + + let mut db_config = DatabaseConfig::with_columns(columns::NUM_COLUMNS); + + let _ = db_config + .memory_budget + .insert(columns::COL_AVAILABILITY_DATA, cache_sizes.availability_data); + let _ = db_config + .memory_budget + .insert(columns::COL_AVAILABILITY_META, cache_sizes.availability_meta); + let _ = db_config + .memory_budget + .insert(columns::COL_APPROVAL_DATA, cache_sizes.approval_data); + + let path_str = path + .to_str() + .ok_or_else(|| other_io_error(format!("Bad database path: {:?}", path)))?; + + std::fs::create_dir_all(&path_str)?; + let db = Database::open(&db_config, path_str)?; + + Ok(Arc::new(db)) +} diff --git a/polkadot/bridges/bin/rialto/node/src/service.rs b/polkadot/bridges/bin/rialto/node/src/service.rs index e29ff2576509dd5df3a9706e95d4a4b8c2ed8c0d..3349b09edb9f6b0dc939e50954a21b4d7552847d 100644 --- a/polkadot/bridges/bin/rialto/node/src/service.rs +++ b/polkadot/bridges/bin/rialto/node/src/service.rs @@ -14,33 +14,42 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
- -// ===================================================================================== -// ===================================================================================== -// ===================================================================================== -// UPDATE GUIDE: -// 1) replace everything with node-template/src/service.rs contents (found in main Substrate repo); -// 2) the only thing to keep from old code, is `rpc_extensions_builder` - we use our own custom RPCs; -// 3) fix compilation errors; -// 4) test :) -// ===================================================================================== -// ===================================================================================== -// ===================================================================================== - -//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. - +//! Rialto chain node service. +//! +//! The code is mostly copy of `service/src/lib.rs` file from Polkadot repository +//! without optional functions, and with BEEFY added on top. 
+ +use crate::overseer::{OverseerGen, OverseerGenArgs}; + +use polkadot_client::RuntimeApiCollection; +use polkadot_node_core_approval_voting::Config as ApprovalVotingConfig; +use polkadot_node_core_av_store::Config as AvailabilityConfig; +use polkadot_node_core_candidate_validation::Config as CandidateValidationConfig; +use polkadot_node_core_chain_selection::Config as ChainSelectionConfig; +use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig; +use polkadot_node_network_protocol::request_response::IncomingRequest; +use polkadot_overseer::{BlockInfo, OverseerConnector}; +use polkadot_primitives::v1::BlockId; use rialto_runtime::{self, opaque::Block, RuntimeApi}; -use sc_client_api::{ExecutorProvider, RemoteBackend}; -use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; -pub use sc_executor::NativeElseWasmExecutor; - -use sc_keystore::LocalKeystore; -use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; +use sc_client_api::ExecutorProvider; +use sc_executor::{NativeElseWasmExecutor, NativeExecutionDispatch}; +use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider; +use sc_service::{config::PrometheusConfig, Configuration, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; -use sp_consensus::SlotData; -use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; +use sp_api::{ConstructRuntimeApi, HeaderT}; +use sp_consensus::SelectChain; +use sp_runtime::traits::Block as BlockT; use std::{sync::Arc, time::Duration}; +use substrate_prometheus_endpoint::Registry; + +pub use polkadot_overseer::Handle; +pub use polkadot_primitives::v1::ParachainHost; +pub use sc_client_api::AuxStore; +pub use sp_authority_discovery::AuthorityDiscoveryApi; +pub use sp_blockchain::HeaderBackend; +pub use sp_consensus_babe::BabeApi; + +pub type Executor = NativeElseWasmExecutor; // Our native executor instance. 
pub struct ExecutorDispatch; @@ -57,31 +66,95 @@ impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { } } -type FullClient = sc_service::TFullClient>; +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error(transparent)] + Io(#[from] std::io::Error), + + #[error(transparent)] + Cli(#[from] sc_cli::Error), + + #[error(transparent)] + Blockchain(#[from] sp_blockchain::Error), + + #[error(transparent)] + Consensus(#[from] sp_consensus::Error), + + #[error(transparent)] + Service(#[from] sc_service::Error), + + #[error(transparent)] + Telemetry(#[from] sc_telemetry::Error), + + #[error("Failed to create an overseer")] + Overseer(#[from] polkadot_overseer::SubsystemError), + + #[error(transparent)] + Prometheus(#[from] substrate_prometheus_endpoint::PrometheusError), + + #[error("Authorities require the real overseer implementation")] + AuthoritiesRequireRealOverseer, + + #[error("Creating a custom database is required for validators")] + DatabasePathRequired, +} + +type FullClient = sc_service::TFullClient; type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; +type FullGrandpaBlockImport = + sc_finality_grandpa::GrandpaBlockImport; +type FullTransactionPool = sc_transaction_pool::FullPool; +type FullBabeBlockImport = + sc_consensus_babe::BabeBlockImport; +type FullBabeLink = sc_consensus_babe::BabeLink; +type FullGrandpaLink = sc_finality_grandpa::LinkHalf; + +// If we're using prometheus, use a registry with a prefix of `polkadot`. +fn set_prometheus_registry(config: &mut Configuration) -> Result<(), Error> { + if let Some(PrometheusConfig { registry, .. }) = config.prometheus_config.as_mut() { + *registry = Registry::new_custom(Some("polkadot".into()), None)?; + } + Ok(()) +} + +// Needed here for complex return type while `impl Trait` in type aliases is unstable. 
#[allow(clippy::type_complexity)] pub fn new_partial( - config: &Configuration, + config: &mut Configuration, ) -> Result< sc_service::PartialComponents< FullClient, FullBackend, FullSelectChain, sc_consensus::DefaultImportQueue, - sc_transaction_pool::FullPool, + FullTransactionPool, ( - sc_finality_grandpa::GrandpaBlockImport, - sc_finality_grandpa::LinkHalf, + impl Fn( + sc_rpc::DenyUnsafe, + sc_rpc::SubscriptionTaskExecutor, + ) -> Result, sc_service::Error>, + ( + FullBabeBlockImport, + FullGrandpaLink, + FullBabeLink, + beefy_gadget::notification::BeefySignedCommitmentSender, + ), + sc_finality_grandpa::SharedVoterState, + std::time::Duration, Option, ), >, - ServiceError, -> { - if config.keystore_remote.is_some() { - return Err(ServiceError::Other("Remote Keystores are not supported.".to_string())); - } + Error, +> +where + RuntimeApi: ConstructRuntimeApi + Send + Sync + 'static, + >::RuntimeApi: + RuntimeApiCollection>, + ExecutorDispatch: NativeExecutionDispatch + 'static, +{ + set_prometheus_registry(config)?; let telemetry = config .telemetry_endpoints @@ -94,14 +167,22 @@ pub fn new_partial( }) .transpose()?; - let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( - config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - )?; + let executor = NativeElseWasmExecutor::::new( + config.wasm_method, + config.default_heap_pages, + config.max_runtime_instances, + ); + + let (client, backend, keystore_container, task_manager) = + sc_service::new_full_parts::( + config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + executor, + )?; let client = Arc::new(client); let telemetry = telemetry.map(|(worker, telemetry)| { - task_manager.spawn_handle().spawn("telemetry", worker.run()); + task_manager.spawn_handle().spawn("telemetry", None, worker.run()); telemetry }); @@ -115,166 +196,429 @@ pub fn new_partial( client.clone(), ); - let (grandpa_block_import, grandpa_link) = 
sc_finality_grandpa::block_import( + let (grandpa_block_import, grandpa_link) = + sc_finality_grandpa::block_import_with_authority_set_hard_forks( + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), + Vec::new(), + telemetry.as_ref().map(|x| x.handle()), + )?; + let justification_import = grandpa_block_import.clone(); + + let babe_config = sc_consensus_babe::Config::get_or_compute(&*client)?; + let (block_import, babe_link) = + sc_consensus_babe::block_import(babe_config.clone(), grandpa_block_import, client.clone())?; + + let slot_duration = babe_link.config().slot_duration(); + let import_queue = sc_consensus_babe::import_queue( + babe_link.clone(), + block_import.clone(), + Some(Box::new(justification_import)), client.clone(), - &(client.clone() as Arc<_>), select_chain.clone(), + move |_, ()| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = + sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + slot_duration, + ); + + Ok((timestamp, slot)) + }, + &task_manager.spawn_essential_handle(), + config.prometheus_registry(), + sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), telemetry.as_ref().map(|x| x.handle()), )?; - let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration(); + let justification_stream = grandpa_link.justification_stream(); + let shared_authority_set = grandpa_link.shared_authority_set().clone(); + let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty(); - let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { - block_import: grandpa_block_import.clone(), - justification_import: Some(Box::new(grandpa_block_import.clone())), - client: client.clone(), - create_inherent_data_providers: move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + let (signed_commitment_sender, signed_commitment_stream) = + 
beefy_gadget::notification::BeefySignedCommitmentStream::channel(); + + let import_setup = (block_import, grandpa_link, babe_link, signed_commitment_sender); + let rpc_setup = shared_voter_state.clone(); + + let slot_duration = babe_config.slot_duration(); + + let rpc_extensions_builder = { + let client = client.clone(); + let transaction_pool = transaction_pool.clone(); + let backend = backend.clone(); + + move |deny_unsafe, + subscription_executor: sc_rpc::SubscriptionTaskExecutor| + -> Result, sc_service::Error> { + use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; + use sc_finality_grandpa_rpc::{GrandpaApi, GrandpaRpcHandler}; + use substrate_frame_rpc_system::{FullSystem, SystemApi}; - let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( - *timestamp, - slot_duration, + let backend = backend.clone(); + let client = client.clone(); + let pool = transaction_pool.clone(); + + let shared_voter_state = shared_voter_state.clone(); + + let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service( + backend, + Some(shared_authority_set.clone()), ); - Ok((timestamp, slot)) - }, - spawner: &task_manager.spawn_essential_handle(), - can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), - registry: config.prometheus_registry(), - check_for_equivocation: Default::default(), - telemetry: telemetry.as_ref().map(|x| x.handle()), - })?; + let mut io = jsonrpc_core::IoHandler::default(); + io.extend_with(SystemApi::to_delegate(FullSystem::new( + client.clone(), + pool, + deny_unsafe, + ))); + io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new( + client.clone(), + ))); + io.extend_with(GrandpaApi::to_delegate(GrandpaRpcHandler::new( + shared_authority_set.clone(), + shared_voter_state, + justification_stream.clone(), + subscription_executor.clone(), + finality_proof_provider, + ))); + 
io.extend_with(beefy_gadget_rpc::BeefyApi::to_delegate( + beefy_gadget_rpc::BeefyRpcHandler::new( + signed_commitment_stream.clone(), + subscription_executor, + ), + )); + io.extend_with(pallet_mmr_rpc::MmrApi::to_delegate(pallet_mmr_rpc::Mmr::new(client))); + + Ok(io) + } + }; Ok(sc_service::PartialComponents { client, backend, task_manager, - import_queue, keystore_container, select_chain, + import_queue, transaction_pool, - other: (grandpa_block_import, grandpa_link, telemetry), + other: (rpc_extensions_builder, import_setup, rpc_setup, slot_duration, telemetry), }) } -fn remote_keystore(_url: &str) -> Result, &'static str> { - // FIXME: here would the concrete keystore be built, - // must return a concrete type (NOT `LocalKeystore`) that - // implements `CryptoStore` and `SyncCryptoStore` - Err("Remote Keystore not supported.") +pub struct NewFull { + pub task_manager: TaskManager, + pub client: C, + pub overseer_handle: Option, + pub network: Arc::Hash>>, + pub rpc_handlers: sc_service::RpcHandlers, + pub backend: Arc, +} + +/// The maximum number of active leaves we forward to the [`Overseer`] on start up. +const MAX_ACTIVE_LEAVES: usize = 4; + +/// Returns the active leaves the overseer should start with. +async fn active_leaves( + select_chain: &sc_consensus::LongestChain, + client: &FullClient, +) -> Result, Error> +where + RuntimeApi: ConstructRuntimeApi + Send + Sync + 'static, + >::RuntimeApi: + RuntimeApiCollection>, + ExecutorDispatch: NativeExecutionDispatch + 'static, +{ + let best_block = select_chain.best_chain().await?; + + let mut leaves = select_chain + .leaves() + .await + .unwrap_or_default() + .into_iter() + .filter_map(|hash| { + let number = client.number(hash).ok()??; + + // Only consider leaves that are in maximum an uncle of the best block. 
+ if number < best_block.number().saturating_sub(1) || hash == best_block.hash() { + return None + } + + let parent_hash = client.header(&BlockId::Hash(hash)).ok()??.parent_hash; + + Some(BlockInfo { hash, parent_hash, number }) + }) + .collect::>(); + + // Sort by block number and get the maximum number of leaves + leaves.sort_by_key(|b| b.number); + + leaves.push(BlockInfo { + hash: best_block.hash(), + parent_hash: *best_block.parent_hash(), + number: *best_block.number(), + }); + + Ok(leaves.into_iter().rev().take(MAX_ACTIVE_LEAVES).collect()) } -/// Builds a new service for a full client. -pub fn new_full(mut config: Configuration) -> Result { +// Create a new full node. +pub fn new_full( + mut config: Configuration, + program_path: Option, + overseer_gen: impl OverseerGen, +) -> Result>, Error> +where + RuntimeApi: ConstructRuntimeApi + Send + Sync + 'static, + >::RuntimeApi: + RuntimeApiCollection>, + ExecutorDispatch: NativeExecutionDispatch + 'static, +{ + let is_collator = false; + + let role = config.role.clone(); + let force_authoring = config.force_authoring; + let backoff_authoring_blocks = + Some(sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default()); + + let disable_grandpa = config.disable_grandpa; + let name = config.network.node_name.clone(); + let sc_service::PartialComponents { client, backend, mut task_manager, - import_queue, - mut keystore_container, + keystore_container, select_chain, + import_queue, transaction_pool, - other: (block_import, grandpa_link, mut telemetry), - } = new_partial(&config)?; - - if let Some(url) = &config.keystore_remote { - match remote_keystore(url) { - Ok(k) => keystore_container.set_remote_keystore(k), - Err(e) => { - return Err(ServiceError::Other(format!( - "Error hooking up remote keystore for {}: {}", - url, e - ))) - } - }; - } + other: (rpc_extensions_builder, import_setup, rpc_setup, slot_duration, mut telemetry), + } = new_partial(&mut config)?; - config - .network - .extra_sets - 
.push(sc_finality_grandpa::grandpa_peers_set_config()); + let prometheus_registry = config.prometheus_registry().cloned(); - let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - on_demand: None, - block_announce_validator_builder: None, - })?; + let overseer_connector = OverseerConnector::default(); - if config.offchain_worker.enabled { - sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone()); + let shared_voter_state = rpc_setup; + let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; + + // Note: GrandPa is pushed before the Polkadot-specific protocols. This doesn't change + // anything in terms of behaviour, but makes the logs more consistent with the other + // Substrate nodes. + config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config()); + + config.network.extra_sets.push(beefy_gadget::beefy_peers_set_config()); + + { + use polkadot_network_bridge::{peer_sets_info, IsAuthority}; + let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No }; + config.network.extra_sets.extend(peer_sets_info(is_authority)); } - let role = config.role.clone(); - let force_authoring = config.force_authoring; - let backoff_authoring_blocks: Option<()> = None; - let name = config.network.node_name.clone(); - let enable_grandpa = !config.disable_grandpa; - let prometheus_registry = config.prometheus_registry().cloned(); + let (pov_req_receiver, cfg) = IncomingRequest::get_config_receiver(); + config.network.request_response_protocols.push(cfg); + let (chunk_req_receiver, cfg) = IncomingRequest::get_config_receiver(); + config.network.request_response_protocols.push(cfg); + let (collation_req_receiver, cfg) = IncomingRequest::get_config_receiver(); + 
config.network.request_response_protocols.push(cfg); + let (available_data_req_receiver, cfg) = IncomingRequest::get_config_receiver(); + config.network.request_response_protocols.push(cfg); + let (statement_req_receiver, cfg) = IncomingRequest::get_config_receiver(); + config.network.request_response_protocols.push(cfg); + let (dispute_req_receiver, cfg) = IncomingRequest::get_config_receiver(); + config.network.request_response_protocols.push(cfg); + + let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new( + backend.clone(), + import_setup.1.shared_authority_set().clone(), + vec![], + )); - let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty(); + let (network, system_rpc_tx, network_starter) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + block_announce_validator_builder: None, + warp_sync: Some(warp_sync), + })?; - let rpc_extensions_builder = { - use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider; + if config.offchain_worker.enabled { + let _ = sc_service::build_offchain_workers( + &config, + task_manager.spawn_handle(), + client.clone(), + network.clone(), + ); + } - use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; - use sc_finality_grandpa_rpc::{GrandpaApi, GrandpaRpcHandler}; - use sc_rpc::DenyUnsafe; - use substrate_frame_rpc_system::{FullSystem, SystemApi}; + let parachains_db = crate::parachains_db::open_creating( + config.database.path().ok_or(Error::DatabasePathRequired)?.into(), + crate::parachains_db::CacheSizes::default(), + )?; - let backend = backend.clone(); - let client = client.clone(); - let pool = transaction_pool.clone(); + let availability_config = AvailabilityConfig { + col_data: crate::parachains_db::REAL_COLUMNS.col_availability_data, + col_meta: 
crate::parachains_db::REAL_COLUMNS.col_availability_meta, + }; - let justification_stream = grandpa_link.justification_stream(); - let shared_authority_set = grandpa_link.shared_authority_set().clone(); - let shared_voter_state = shared_voter_state.clone(); + let approval_voting_config = ApprovalVotingConfig { + col_data: crate::parachains_db::REAL_COLUMNS.col_approval_data, + slot_duration_millis: slot_duration.as_millis() as u64, + }; - let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend, Some(shared_authority_set.clone())); + let candidate_validation_config = CandidateValidationConfig { + artifacts_cache_path: config + .database + .path() + .ok_or(Error::DatabasePathRequired)? + .join("pvf-artifacts"), + program_path: match program_path { + None => std::env::current_exe()?, + Some(p) => p, + }, + }; - Box::new(move |_, subscription_executor| { - let mut io = jsonrpc_core::IoHandler::default(); - io.extend_with(SystemApi::to_delegate(FullSystem::new( - client.clone(), - pool.clone(), - DenyUnsafe::No, - ))); - io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new( - client.clone(), - ))); - io.extend_with(GrandpaApi::to_delegate(GrandpaRpcHandler::new( - shared_authority_set.clone(), - shared_voter_state.clone(), - justification_stream.clone(), - subscription_executor, - finality_proof_provider.clone(), - ))); + let chain_selection_config = ChainSelectionConfig { + col_data: crate::parachains_db::REAL_COLUMNS.col_chain_selection_data, + stagnant_check_interval: polkadot_node_core_chain_selection::StagnantCheckInterval::never(), + }; - io - }) + let dispute_coordinator_config = DisputeCoordinatorConfig { + col_data: crate::parachains_db::REAL_COLUMNS.col_dispute_coordinator_data, }; - let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { - network: network.clone(), + let rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { + config, + backend: backend.clone(), client: 
client.clone(), keystore: keystore_container.sync_keystore(), - task_manager: &mut task_manager, + network: network.clone(), + rpc_extensions_builder: Box::new(rpc_extensions_builder), transaction_pool: transaction_pool.clone(), - rpc_extensions_builder, - on_demand: None, - remote_blockchain: None, - backend, + task_manager: &mut task_manager, system_rpc_tx, - config, telemetry: telemetry.as_mut(), })?; + let (block_import, link_half, babe_link, signed_commitment_sender) = import_setup; + + let overseer_client = client.clone(); + let spawner = task_manager.spawn_handle(); + let active_leaves = futures::executor::block_on(active_leaves(&select_chain, &*client))?; + + let authority_discovery_service = if role.is_authority() || is_collator { + use futures::StreamExt; + use sc_network::Event; + + let authority_discovery_role = if role.is_authority() { + sc_authority_discovery::Role::PublishAndDiscover(keystore_container.keystore()) + } else { + // don't publish our addresses when we're only a collator + sc_authority_discovery::Role::Discover + }; + let dht_event_stream = + network.event_stream("authority-discovery").filter_map(|e| async move { + match e { + Event::Dht(e) => Some(e), + _ => None, + } + }); + let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config( + sc_authority_discovery::WorkerConfig { + publish_non_global_ips: auth_disc_publish_non_global_ips, + ..Default::default() + }, + client.clone(), + network.clone(), + Box::pin(dht_event_stream), + authority_discovery_role, + prometheus_registry.clone(), + ); + + task_manager + .spawn_handle() + .spawn("authority-discovery-worker", None, worker.run()); + Some(service) + } else { + None + }; + + // we'd say let overseer_handler = + // authority_discovery_service.map(|authority_discovery_service|, ...), but in that case we + // couldn't use ? 
to propagate errors + let local_keystore = keystore_container.local_keystore(); + let maybe_params = + local_keystore.and_then(move |k| authority_discovery_service.map(|a| (a, k))); + + let overseer_handle = if let Some((authority_discovery_service, keystore)) = maybe_params { + let (overseer, overseer_handle) = overseer_gen + .generate::( + overseer_connector, + OverseerGenArgs { + leaves: active_leaves, + keystore, + runtime_client: overseer_client.clone(), + parachains_db, + availability_config, + approval_voting_config, + network_service: network.clone(), + authority_discovery_service, + registry: prometheus_registry.as_ref(), + spawner, + candidate_validation_config, + available_data_req_receiver, + chain_selection_config, + chunk_req_receiver, + collation_req_receiver, + dispute_coordinator_config, + dispute_req_receiver, + pov_req_receiver, + statement_req_receiver, + }, + )?; + let handle = Handle::new(overseer_handle); + + { + let handle = handle.clone(); + task_manager.spawn_essential_handle().spawn_blocking( + "overseer", + None, + Box::pin(async move { + use futures::{pin_mut, select, FutureExt}; + + let forward = polkadot_overseer::forward_events(overseer_client, handle); + + let forward = forward.fuse(); + let overseer_fut = overseer.run().fuse(); + + pin_mut!(overseer_fut); + pin_mut!(forward); + + select! 
{ + _ = forward => (), + _ = overseer_fut => (), + complete => (), + } + }), + ); + } + + Some(handle) + } else { + None + }; + if role.is_authority() { - let proposer_factory = sc_basic_authorship::ProposerFactory::new( + let can_author_with = + sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); + + let proposer = sc_basic_authorship::ProposerFactory::new( task_manager.spawn_handle(), client.clone(), transaction_pool, @@ -282,208 +626,131 @@ pub fn new_full(mut config: Configuration) -> Result telemetry.as_ref().map(|x| x.handle()), ); - let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - - let slot_duration = sc_consensus_aura::slot_duration(&*client)?; - let raw_slot_duration = slot_duration.slot_duration(); - - let aura = sc_consensus_aura::start_aura::(StartAuraParams { - slot_duration, - client, + let client_clone = client.clone(); + let overseer_handle = + overseer_handle.as_ref().ok_or(Error::AuthoritiesRequireRealOverseer)?.clone(); + let slot_duration = babe_link.config().slot_duration(); + let babe_config = sc_consensus_babe::BabeParams { + keystore: keystore_container.sync_keystore(), + client: client.clone(), select_chain, block_import, - proposer_factory, - create_inherent_data_providers: move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( - *timestamp, - raw_slot_duration, - ); - - Ok((timestamp, slot)) + env: proposer, + sync_oracle: network.clone(), + justification_sync_link: network.clone(), + create_inherent_data_providers: move |parent, ()| { + let client_clone = client_clone.clone(); + let overseer_handle = overseer_handle.clone(); + async move { + let parachain = polkadot_node_core_parachains_inherent::ParachainsInherentDataProvider::create( + &*client_clone, + overseer_handle, + parent, + ) + .await + .map_err(Box::new)?; + + let 
uncles = sc_consensus_uncles::create_uncles_inherent_data_provider( + &*client_clone, + parent, + )?; + + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + + let slot = sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration( + *timestamp, + slot_duration, + ); + + Ok((timestamp, slot, uncles, parachain)) + } }, force_authoring, backoff_authoring_blocks, - keystore: keystore_container.sync_keystore(), + babe_link, can_author_with, - sync_oracle: network.clone(), - justification_sync_link: network.clone(), - block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), + block_proposal_slot_portion: sc_consensus_babe::SlotProportion::new(2f32 / 3f32), max_block_proposal_slot_portion: None, telemetry: telemetry.as_ref().map(|x| x.handle()), - })?; + }; - // the AURA authoring task is considered essential, i.e. if it - // fails we take down the service with it. - task_manager.spawn_essential_handle().spawn_blocking("aura", aura); + let babe = sc_consensus_babe::start_babe(babe_config)?; + task_manager.spawn_essential_handle().spawn_blocking("babe", None, babe); } // if the node isn't actively participating in consensus then it doesn't // need a keystore, regardless of which protocol we use below. - let keystore = if role.is_authority() { - Some(keystore_container.sync_keystore()) - } else { - None + let keystore_opt = + if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; + + let beefy_params = beefy_gadget::BeefyParams { + client: client.clone(), + backend: backend.clone(), + key_store: keystore_opt.clone(), + network: network.clone(), + signed_commitment_sender, + min_block_delta: 2, + prometheus_registry: prometheus_registry.clone(), }; - let grandpa_config = sc_finality_grandpa::Config { - // FIXME #1578 make this available through chainspec - gossip_duration: Duration::from_millis(333), + // Start the BEEFY bridge gadget. 
+ task_manager.spawn_essential_handle().spawn_blocking( + "beefy-gadget", + None, + beefy_gadget::start_beefy_gadget::<_, _, _, _>(beefy_params), + ); + + let config = sc_finality_grandpa::Config { + // FIXME substrate#1578 make this available through chainspec + gossip_duration: Duration::from_millis(1000), justification_period: 512, name: Some(name), observer_enabled: false, - keystore, + keystore: keystore_opt, local_role: role, telemetry: telemetry.as_ref().map(|x| x.handle()), }; + let enable_grandpa = !disable_grandpa; if enable_grandpa { // start the full GRANDPA voter - // NOTE: non-authorities could run the GRANDPA observer protocol, but at - // this point the full voter should provide better guarantees of block - // and vote data availability than the observer. The observer has not - // been tested extensively yet and having most nodes in a network run it - // could lead to finality stalls. + // NOTE: unlike in substrate we are currently running the full + // GRANDPA voter protocol for all full nodes (regardless of whether + // they're validators or not). at this point the full voter should + // provide better guarantees of block and vote data availability than + // the observer. + + // add a custom voting rule to temporarily stop voting for new blocks + // after the given pause block is finalized and restarting after the + // given delay. + let builder = sc_finality_grandpa::VotingRulesBuilder::default(); + + let voting_rule = builder.build(); let grandpa_config = sc_finality_grandpa::GrandpaParams { - config: grandpa_config, - link: grandpa_link, - network, - voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), + config, + link: link_half, + network: network.clone(), + voting_rule, prometheus_registry, shared_voter_state, telemetry: telemetry.as_ref().map(|x| x.handle()), }; - // the GRANDPA voter task is considered infallible, i.e. - // if it fails we take down the service with it. 
- task_manager - .spawn_essential_handle() - .spawn_blocking("grandpa-voter", sc_finality_grandpa::run_grandpa_voter(grandpa_config)?); + task_manager.spawn_essential_handle().spawn_blocking( + "grandpa-voter", + None, + sc_finality_grandpa::run_grandpa_voter(grandpa_config)?, + ); } network_starter.start_network(); - Ok(task_manager) -} - -/// Builds a new service for a light client. -pub fn new_light(mut config: Configuration) -> Result { - let telemetry = config - .telemetry_endpoints - .clone() - .filter(|x| !x.is_empty()) - .map(|endpoints| -> Result<_, sc_telemetry::Error> { - let worker = TelemetryWorker::new(16)?; - let telemetry = worker.handle().new_telemetry(endpoints); - Ok((worker, telemetry)) - }) - .transpose()?; - - let (client, backend, keystore_container, mut task_manager, on_demand) = - sc_service::new_light_parts::( - &config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - )?; - - let mut telemetry = telemetry.map(|(worker, telemetry)| { - task_manager.spawn_handle().spawn("telemetry", worker.run()); - telemetry - }); - - config - .network - .extra_sets - .push(sc_finality_grandpa::grandpa_peers_set_config()); - - let select_chain = sc_consensus::LongestChain::new(backend.clone()); - - let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( - config.transaction_pool.clone(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), - on_demand.clone(), - )); - - let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( - client.clone(), - &(client.clone() as Arc<_>), - select_chain, - telemetry.as_ref().map(|x| x.handle()), - )?; - - let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration(); - - let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { - block_import: grandpa_block_import.clone(), - justification_import: Some(Box::new(grandpa_block_import)), - client: client.clone(), - create_inherent_data_providers: 
move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration( - *timestamp, - slot_duration, - ); - Ok((timestamp, slot)) - }, - spawner: &task_manager.spawn_essential_handle(), - can_author_with: sp_consensus::NeverCanAuthor, - registry: config.prometheus_registry(), - check_for_equivocation: Default::default(), - telemetry: telemetry.as_ref().map(|x| x.handle()), - })?; - - let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - on_demand: Some(on_demand.clone()), - block_announce_validator_builder: None, - })?; - - if config.offchain_worker.enabled { - sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone()); - } - - let enable_grandpa = !config.disable_grandpa; - if enable_grandpa { - let name = config.network.node_name.clone(); - - let config = sc_finality_grandpa::Config { - gossip_duration: std::time::Duration::from_millis(333), - justification_period: 512, - name: Some(name), - observer_enabled: false, - keystore: None, - local_role: config.role.clone(), - telemetry: telemetry.as_ref().map(|x| x.handle()), - }; - - task_manager.spawn_handle().spawn_blocking( - "grandpa-observer", - sc_finality_grandpa::run_grandpa_observer(config, grandpa_link, network.clone())?, - ); - } - - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - remote_blockchain: Some(backend.remote_blockchain()), - transaction_pool, - task_manager: &mut task_manager, - on_demand: Some(on_demand), - rpc_extensions_builder: Box::new(|_, _| ()), - config, - client, - keystore: keystore_container.sync_keystore(), - backend, - network, - system_rpc_tx, - telemetry: telemetry.as_mut(), - })?; + Ok(NewFull { 
task_manager, client, overseer_handle, network, rpc_handlers, backend }) +} - network_starter.start_network(); - Ok(task_manager) +pub fn build_full( + config: Configuration, + overseer_gen: impl OverseerGen, +) -> Result>, Error> { + new_full(config, None, overseer_gen) } diff --git a/polkadot/bridges/bin/rialto/runtime/Cargo.toml b/polkadot/bridges/bin/rialto/runtime/Cargo.toml index 4902c0c06a3df4bd5da28f5fcff1055793a23ec5..8298cdfbfbe835344f54af58fabbb27c2098469e 100644 --- a/polkadot/bridges/bin/rialto/runtime/Cargo.toml +++ b/polkadot/bridges/bin/rialto/runtime/Cargo.toml @@ -8,16 +8,15 @@ repository = "https://github.com/paritytech/parity-bridges-common/" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["derive"] } hex-literal = "0.3" -libsecp256k1 = { version = "0.3.4", optional = true, default-features = false, features = ["hmac"] } +libsecp256k1 = { version = "0.7", optional = true, default-features = false, features = ["hmac"] } log = { version = "0.4.14", default-features = false } -serde = { version = "1.0.124", optional = true, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +serde = { version = "1.0", optional = true, features = ["derive"] } # Bridge dependencies -bp-currency-exchange = { path = "../../../primitives/currency-exchange", default-features = false } -bp-eth-poa = { path = "../../../primitives/ethereum-poa", default-features = false } bp-header-chain = { path = "../../../primitives/header-chain", default-features = false } bp-message-dispatch = { path = "../../../primitives/message-dispatch", default-features = false } bp-messages = { path = "../../../primitives/messages", default-features = false } @@ -25,47 +24,56 @@ bp-millau = { path = 
"../../../primitives/chain-millau", default-features = fals bp-rialto = { path = "../../../primitives/chain-rialto", default-features = false } bp-runtime = { path = "../../../primitives/runtime", default-features = false } bridge-runtime-common = { path = "../../runtime-common", default-features = false } -pallet-bridge-currency-exchange = { path = "../../../modules/currency-exchange", default-features = false } pallet-bridge-dispatch = { path = "../../../modules/dispatch", default-features = false } -pallet-bridge-eth-poa = { path = "../../../modules/ethereum", default-features = false } pallet-bridge-grandpa = { path = "../../../modules/grandpa", default-features = false } pallet-bridge-messages = { path = "../../../modules/messages", default-features = false } pallet-shift-session-manager = { path = "../../../modules/shift-session-manager", default-features = false } # Substrate Dependencies -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } -frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = 
false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-trie = 
{ git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-beefy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-beefy-mmr = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-mmr = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-mmr-primitives = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-sudo = { 
git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-trie = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +# Polkadot (parachain) Dependencies + +polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false } +polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false } +polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false } [dev-dependencies] -libsecp256k1 = { version = "0.3.4", features = ["hmac"] } +libsecp256k1 = { version = "0.7", features = ["hmac"] } [build-dependencies] substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -73,8 +81,7 @@ substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", bran [features] default = ["std"] std = [ - "bp-currency-exchange/std", - "bp-eth-poa/std", + "beefy-primitives/std", "bp-header-chain/std", "bp-message-dispatch/std", "bp-messages/std", @@ -89,24 +96,31 @@ std = [ "frame-system-rpc-runtime-api/std", "frame-system/std", "log/std", - "pallet-aura/std", + "pallet-authority-discovery/std", + "pallet-babe/std", "pallet-balances/std", - "pallet-bridge-currency-exchange/std", + "pallet-beefy/std", + "pallet-beefy-mmr/std", "pallet-bridge-dispatch/std", - "pallet-bridge-eth-poa/std", "pallet-bridge-grandpa/std", "pallet-bridge-messages/std", "pallet-grandpa/std", - "pallet-randomness-collective-flip/std", + "pallet-mmr/std", + "pallet-mmr-primitives/std", "pallet-shift-session-manager/std", "pallet-sudo/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", + "polkadot-primitives/std", + "polkadot-runtime-common/std", + "polkadot-runtime-parachains/std", + "scale-info/std", "serde", "sp-api/std", + "sp-authority-discovery/std", 
"sp-block-builder/std", - "sp-consensus-aura/std", + "sp-consensus-babe/std", "sp-core/std", "sp-finality-grandpa/std", "sp-inherents/std", @@ -125,8 +139,6 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "libsecp256k1", - "pallet-bridge-currency-exchange/runtime-benchmarks", - "pallet-bridge-eth-poa/runtime-benchmarks", "pallet-bridge-messages/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] diff --git a/polkadot/bridges/bin/rialto/runtime/src/benches.rs b/polkadot/bridges/bin/rialto/runtime/src/benches.rs deleted file mode 100644 index 86d6b8361c635da70ea231cdc51f00c7969a2879..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/runtime/src/benches.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! We want to use a different validator configuration for benchmarking than what's used in Kovan -//! or in our Rialto test network. However, we can't configure a new validator set on the fly which -//! 
means we need to wire the runtime together like this - -use pallet_bridge_eth_poa::{ValidatorsConfiguration, ValidatorsSource}; -use sp_std::vec; - -pub use crate::kovan::{ - genesis_header, genesis_validators, BridgeAuraConfiguration, FinalityVotesCachingInterval, PruningStrategy, -}; - -frame_support::parameter_types! { - pub BridgeValidatorsConfiguration: pallet_bridge_eth_poa::ValidatorsConfiguration = bench_validator_config(); -} - -fn bench_validator_config() -> ValidatorsConfiguration { - ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(vec![[1; 20].into()])), - (1, ValidatorsSource::Contract([3; 20].into(), vec![[1; 20].into()])), - ]) -} diff --git a/polkadot/bridges/bin/rialto/runtime/src/exchange.rs b/polkadot/bridges/bin/rialto/runtime/src/exchange.rs deleted file mode 100644 index 3b9c88112e4b4c90efbf801c7ac7f64030447440..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/runtime/src/exchange.rs +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Support for PoA -> Substrate native tokens exchange. -//! -//! If you want to exchange native PoA tokens for native Substrate -//! chain tokens, you need to: -//! 1) send some PoA tokens to `LOCK_FUNDS_ADDRESS` address on PoA chain. 
Data field of -//! the transaction must be SCALE-encoded id of Substrate account that will receive -//! funds on Substrate chain; -//! 2) wait until the 'lock funds' transaction is mined on PoA chain; -//! 3) wait until the block containing the 'lock funds' transaction is finalized on PoA chain; -//! 4) wait until the required PoA header and its finality are provided -//! to the PoA -> Substrate bridge module (it can be provided by you); -//! 5) receive tokens by providing proof-of-inclusion of PoA transaction. - -use bp_currency_exchange::{ - Error as ExchangeError, LockFundsTransaction, MaybeLockFundsTransaction, Result as ExchangeResult, -}; -use bp_eth_poa::{transaction_decode_rlp, RawTransaction, RawTransactionReceipt}; -use codec::{Decode, Encode}; -use frame_support::RuntimeDebug; -use hex_literal::hex; -use sp_std::vec::Vec; - -/// Ethereum address where locked PoA funds must be sent to. -pub const LOCK_FUNDS_ADDRESS: [u8; 20] = hex!("DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF"); - -/// Ethereum transaction inclusion proof. -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] -pub struct EthereumTransactionInclusionProof { - /// Hash of the block with transaction. - pub block: sp_core::H256, - /// Index of the transaction within the block. - pub index: u64, - /// The proof itself (right now it is all RLP-encoded transactions of the block + - /// RLP-encoded receipts of all transactions of the block). - pub proof: Vec<(RawTransaction, RawTransactionReceipt)>, -} - -/// We uniquely identify transfer by the pair (sender, nonce). -/// -/// The assumption is that this pair will never appear more than once in -/// transactions included into finalized blocks. This is obviously true -/// for any existing eth-like chain (that keep current transaction format), -/// because otherwise transaction can be replayed over and over. -#[derive(Encode, Decode, PartialEq, RuntimeDebug)] -pub struct EthereumTransactionTag { - /// Account that has locked funds. 
- pub account: [u8; 20], - /// Lock transaction nonce. - pub nonce: sp_core::U256, -} - -/// Eth transaction from runtime perspective. -pub struct EthTransaction; - -impl MaybeLockFundsTransaction for EthTransaction { - type Transaction = RawTransaction; - type Id = EthereumTransactionTag; - type Recipient = crate::AccountId; - type Amount = crate::Balance; - - fn parse( - raw_tx: &Self::Transaction, - ) -> ExchangeResult> { - let tx = transaction_decode_rlp(raw_tx).map_err(|_| ExchangeError::InvalidTransaction)?; - - // we only accept transactions sending funds directly to the pre-configured address - if tx.unsigned.to != Some(LOCK_FUNDS_ADDRESS.into()) { - log::trace!( - target: "runtime", - "Failed to parse fund locks transaction. Invalid peer recipient: {:?}", - tx.unsigned.to, - ); - - return Err(ExchangeError::InvalidTransaction); - } - - let mut recipient_raw = sp_core::H256::default(); - match tx.unsigned.payload.len() { - 32 => recipient_raw.as_fixed_bytes_mut().copy_from_slice(&tx.unsigned.payload), - len => { - log::trace!( - target: "runtime", - "Failed to parse fund locks transaction. Invalid recipient length: {}", - len, - ); - - return Err(ExchangeError::InvalidRecipient); - } - } - let amount = tx.unsigned.value.low_u128(); - - if tx.unsigned.value != amount.into() { - log::trace!( - target: "runtime", - "Failed to parse fund locks transaction. Invalid amount: {}", - tx.unsigned.value, - ); - - return Err(ExchangeError::InvalidAmount); - } - - Ok(LockFundsTransaction { - id: EthereumTransactionTag { - account: *tx.sender.as_fixed_bytes(), - nonce: tx.unsigned.nonce, - }, - recipient: crate::AccountId::from(*recipient_raw.as_fixed_bytes()), - amount, - }) - } -} - -/// Prepares everything required to bench claim of funds locked by given transaction. 
-#[cfg(feature = "runtime-benchmarks")] -pub(crate) fn prepare_environment_for_claim, I: frame_support::traits::Instance>( - transactions: &[(RawTransaction, RawTransactionReceipt)], -) -> bp_eth_poa::H256 { - use bp_eth_poa::compute_merkle_root; - use pallet_bridge_eth_poa::{ - test_utils::{insert_dummy_header, validator_utils::validator, HeaderBuilder}, - BridgeStorage, Storage, - }; - - let mut storage = BridgeStorage::::new(); - let header = HeaderBuilder::with_parent_number_on_runtime::(0) - .transactions_root(compute_merkle_root(transactions.iter().map(|(tx, _)| tx))) - .receipts_root(compute_merkle_root(transactions.iter().map(|(_, receipt)| receipt))) - .sign_by(&validator(0)); - let header_id = header.compute_id(); - insert_dummy_header(&mut storage, header); - storage.finalize_and_prune_headers(Some(header_id), 0); - - header_id.hash -} - -/// Prepare signed ethereum lock-funds transaction. -#[cfg(any(feature = "runtime-benchmarks", test))] -pub(crate) fn prepare_ethereum_transaction( - recipient: &crate::AccountId, - editor: impl Fn(&mut bp_eth_poa::UnsignedTransaction), -) -> (RawTransaction, RawTransactionReceipt) { - use bp_eth_poa::{signatures::SignTransaction, Receipt, TransactionOutcome}; - - // prepare tx for OpenEthereum private dev chain: - // chain id is 0x11 - // sender secret is 0x4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7 - let chain_id = 0x11; - let signer = secp256k1::SecretKey::parse(&hex!( - "4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7" - )) - .unwrap(); - let recipient_raw: &[u8; 32] = recipient.as_ref(); - let mut eth_tx = bp_eth_poa::UnsignedTransaction { - nonce: 0.into(), - to: Some(LOCK_FUNDS_ADDRESS.into()), - value: 100.into(), - gas: 100_000.into(), - gas_price: 100_000.into(), - payload: recipient_raw.to_vec(), - }; - editor(&mut eth_tx); - ( - eth_tx.sign_by(&signer, Some(chain_id)), - Receipt { - outcome: TransactionOutcome::StatusCode(1), - gas_used: Default::default(), - log_bloom: 
Default::default(), - logs: Vec::new(), - } - .rlp(), - ) -} - -#[cfg(test)] -mod tests { - use super::*; - use hex_literal::hex; - - fn ferdie() -> crate::AccountId { - hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c").into() - } - - #[test] - fn valid_transaction_accepted() { - assert_eq!( - EthTransaction::parse(&prepare_ethereum_transaction(&ferdie(), |_| {}).0), - Ok(LockFundsTransaction { - id: EthereumTransactionTag { - account: hex!("00a329c0648769a73afac7f9381e08fb43dbea72"), - nonce: 0.into(), - }, - recipient: ferdie(), - amount: 100, - }), - ); - } - - #[test] - fn invalid_transaction_rejected() { - assert_eq!( - EthTransaction::parse(&Vec::new()), - Err(ExchangeError::InvalidTransaction), - ); - } - - #[test] - fn transaction_with_invalid_peer_recipient_rejected() { - assert_eq!( - EthTransaction::parse( - &prepare_ethereum_transaction(&ferdie(), |tx| { - tx.to = None; - }) - .0 - ), - Err(ExchangeError::InvalidTransaction), - ); - } - - #[test] - fn transaction_with_invalid_recipient_rejected() { - assert_eq!( - EthTransaction::parse( - &prepare_ethereum_transaction(&ferdie(), |tx| { - tx.payload.clear(); - }) - .0 - ), - Err(ExchangeError::InvalidRecipient), - ); - } - - #[test] - fn transaction_with_invalid_amount_rejected() { - assert_eq!( - EthTransaction::parse( - &prepare_ethereum_transaction(&ferdie(), |tx| { - tx.value = sp_core::U256::from(u128::MAX) + sp_core::U256::from(1); - }) - .0 - ), - Err(ExchangeError::InvalidAmount), - ); - } -} diff --git a/polkadot/bridges/bin/rialto/runtime/src/kovan.rs b/polkadot/bridges/bin/rialto/runtime/src/kovan.rs deleted file mode 100644 index 528c6205846f4c89cf6e3f1fc6836ba1d8b6d825..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/runtime/src/kovan.rs +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::exchange::EthereumTransactionInclusionProof; - -use bp_eth_poa::{Address, AuraHeader, RawTransaction, U256}; -use bp_header_chain::InclusionProofVerifier; -use frame_support::RuntimeDebug; -use hex_literal::hex; -use pallet_bridge_eth_poa::{ - AuraConfiguration, ChainTime as TChainTime, PruningStrategy as BridgePruningStrategy, ValidatorsConfiguration, - ValidatorsSource, -}; -use sp_std::prelude::*; - -frame_support::parameter_types! { - pub const FinalityVotesCachingInterval: Option = Some(16); - pub BridgeAuraConfiguration: AuraConfiguration = - kovan_aura_configuration(); - pub BridgeValidatorsConfiguration: ValidatorsConfiguration = - kovan_validators_configuration(); -} - -/// Max number of finalized headers to keep. It is equivalent of approximately -/// 24 hours of finalized blocks on current Kovan chain. -const FINALIZED_HEADERS_TO_KEEP: u64 = 20_000; - -/// Aura engine configuration for Kovan chain. 
-pub fn kovan_aura_configuration() -> AuraConfiguration { - AuraConfiguration { - empty_steps_transition: u64::MAX, - strict_empty_steps_transition: 0, - validate_step_transition: 0x16e360, - validate_score_transition: 0x41a3c4, - two_thirds_majority_transition: u64::MAX, - min_gas_limit: 0x1388.into(), - max_gas_limit: U256::MAX, - maximum_extra_data_size: 0x20, - } -} - -/// Validators configuration for Kovan chain. -pub fn kovan_validators_configuration() -> ValidatorsConfiguration { - ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(genesis_validators())), - ( - 10960440, - ValidatorsSource::List(vec![ - hex!("00D6Cc1BA9cf89BD2e58009741f4F7325BAdc0ED").into(), - hex!("0010f94b296a852aaac52ea6c5ac72e03afd032d").into(), - hex!("00a0a24b9f0e5ec7aa4c7389b8302fd0123194de").into(), - ]), - ), - ( - 10960500, - ValidatorsSource::Contract( - hex!("aE71807C1B0a093cB1547b682DC78316D945c9B8").into(), - vec![ - hex!("d05f7478c6aa10781258c5cc8b4f385fc8fa989c").into(), - hex!("03801efb0efe2a25ede5dd3a003ae880c0292e4d").into(), - hex!("a4df255ecf08bbf2c28055c65225c9a9847abd94").into(), - hex!("596e8221a30bfe6e7eff67fee664a01c73ba3c56").into(), - hex!("faadface3fbd81ce37b0e19c0b65ff4234148132").into(), - ], - ), - ), - ]) -} - -/// Genesis validators set of Kovan chain. -pub fn genesis_validators() -> Vec
{ - vec![ - hex!("00D6Cc1BA9cf89BD2e58009741f4F7325BAdc0ED").into(), - hex!("00427feae2419c15b89d1c21af10d1b6650a4d3d").into(), - hex!("4Ed9B08e6354C70fE6F8CB0411b0d3246b424d6c").into(), - hex!("0020ee4Be0e2027d76603cB751eE069519bA81A1").into(), - hex!("0010f94b296a852aaac52ea6c5ac72e03afd032d").into(), - hex!("007733a1FE69CF3f2CF989F81C7b4cAc1693387A").into(), - hex!("00E6d2b931F55a3f1701c7389d592a7778897879").into(), - hex!("00e4a10650e5a6D6001C38ff8E64F97016a1645c").into(), - hex!("00a0a24b9f0e5ec7aa4c7389b8302fd0123194de").into(), - ] -} - -/// Genesis header of the Kovan chain. -pub fn genesis_header() -> AuraHeader { - AuraHeader { - parent_hash: Default::default(), - timestamp: 0, - number: 0, - author: Default::default(), - transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), - uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").into(), - extra_data: vec![], - state_root: hex!("2480155b48a1cea17d67dbfdfaafe821c1d19cdd478c5358e8ec56dec24502b2").into(), - receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), - log_bloom: Default::default(), - gas_used: Default::default(), - gas_limit: 6000000.into(), - difficulty: 131072.into(), - seal: vec![ - vec![128], - vec![ - 184, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - ], - } -} - -/// Kovan headers pruning strategy. -/// -/// We do not prune unfinalized headers because exchange module only accepts -/// claims from finalized headers. And if we're pruning unfinalized headers, then -/// some claims may never be accepted. 
-#[derive(Default, RuntimeDebug)] -pub struct PruningStrategy; - -impl BridgePruningStrategy for PruningStrategy { - fn pruning_upper_bound(&mut self, _best_number: u64, best_finalized_number: u64) -> u64 { - best_finalized_number.saturating_sub(FINALIZED_HEADERS_TO_KEEP) - } -} - -/// PoA Header timestamp verification against `Timestamp` pallet. -#[derive(Default, RuntimeDebug)] -pub struct ChainTime; - -impl TChainTime for ChainTime { - fn is_timestamp_ahead(&self, timestamp: u64) -> bool { - let now = super::Timestamp::now(); - timestamp > now - } -} - -/// The Kovan Blockchain as seen by the runtime. -pub struct KovanBlockchain; - -impl InclusionProofVerifier for KovanBlockchain { - type Transaction = RawTransaction; - type TransactionInclusionProof = EthereumTransactionInclusionProof; - - fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option { - let is_transaction_finalized = - crate::BridgeKovan::verify_transaction_finalized(proof.block, proof.index, &proof.proof); - - if !is_transaction_finalized { - return None; - } - - proof.proof.get(proof.index as usize).map(|(tx, _)| tx.clone()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn pruning_strategy_keeps_enough_headers() { - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 10_000), - 0, - "10_000 <= 20_000 => nothing should be pruned yet", - ); - - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 20_000), - 0, - "20_000 <= 20_000 => nothing should be pruned yet", - ); - - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 30_000), - 10_000, - "20_000 <= 30_000 => we're ready to prune first 10_000 headers", - ); - } -} diff --git a/polkadot/bridges/bin/rialto/runtime/src/lib.rs b/polkadot/bridges/bin/rialto/runtime/src/lib.rs index 601a513f0f9c56999c78601821a7351bbbc822cd..6a4cb90a4fdebe453013a4dedcb6caa0f6e088ba 100644 --- a/polkadot/bridges/bin/rialto/runtime/src/lib.rs +++ 
b/polkadot/bridges/bin/rialto/runtime/src/lib.rs @@ -30,34 +30,32 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -pub mod exchange; - -#[cfg(feature = "runtime-benchmarks")] -pub mod benches; -pub mod kovan; pub mod millau_messages; -pub mod rialto_poa; +pub mod parachains; use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge}; +use beefy_primitives::{crypto::AuthorityId as BeefyId, mmr::MmrLeafVersion, ValidatorSet}; use bridge_runtime_common::messages::{ source::estimate_message_dispatch_and_delivery_fee, MessageBridge, }; -use codec::Decode; use pallet_grandpa::{ fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, }; -use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; +use pallet_mmr_primitives::{ + DataOrHash, EncodableOpaqueLeaf, Error as MmrError, LeafDataProvider, Proof as MmrProof, +}; +use pallet_transaction_payment::{FeeDetails, Multiplier, RuntimeDispatchInfo}; use sp_api::impl_runtime_apis; -use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, - traits::{Block as BlockT, IdentityLookup, NumberFor, OpaqueKeys}, + traits::{AccountIdLookup, Block as BlockT, Keccak256, NumberFor, OpaqueKeys}, transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, MultiSignature, MultiSigner, + ApplyExtrinsicResult, FixedPointNumber, MultiSignature, MultiSigner, Perquintill, }; -use sp_std::prelude::*; +use sp_std::{collections::btree_map::BTreeMap, prelude::*}; #[cfg(feature = "std")] use sp_version::NativeVersion; use sp_version::RuntimeVersion; @@ -72,8 +70,6 @@ pub use frame_support::{ pub use frame_system::Call as SystemCall; pub use pallet_balances::Call as BalancesCall; -pub use pallet_bridge_currency_exchange::Call as BridgeCurrencyExchangeCall; -pub use 
pallet_bridge_eth_poa::Call as BridgeEthPoACall; pub use pallet_bridge_grandpa::Call as BridgeGrandpaMillauCall; pub use pallet_bridge_messages::Call as MessagesCall; pub use pallet_sudo::Call as SudoCall; @@ -101,7 +97,7 @@ pub type AccountIndex = u32; pub type Balance = bp_rialto::Balance; /// Index of a transaction in the chain. -pub type Index = u32; +pub type Index = bp_rialto::Index; /// A hash of some data used by the chain. pub type Hash = bp_rialto::Hash; @@ -109,9 +105,6 @@ pub type Hash = bp_rialto::Hash; /// Hashing algorithm used by the chain. pub type Hashing = bp_rialto::Hasher; -/// Digest item type. -pub type DigestItem = generic::DigestItem; - /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know /// the specifics of the runtime. They can then be made to be agnostic over specific formats /// of data like extrinsics, allowing for them to continue syncing the network through upgrades @@ -131,8 +124,12 @@ pub mod opaque { impl_opaque_keys! { pub struct SessionKeys { - pub aura: Aura, + pub babe: Babe, pub grandpa: Grandpa, + pub beefy: Beefy, + pub para_validator: Initializer, + pub para_assignment: SessionInfo, + pub authority_discovery: AuthorityDiscovery, } } @@ -171,7 +168,7 @@ impl frame_system::Config for Runtime { /// The aggregated dispatch type that is available for extrinsics. type Call = Call; /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = IdentityLookup; + type Lookup = AccountIdLookup; /// The index type for storing how many extrinsics an account has signed. type Index = Index; /// The index type for blocks. @@ -213,62 +210,50 @@ impl frame_system::Config for Runtime { type OnSetCode = (); } -impl pallet_randomness_collective_flip::Config for Runtime {} +/// The BABE epoch configuration at genesis. 
+pub const BABE_GENESIS_EPOCH_CONFIG: sp_consensus_babe::BabeEpochConfiguration = + sp_consensus_babe::BabeEpochConfiguration { + c: bp_rialto::time_units::PRIMARY_PROBABILITY, + allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryVRFSlots, + }; parameter_types! { + pub const EpochDuration: u64 = bp_rialto::EPOCH_DURATION_IN_SLOTS as u64; + pub const ExpectedBlockTime: bp_rialto::Moment = bp_rialto::time_units::MILLISECS_PER_BLOCK; pub const MaxAuthorities: u32 = 10; } -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; +impl pallet_babe::Config for Runtime { + type EpochDuration = EpochDuration; + type ExpectedBlockTime = ExpectedBlockTime; type MaxAuthorities = MaxAuthorities; -} -type RialtoPoA = pallet_bridge_eth_poa::Instance1; -impl pallet_bridge_eth_poa::Config for Runtime { - type AuraConfiguration = rialto_poa::BridgeAuraConfiguration; - type FinalityVotesCachingInterval = rialto_poa::FinalityVotesCachingInterval; - type ValidatorsConfiguration = rialto_poa::BridgeValidatorsConfiguration; - type PruningStrategy = rialto_poa::PruningStrategy; - type ChainTime = rialto_poa::ChainTime; - type OnHeadersSubmitted = (); -} + // session module is the trigger + type EpochChangeTrigger = pallet_babe::ExternalTrigger; -type Kovan = pallet_bridge_eth_poa::Instance2; -impl pallet_bridge_eth_poa::Config for Runtime { - type AuraConfiguration = kovan::BridgeAuraConfiguration; - type FinalityVotesCachingInterval = kovan::FinalityVotesCachingInterval; - type ValidatorsConfiguration = kovan::BridgeValidatorsConfiguration; - type PruningStrategy = kovan::PruningStrategy; - type ChainTime = kovan::ChainTime; - type OnHeadersSubmitted = (); -} + // equivocation related configuration - we don't expect any equivocations in our testnets + type KeyOwnerProofSystem = (); + type KeyOwnerProof = >::Proof; + type KeyOwnerIdentification = >::IdentificationTuple; + type HandleEquivocation = (); -type RialtoCurrencyExchange = 
pallet_bridge_currency_exchange::Instance1; -impl pallet_bridge_currency_exchange::Config for Runtime { - type OnTransactionSubmitted = (); - type PeerBlockchain = rialto_poa::RialtoBlockchain; - type PeerMaybeLockFundsTransaction = exchange::EthTransaction; - type RecipientsMap = bp_currency_exchange::IdentityRecipients; - type Amount = Balance; - type CurrencyConverter = bp_currency_exchange::IdentityCurrencyConverter; - type DepositInto = DepositInto; + type DisabledValidators = (); + type WeightInfo = (); } -type KovanCurrencyExchange = pallet_bridge_currency_exchange::Instance2; -impl pallet_bridge_currency_exchange::Config for Runtime { - type OnTransactionSubmitted = (); - type PeerBlockchain = kovan::KovanBlockchain; - type PeerMaybeLockFundsTransaction = exchange::EthTransaction; - type RecipientsMap = bp_currency_exchange::IdentityRecipients; - type Amount = Balance; - type CurrencyConverter = bp_currency_exchange::IdentityCurrencyConverter; - type DepositInto = DepositInto; +impl pallet_beefy::Config for Runtime { + type BeefyId = BeefyId; } impl pallet_bridge_dispatch::Config for Runtime { type Event = Event; - type MessageId = (bp_messages::LaneId, bp_messages::MessageNonce); + type BridgeMessageId = (bp_messages::LaneId, bp_messages::MessageNonce); type Call = Call; type CallFilter = frame_support::traits::Everything; type EncodedCall = crate::millau_messages::FromMillauEncodedCall; @@ -278,71 +263,10 @@ impl pallet_bridge_dispatch::Config for Runtime { type AccountIdConverter = bp_rialto::AccountIdConverter; } -pub struct DepositInto; - -impl bp_currency_exchange::DepositInto for DepositInto { - type Recipient = AccountId; - type Amount = Balance; - - fn deposit_into( - recipient: Self::Recipient, - amount: Self::Amount, - ) -> bp_currency_exchange::Result<()> { - // let balances module make all checks for us (it won't allow depositing lower than existential - // deposit, balance overflow, ...) 
- let deposited = as Currency>::deposit_creating( - &recipient, amount, - ); - - // I'm dropping deposited here explicitly to illustrate the fact that it'll update `TotalIssuance` - // on drop - let deposited_amount = deposited.peek(); - drop(deposited); - - // we have 3 cases here: - // - deposited == amount: success - // - deposited == 0: deposit has failed and no changes to storage were made - // - deposited != 0: (should never happen in practice) deposit has been partially completed - match deposited_amount { - _ if deposited_amount == amount => { - log::trace!( - target: "runtime", - "Deposited {} to {:?}", - amount, - recipient, - ); - - Ok(()) - }, - _ if deposited_amount == 0 => { - log::error!( - target: "runtime", - "Deposit of {} to {:?} has failed", - amount, - recipient, - ); - - Err(bp_currency_exchange::Error::DepositFailed) - }, - _ => { - log::error!( - target: "runtime", - "Deposit of {} to {:?} has partially competed. {} has been deposited", - amount, - recipient, - deposited_amount, - ); - - // we can't return DepositFailed error here, because storage changes were made - Err(bp_currency_exchange::Error::DepositPartiallyFailed) - }, - } - } -} - impl pallet_grandpa::Config for Runtime { type Event = Event; type Call = Call; + type MaxAuthorities = MaxAuthorities; type KeyOwnerProofSystem = (); type KeyOwnerProof = >::Proof; @@ -356,14 +280,46 @@ impl pallet_grandpa::Config for Runtime { type MaxAuthorities = MaxAuthorities; } +impl pallet_mmr::Config for Runtime { + const INDEXING_PREFIX: &'static [u8] = b"mmr"; + type Hashing = Keccak256; + type Hash = ::Output; + type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest; + type WeightInfo = (); + type LeafData = pallet_beefy_mmr::Pallet; +} + +parameter_types! { + /// Version of the produced MMR leaf. 
+ /// + /// The version consists of two parts; + /// - `major` (3 bits) + /// - `minor` (5 bits) + /// + /// `major` should be updated only if decoding the previous MMR Leaf format from the payload + /// is not possible (i.e. backward incompatible change). + /// `minor` should be updated if fields are added to the previous MMR Leaf, which given SCALE + /// encoding does not prevent old leafs from being decoded. + /// + /// Hence we expect `major` to be changed really rarely (think never). + /// See [`MmrLeafVersion`] type documentation for more details. + pub LeafVersion: MmrLeafVersion = MmrLeafVersion::new(0, 0); +} + +impl pallet_beefy_mmr::Config for Runtime { + type LeafVersion = LeafVersion; + type BeefyAuthorityToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum; + type ParachainHeads = (); +} + parameter_types! { pub const MinimumPeriod: u64 = bp_rialto::SLOT_DURATION / 2; } impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the Unix epoch. - type Moment = u64; - type OnTimestampSet = Aura; + /// A timestamp: milliseconds since the UNIX epoch. + type Moment = bp_rialto::Moment; + type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) type WeightInfo = (); @@ -395,13 +351,25 @@ impl pallet_balances::Config for Runtime { parameter_types! 
{ pub const TransactionBaseFee: Balance = 0; pub const TransactionByteFee: Balance = 1; + pub const OperationalFeeMultiplier: u8 = 5; + // values for following parameters are copied from polkadot repo, but it is fine + // not to sync them - we're not going to make Rialto a full copy of one of Polkadot-like chains + pub const TargetBlockFullness: Perquintill = Perquintill::from_percent(25); + pub AdjustmentVariable: Multiplier = Multiplier::saturating_from_rational(3, 100_000); + pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000u128); } impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; type TransactionByteFee = TransactionByteFee; - type WeightToFee = IdentityFee; - type FeeMultiplierUpdate = (); + type OperationalFeeMultiplier = OperationalFeeMultiplier; + type WeightToFee = bp_rialto::WeightToFee; + type FeeMultiplierUpdate = pallet_transaction_payment::TargetedFeeAdjustment< + Runtime, + TargetBlockFullness, + AdjustmentVariable, + MinimumMultiplier, + >; } impl pallet_sudo::Config for Runtime { @@ -409,17 +377,12 @@ impl pallet_sudo::Config for Runtime { type Call = Call; } -parameter_types! { - pub const Period: BlockNumber = bp_rialto::SESSION_LENGTH; - pub const Offset: BlockNumber = 0; -} - impl pallet_session::Config for Runtime { type Event = Event; type ValidatorId = ::AccountId; type ValidatorIdOf = (); - type ShouldEndSession = pallet_session::PeriodicSessions; - type NextSessionRotation = pallet_session::PeriodicSessions; + type ShouldEndSession = Babe; + type NextSessionRotation = Babe; type SessionManager = pallet_shift_session_manager::Pallet; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; @@ -427,6 +390,10 @@ impl pallet_session::Config for Runtime { type WeightInfo = (); } +impl pallet_authority_discovery::Config for Runtime { + type MaxAuthorities = MaxAuthorities; +} + parameter_types! 
{ /// This is a pretty unscientific cap. /// @@ -475,10 +442,11 @@ parameter_types! { pub const GetDeliveryConfirmationTransactionFee: Balance = bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT as _; pub const RootAccountForPayments: Option = None; + pub const BridgedChainId: bp_runtime::ChainId = bp_runtime::MILLAU_CHAIN_ID; } /// Instance of the messages pallet used to relay messages to/from Millau chain. -pub type WithMillauMessagesInstance = pallet_bridge_messages::DefaultInstance; +pub type WithMillauMessagesInstance = (); impl pallet_bridge_messages::Config for Runtime { type Event = Event; @@ -502,14 +470,17 @@ impl pallet_bridge_messages::Config for Runtime { type MessageDeliveryAndDispatchPayment = pallet_bridge_messages::instant_payments::InstantCurrencyPayments< Runtime, + (), pallet_balances::Pallet, GetDeliveryConfirmationTransactionFee, RootAccountForPayments, >; + type OnMessageAccepted = (); type OnDeliveryConfirmed = (); type SourceHeaderChain = crate::millau_messages::Millau; type MessageDispatch = crate::millau_messages::FromMillauMessageDispatch; + type BridgedChainId = BridgedChainId; } construct_runtime!( @@ -518,28 +489,55 @@ construct_runtime!( NodeBlock = opaque::Block, UncheckedExtrinsic = UncheckedExtrinsic { - BridgeRialtoPoa: pallet_bridge_eth_poa::::{Pallet, Call, Config, Storage, ValidateUnsigned}, - BridgeKovan: pallet_bridge_eth_poa::::{Pallet, Call, Config, Storage, ValidateUnsigned}, - BridgeRialtoCurrencyExchange: pallet_bridge_currency_exchange::::{Pallet, Call}, - BridgeKovanCurrencyExchange: pallet_bridge_currency_exchange::::{Pallet, Call}, - BridgeMillauGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage}, - BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event}, - BridgeMillauMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event}, System: frame_system::{Pallet, Call, Config, Storage, Event}, - RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage}, + Sudo: 
pallet_sudo::{Pallet, Call, Config, Storage, Event}, + + // Must be before session. + Babe: pallet_babe::{Pallet, Call, Storage, Config, ValidateUnsigned}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, - Aura: pallet_aura::{Pallet, Config}, - Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, - Sudo: pallet_sudo::{Pallet, Call, Config, Storage, Event}, + + // Consensus support. + AuthorityDiscovery: pallet_authority_discovery::{Pallet, Config}, Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event}, ShiftSessionManager: pallet_shift_session_manager::{Pallet}, + + // BEEFY Bridges support. + Beefy: pallet_beefy::{Pallet, Storage, Config}, + Mmr: pallet_mmr::{Pallet, Storage}, + MmrLeaf: pallet_beefy_mmr::{Pallet, Storage}, + + // Millau bridge modules. + BridgeMillauGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage}, + BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event}, + BridgeMillauMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event, Config}, + + // Parachain modules. 
+ ParachainsOrigin: polkadot_runtime_parachains::origin::{Pallet, Origin}, + Configuration: polkadot_runtime_parachains::configuration::{Pallet, Call, Storage, Config}, + Shared: polkadot_runtime_parachains::shared::{Pallet, Call, Storage}, + Inclusion: polkadot_runtime_parachains::inclusion::{Pallet, Call, Storage, Event}, + ParasInherent: polkadot_runtime_parachains::paras_inherent::{Pallet, Call, Storage, Inherent}, + Scheduler: polkadot_runtime_parachains::scheduler::{Pallet, Storage}, + Paras: polkadot_runtime_parachains::paras::{Pallet, Call, Storage, Event, Config}, + Initializer: polkadot_runtime_parachains::initializer::{Pallet, Call, Storage}, + Dmp: polkadot_runtime_parachains::dmp::{Pallet, Call, Storage}, + Ump: polkadot_runtime_parachains::ump::{Pallet, Call, Storage, Event}, + Hrmp: polkadot_runtime_parachains::hrmp::{Pallet, Call, Storage, Event, Config}, + SessionInfo: polkadot_runtime_parachains::session_info::{Pallet, Storage}, + + // Parachain Onboarding Pallets + Registrar: polkadot_runtime_common::paras_registrar::{Pallet, Call, Storage, Event}, + Slots: polkadot_runtime_common::slots::{Pallet, Call, Storage, Event}, + ParasSudoWrapper: polkadot_runtime_common::paras_sudo_wrapper::{Pallet, Call}, } ); /// The address format for describing accounts. -pub type Address = AccountId; +pub type Address = sp_runtime::MultiAddress; /// Block header type as expected by this runtime. pub type Header = generic::Header; /// Block type as expected by this runtime. @@ -570,7 +568,7 @@ pub type Executive = frame_executive::Executive< Block, frame_system::ChainContext, Runtime, - AllPalletsWithSystem, + AllPallets, >; impl_runtime_apis! { @@ -621,43 +619,42 @@ impl_runtime_apis! 
{ } } - impl bp_eth_poa::RialtoPoAHeaderApi for Runtime { - fn best_block() -> (u64, bp_eth_poa::H256) { - let best_block = BridgeRialtoPoa::best_block(); - (best_block.number, best_block.hash) - } - - fn finalized_block() -> (u64, bp_eth_poa::H256) { - let finalized_block = BridgeRialtoPoa::finalized_block(); - (finalized_block.number, finalized_block.hash) - } - - fn is_import_requires_receipts(header: bp_eth_poa::AuraHeader) -> bool { - BridgeRialtoPoa::is_import_requires_receipts(header) - } - - fn is_known_block(hash: bp_eth_poa::H256) -> bool { - BridgeRialtoPoa::is_known_block(hash) + impl beefy_primitives::BeefyApi for Runtime { + fn validator_set() -> ValidatorSet { + Beefy::validator_set() } } - impl bp_eth_poa::KovanHeaderApi for Runtime { - fn best_block() -> (u64, bp_eth_poa::H256) { - let best_block = BridgeKovan::best_block(); - (best_block.number, best_block.hash) - } - - fn finalized_block() -> (u64, bp_eth_poa::H256) { - let finalized_block = BridgeKovan::finalized_block(); - (finalized_block.number, finalized_block.hash) + impl pallet_mmr_primitives::MmrApi for Runtime { + fn generate_proof(leaf_index: u64) + -> Result<(EncodableOpaqueLeaf, MmrProof), MmrError> + { + Mmr::generate_proof(leaf_index) + .map(|(leaf, proof)| (EncodableOpaqueLeaf::from_leaf(&leaf), proof)) } - fn is_import_requires_receipts(header: bp_eth_poa::AuraHeader) -> bool { - BridgeKovan::is_import_requires_receipts(header) + fn verify_proof(leaf: EncodableOpaqueLeaf, proof: MmrProof) + -> Result<(), MmrError> + { + pub type Leaf = < + ::LeafData as LeafDataProvider + >::LeafData; + + let leaf: Leaf = leaf + .into_opaque_leaf() + .try_decode() + .ok_or(MmrError::Verify)?; + Mmr::verify_leaf(leaf, proof) } - fn is_known_block(hash: bp_eth_poa::H256) -> bool { - BridgeKovan::is_known_block(hash) + fn verify_proof_stateless( + root: Hash, + leaf: EncodableOpaqueLeaf, + proof: MmrProof + ) -> Result<(), MmrError> { + type MmrHashing = ::Hashing; + let node = 
DataOrHash::Data(leaf.into_opaque_leaf()); + pallet_mmr::verify_leaf_proof::(root, node, proof) } } @@ -672,18 +669,6 @@ impl_runtime_apis! { } } - impl bp_currency_exchange::RialtoCurrencyExchangeApi for Runtime { - fn filter_transaction_proof(proof: exchange::EthereumTransactionInclusionProof) -> bool { - BridgeRialtoCurrencyExchange::filter_transaction_proof(&proof) - } - } - - impl bp_currency_exchange::KovanCurrencyExchangeApi for Runtime { - fn filter_transaction_proof(proof: exchange::EthereumTransactionInclusionProof) -> bool { - BridgeKovanCurrencyExchange::filter_transaction_proof(&proof) - } - } - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { fn validate_transaction( source: TransactionSource, @@ -700,13 +685,152 @@ impl_runtime_apis! { } } - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + impl sp_consensus_babe::BabeApi for Runtime { + fn configuration() -> sp_consensus_babe::BabeGenesisConfiguration { + // The choice of `c` parameter (where `1 - c` represents the + // probability of a slot being empty), is done in accordance to the + // slot duration and expected target block time, for safely + // resisting network delays of maximum two seconds. 
+ // + sp_consensus_babe::BabeGenesisConfiguration { + slot_duration: Babe::slot_duration(), + epoch_length: EpochDuration::get(), + c: BABE_GENESIS_EPOCH_CONFIG.c, + genesis_authorities: Babe::authorities().to_vec(), + randomness: Babe::randomness(), + allowed_slots: BABE_GENESIS_EPOCH_CONFIG.allowed_slots, + } + } + + fn current_epoch_start() -> sp_consensus_babe::Slot { + Babe::current_epoch_start() + } + + fn current_epoch() -> sp_consensus_babe::Epoch { + Babe::current_epoch() + } + + fn next_epoch() -> sp_consensus_babe::Epoch { + Babe::next_epoch() + } + + fn generate_key_ownership_proof( + _slot: sp_consensus_babe::Slot, + _authority_id: sp_consensus_babe::AuthorityId, + ) -> Option { + None + } + + fn submit_report_equivocation_unsigned_extrinsic( + equivocation_proof: sp_consensus_babe::EquivocationProof<::Header>, + key_owner_proof: sp_consensus_babe::OpaqueKeyOwnershipProof, + ) -> Option<()> { + let key_owner_proof = key_owner_proof.decode()?; + + Babe::submit_unsigned_equivocation_report( + equivocation_proof, + key_owner_proof, + ) + } + } + + impl polkadot_primitives::v1::ParachainHost for Runtime { + fn validators() -> Vec { + polkadot_runtime_parachains::runtime_api_impl::v1::validators::() + } + + fn validator_groups() -> ( + Vec>, + polkadot_primitives::v1::GroupRotationInfo, + ) { + polkadot_runtime_parachains::runtime_api_impl::v1::validator_groups::() + } + + fn availability_cores() -> Vec> { + polkadot_runtime_parachains::runtime_api_impl::v1::availability_cores::() + } + + fn persisted_validation_data( + para_id: polkadot_primitives::v1::Id, + assumption: polkadot_primitives::v1::OccupiedCoreAssumption, + ) + -> Option> { + polkadot_runtime_parachains::runtime_api_impl::v1::persisted_validation_data::(para_id, assumption) + } + + fn assumed_validation_data( + para_id: polkadot_primitives::v1::Id, + expected_persisted_validation_data_hash: Hash, + ) -> Option<(polkadot_primitives::v1::PersistedValidationData, 
polkadot_primitives::v1::ValidationCodeHash)> { + polkadot_runtime_parachains::runtime_api_impl::v1::assumed_validation_data::(para_id, expected_persisted_validation_data_hash) + } + + fn check_validation_outputs( + para_id: polkadot_primitives::v1::Id, + outputs: polkadot_primitives::v1::CandidateCommitments, + ) -> bool { + polkadot_runtime_parachains::runtime_api_impl::v1::check_validation_outputs::(para_id, outputs) } - fn authorities() -> Vec { - Aura::authorities() + fn session_index_for_child() -> polkadot_primitives::v1::SessionIndex { + polkadot_runtime_parachains::runtime_api_impl::v1::session_index_for_child::() + } + + fn validation_code( + para_id: polkadot_primitives::v1::Id, + assumption: polkadot_primitives::v1::OccupiedCoreAssumption, + ) + -> Option { + polkadot_runtime_parachains::runtime_api_impl::v1::validation_code::(para_id, assumption) + } + + fn candidate_pending_availability( + para_id: polkadot_primitives::v1::Id, + ) -> Option> { + polkadot_runtime_parachains::runtime_api_impl::v1::candidate_pending_availability::(para_id) + } + + fn candidate_events() -> Vec> { + polkadot_runtime_parachains::runtime_api_impl::v1::candidate_events::(|ev| { + match ev { + Event::Inclusion(ev) => { + Some(ev) + } + _ => None, + } + }) + } + + fn session_info(index: polkadot_primitives::v1::SessionIndex) -> Option { + polkadot_runtime_parachains::runtime_api_impl::v1::session_info::(index) + } + + fn dmq_contents( + recipient: polkadot_primitives::v1::Id, + ) -> Vec> { + polkadot_runtime_parachains::runtime_api_impl::v1::dmq_contents::(recipient) + } + + fn inbound_hrmp_channels_contents( + recipient: polkadot_primitives::v1::Id + ) -> BTreeMap>> { + polkadot_runtime_parachains::runtime_api_impl::v1::inbound_hrmp_channels_contents::(recipient) + } + + fn validation_code_by_hash( + hash: polkadot_primitives::v1::ValidationCodeHash, + ) -> Option { + polkadot_runtime_parachains::runtime_api_impl::v1::validation_code_by_hash::(hash) + } + + fn on_chain_votes() 
-> Option> { + polkadot_runtime_parachains::runtime_api_impl::v1::on_chain_votes::() + } + } + + impl sp_authority_discovery::AuthorityDiscoveryApi for Runtime { + fn authorities() -> Vec { + polkadot_runtime_parachains::runtime_api_impl::v1::relevant_authority_ids::() } } @@ -735,6 +859,10 @@ impl_runtime_apis! { } impl fg_primitives::GrandpaApi for Runtime { + fn current_set_id() -> fg_primitives::SetId { + Grandpa::current_set_id() + } + fn grandpa_authorities() -> GrandpaAuthorityList { Grandpa::grandpa_authorities() } @@ -781,20 +909,11 @@ impl_runtime_apis! { begin: bp_messages::MessageNonce, end: bp_messages::MessageNonce, ) -> Vec> { - (begin..=end).filter_map(|nonce| { - let message_data = BridgeMillauMessages::outbound_message_data(lane, nonce)?; - let decoded_payload = millau_messages::ToMillauMessagePayload::decode( - &mut &message_data.payload[..] - ).ok()?; - Some(bp_messages::MessageDetails { - nonce, - dispatch_weight: decoded_payload.weight, - size: message_data.payload.len() as _, - delivery_and_dispatch_fee: message_data.fee, - dispatch_fee_payment: decoded_payload.dispatch_fee_payment, - }) - }) - .collect() + bridge_runtime_common::messages_api::outbound_message_details::< + Runtime, + WithMillauMessagesInstance, + WithMillauMessageBridge, + >(lane, begin, end) } fn latest_received_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { @@ -829,18 +948,11 @@ impl_runtime_apis! 
{ use frame_benchmarking::{list_benchmark, Benchmarking, BenchmarkList}; use frame_support::traits::StorageInfoTrait; - list_benchmark!( - list, - extra, - pallet_bridge_currency_exchange, - BridgeCurrencyExchangeBench:: - ); - list_benchmark!( - list, - extra, - pallet_bridge_messages, - MessagesBench:: - ); + use pallet_bridge_messages::benchmarking::Pallet as MessagesBench; + + let mut list = Vec::::new(); + + list_benchmark!(list, extra, pallet_bridge_messages, MessagesBench::); list_benchmark!(list, extra, pallet_bridge_grandpa, BridgeMillauGrandpa); let storage_info = AllPalletsWithSystem::storage_info(); @@ -873,46 +985,6 @@ impl_runtime_apis! { let mut batches = Vec::::new(); let params = (&config, &whitelist); - use pallet_bridge_currency_exchange::benchmarking::{ - Pallet as BridgeCurrencyExchangeBench, - Config as BridgeCurrencyExchangeConfig, - ProofParams as BridgeCurrencyExchangeProofParams, - }; - - impl BridgeCurrencyExchangeConfig for Runtime { - fn make_proof( - proof_params: BridgeCurrencyExchangeProofParams, - ) -> crate::exchange::EthereumTransactionInclusionProof { - use bp_currency_exchange::DepositInto; - - if proof_params.recipient_exists { - >::DepositInto::deposit_into( - proof_params.recipient.clone(), - ExistentialDeposit::get(), - ).unwrap(); - } - - let (transaction, receipt) = crate::exchange::prepare_ethereum_transaction( - &proof_params.recipient, - |tx| { - // our runtime only supports transactions where data is exactly 32 bytes long - // (receiver key) - // => we are ignoring `transaction_size_factor` here - tx.value = (ExistentialDeposit::get() * 10).into(); - }, - ); - let transactions = sp_std::iter::repeat((transaction, receipt)) - .take(1 + proof_params.proof_size_factor as usize) - .collect::>(); - let block_hash = crate::exchange::prepare_environment_for_claim::(&transactions); - crate::exchange::EthereumTransactionInclusionProof { - block: block_hash, - index: 0, - proof: transactions, - } - } - } - use 
crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge}; use bp_runtime::messages::DispatchFeePayment; use bridge_runtime_common::messages; @@ -981,7 +1053,7 @@ impl_runtime_apis! { MessagesProofSize::Minimal(ref size) => vec![0u8; *size as _], _ => vec![], }; - let call = Call::System(SystemCall::remark(remark)); + let call = Call::System(SystemCall::remark { remark }); let call_weight = call.get_dispatch_info().weight; let millau_account_id: bp_millau::AccountId = Default::default(); @@ -1001,14 +1073,12 @@ impl_runtime_apis! { Self::endow_account(&rialto_public.clone().into_account()); } - let make_millau_message_key = |message_key: MessageKey| storage_keys::message_key::< - ::BridgedMessagesInstance, - >( + let make_millau_message_key = |message_key: MessageKey| storage_keys::message_key( + ::BRIDGED_MESSAGES_PALLET_NAME, &message_key.lane_id, message_key.nonce, ).0; - let make_millau_outbound_lane_data_key = |lane_id| storage_keys::outbound_lane_data_key::< - ::BridgedMessagesInstance, - >( + let make_millau_outbound_lane_data_key = |lane_id| storage_keys::outbound_lane_data_key( + ::BRIDGED_MESSAGES_PALLET_NAME, &lane_id, ).0; @@ -1054,9 +1124,8 @@ impl_runtime_apis! { prepare_message_delivery_proof::( params, - |lane_id| pallet_bridge_messages::storage_keys::inbound_lane_data_key::< - ::BridgedMessagesInstance, - >( + |lane_id| pallet_bridge_messages::storage_keys::inbound_lane_data_key( + ::BRIDGED_MESSAGES_PALLET_NAME, &lane_id, ).0, |state_root| bp_millau::Header::new( @@ -1082,12 +1151,6 @@ impl_runtime_apis! { } } - add_benchmark!( - params, - batches, - pallet_bridge_currency_exchange, - BridgeCurrencyExchangeBench:: - ); add_benchmark!( params, batches, @@ -1104,8 +1167,8 @@ impl_runtime_apis! { /// Millau account ownership digest from Rialto. /// /// The byte vector returned by this function should be signed with a Millau account private key. 
-/// This way, the owner of `rialto_account_id` on Rialto proves that the Millau account private key -/// is also under his control. +/// This way, the owner of `rialto_account_id` on Rialto proves that the 'millau' account private +/// key is also under his control. pub fn rialto_to_millau_account_ownership_digest( millau_call: &Call, rialto_account_id: AccountId, @@ -1128,48 +1191,8 @@ where #[cfg(test)] mod tests { use super::*; - use bp_currency_exchange::DepositInto; use bridge_runtime_common::messages; - fn run_deposit_into_test(test: impl Fn(AccountId) -> Balance) { - let mut ext: sp_io::TestExternalities = - SystemConfig::default().build_storage::().unwrap().into(); - ext.execute_with(|| { - // initially issuance is zero - assert_eq!( - as Currency>::total_issuance(), - 0, - ); - - // create account - let account: AccountId = [1u8; 32].into(); - let initial_amount = ExistentialDeposit::get(); - let deposited = - as Currency>::deposit_creating( - &account, - initial_amount, - ); - drop(deposited); - assert_eq!( - as Currency>::total_issuance(), - initial_amount, - ); - assert_eq!( - as Currency>::free_balance(&account), - initial_amount, - ); - - // run test - let total_issuance_change = test(account); - - // check that total issuance has changed by `run_deposit_into_test` - assert_eq!( - as Currency>::total_issuance(), - initial_amount + total_issuance_change, - ); - }); - } - #[test] fn ensure_rialto_message_lane_weights_are_correct() { type Weights = pallet_bridge_messages::weights::RialtoWeight; @@ -1179,6 +1202,7 @@ mod tests { bp_rialto::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT, bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, bp_rialto::PAY_INBOUND_DISPATCH_FEE_WEIGHT, + DbWeight::get(), ); let max_incoming_message_proof_size = bp_millau::EXTRA_STORAGE_PROOF_SIZE.saturating_add( @@ -1206,50 +1230,16 @@ mod tests { max_incoming_inbound_lane_data_proof_size, bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, 
bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, + DbWeight::get(), ); } #[test] - fn deposit_into_existing_account_works() { - run_deposit_into_test(|existing_account| { - let initial_amount = - as Currency>::free_balance( - &existing_account, - ); - let additional_amount = 10_000; - >::DepositInto::deposit_into( - existing_account.clone(), - additional_amount, - ) - .unwrap(); - assert_eq!( - as Currency>::free_balance( - &existing_account - ), - initial_amount + additional_amount, - ); - additional_amount - }); - } - - #[test] - fn deposit_into_new_account_works() { - run_deposit_into_test(|_| { - let initial_amount = 0; - let additional_amount = ExistentialDeposit::get() + 10_000; - let new_account: AccountId = [42u8; 32].into(); - >::DepositInto::deposit_into( - new_account.clone(), - additional_amount, - ) - .unwrap(); - assert_eq!( - as Currency>::free_balance( - &new_account - ), - initial_amount + additional_amount, - ); - additional_amount - }); + fn call_size() { + const DOT_MAX_CALL_SZ: usize = 230; + assert!(core::mem::size_of::>() <= DOT_MAX_CALL_SZ); + // FIXME: get this down to 230. 
https://github.com/paritytech/grandpa-bridge-gadget/issues/359 + const BEEFY_MAX_CALL_SZ: usize = 232; + assert!(core::mem::size_of::>() <= BEEFY_MAX_CALL_SZ); } } diff --git a/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs b/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs index bf97478a0aa204b34e594f6a0c829821f5a0126f..13a1c6b06ec21a215b9d16599540c816a1c8b23b 100644 --- a/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs +++ b/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs @@ -31,25 +31,34 @@ use frame_support::{ weights::{DispatchClass, Weight}, RuntimeDebug, }; -use sp_runtime::{traits::Zero, FixedPointNumber, FixedU128}; +use scale_info::TypeInfo; +use sp_runtime::{traits::Saturating, FixedPointNumber, FixedU128}; use sp_std::{convert::TryFrom, ops::RangeInclusive}; /// Initial value of `MillauToRialtoConversionRate` parameter. -pub const INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE: FixedU128 = FixedU128::from_inner(FixedU128::DIV); +pub const INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE: FixedU128 = + FixedU128::from_inner(FixedU128::DIV); +/// Initial value of `MillauFeeMultiplier` parameter. +pub const INITIAL_MILLAU_FEE_MULTIPLIER: FixedU128 = FixedU128::from_inner(FixedU128::DIV); parameter_types! { /// Millau to Rialto conversion rate. Initially we treat both tokens as equal. pub storage MillauToRialtoConversionRate: FixedU128 = INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE; + /// Fee multiplier value at Millau chain. + pub storage MillauFeeMultiplier: FixedU128 = INITIAL_MILLAU_FEE_MULTIPLIER; } /// Message payload for Rialto -> Millau messages. -pub type ToMillauMessagePayload = messages::source::FromThisChainMessagePayload; +pub type ToMillauMessagePayload = + messages::source::FromThisChainMessagePayload; /// Message verifier for Rialto -> Millau messages. 
-pub type ToMillauMessageVerifier = messages::source::FromThisChainMessageVerifier; +pub type ToMillauMessageVerifier = + messages::source::FromThisChainMessageVerifier; /// Message payload for Millau -> Rialto messages. -pub type FromMillauMessagePayload = messages::target::FromBridgedChainMessagePayload; +pub type FromMillauMessagePayload = + messages::target::FromBridgedChainMessagePayload; /// Encoded Rialto Call as it comes from Millau. pub type FromMillauEncodedCall = messages::target::FromBridgedChainEncodedMessageCall; @@ -59,14 +68,15 @@ pub type FromMillauMessageDispatch = messages::target::FromBridgedChainMessageDi WithMillauMessageBridge, crate::Runtime, pallet_balances::Pallet, - pallet_bridge_dispatch::DefaultInstance, + (), >; /// Messages proof for Millau -> Rialto messages. pub type FromMillauMessagesProof = messages::target::FromBridgedChainMessagesProof; /// Messages delivery proof for Rialto -> Millau messages. -pub type ToMillauMessagesDeliveryProof = messages::source::FromBridgedChainMessagesDeliveryProof; +pub type ToMillauMessagesDeliveryProof = + messages::source::FromBridgedChainMessagesDeliveryProof; /// Millau <-> Rialto message bridge. 
#[derive(RuntimeDebug, Clone, Copy)] @@ -76,14 +86,16 @@ impl MessageBridge for WithMillauMessageBridge { const RELAYER_FEE_PERCENT: u32 = 10; const THIS_CHAIN_ID: ChainId = RIALTO_CHAIN_ID; const BRIDGED_CHAIN_ID: ChainId = MILLAU_CHAIN_ID; + const BRIDGED_MESSAGES_PALLET_NAME: &'static str = bp_millau::WITH_RIALTO_MESSAGES_PALLET_NAME; type ThisChain = Rialto; type BridgedChain = Millau; - type BridgedMessagesInstance = crate::WithMillauMessagesInstance; fn bridged_balance_to_this_balance(bridged_balance: bp_millau::Balance) -> bp_rialto::Balance { - bp_rialto::Balance::try_from(MillauToRialtoConversionRate::get().saturating_mul_int(bridged_balance)) - .unwrap_or(bp_rialto::Balance::MAX) + bp_rialto::Balance::try_from( + MillauToRialtoConversionRate::get().saturating_mul_int(bridged_balance), + ) + .unwrap_or(bp_rialto::Balance::MAX) } } @@ -128,11 +140,15 @@ impl messages::ThisChainWithMessages for Rialto { } fn transaction_payment(transaction: MessageTransaction) -> bp_rialto::Balance { + // `transaction` may represent transaction from the future, when multiplier value will + // be larger, so let's use slightly increased value + let multiplier = FixedU128::saturating_from_rational(110, 100) + .saturating_mul(pallet_transaction_payment::Pallet::::next_fee_multiplier()); // in our testnets, both per-byte fee and weight-to-fee are 1:1 messages::transaction_payment( bp_rialto::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, 1, - FixedU128::zero(), + multiplier, |weight| weight as _, transaction, ) @@ -159,12 +175,15 @@ impl messages::BridgedChainWithMessages for Millau { fn message_weight_limits(_message_payload: &[u8]) -> RangeInclusive { // we don't want to relay too large messages + keep reserve for future upgrades - let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(bp_millau::max_extrinsic_weight()); + let upper_limit = messages::target::maximal_incoming_message_dispatch_weight( + bp_millau::max_extrinsic_weight(), + ); 
- // we're charging for payload bytes in `WithMillauMessageBridge::transaction_payment` function + // we're charging for payload bytes in `WithMillauMessageBridge::transaction_payment` + // function // - // this bridge may be used to deliver all kind of messages, so we're not making any assumptions about - // minimal dispatch weight here + // this bridge may be used to deliver all kind of messages, so we're not making any + // assumptions about minimal dispatch weight here 0..=upper_limit } @@ -195,11 +214,14 @@ impl messages::BridgedChainWithMessages for Millau { } fn transaction_payment(transaction: MessageTransaction) -> bp_millau::Balance { + // we don't have a direct access to the value of multiplier at Millau chain + // => it is a messages module parameter + let multiplier = MillauFeeMultiplier::get(); // in our testnets, both per-byte fee and weight-to-fee are 1:1 messages::transaction_payment( bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, 1, - FixedU128::zero(), + multiplier, |weight| weight as _, transaction, ) @@ -221,9 +243,11 @@ impl TargetHeaderChain for Millau fn verify_messages_delivery_proof( proof: Self::MessagesDeliveryProof, ) -> Result<(LaneId, InboundLaneData), Self::Error> { - messages::source::verify_messages_delivery_proof::( - proof, - ) + messages::source::verify_messages_delivery_proof::< + WithMillauMessageBridge, + Runtime, + crate::MillauGrandpaInstance, + >(proof) } } @@ -240,15 +264,16 @@ impl SourceHeaderChain for Millau { proof: Self::MessagesProof, messages_count: u32, ) -> Result>, Self::Error> { - messages::target::verify_messages_proof::( - proof, - messages_count, - ) + messages::target::verify_messages_proof::< + WithMillauMessageBridge, + Runtime, + crate::MillauGrandpaInstance, + >(proof, messages_count) } } /// Rialto -> Millau message lane pallet parameters. 
-#[derive(RuntimeDebug, Clone, Encode, Decode, PartialEq, Eq)] +#[derive(RuntimeDebug, Clone, Encode, Decode, PartialEq, Eq, TypeInfo)] pub enum RialtoToMillauMessagesParameter { /// The conversion formula we use is: `RialtoTokens = MillauTokens * conversion_rate`. MillauToRialtoConversionRate(FixedU128), @@ -257,9 +282,8 @@ pub enum RialtoToMillauMessagesParameter { impl MessagesParameter for RialtoToMillauMessagesParameter { fn save(&self) { match *self { - RialtoToMillauMessagesParameter::MillauToRialtoConversionRate(ref conversion_rate) => { - MillauToRialtoConversionRate::set(conversion_rate) - } + RialtoToMillauMessagesParameter::MillauToRialtoConversionRate(ref conversion_rate) => + MillauToRialtoConversionRate::set(conversion_rate), } } } @@ -274,7 +298,9 @@ mod tests { MessageKey, }; use bp_runtime::{derive_account_id, messages::DispatchFeePayment, SourceAccount}; - use bridge_runtime_common::messages::target::{FromBridgedChainEncodedMessageCall, FromBridgedChainMessagePayload}; + use bridge_runtime_common::messages::target::{ + FromBridgedChainEncodedMessageCall, FromBridgedChainMessagePayload, + }; use frame_support::{ traits::Currency, weights::{GetDispatchInfo, WeightToFeePolynomial}, @@ -286,12 +312,15 @@ mod tests { // this test actually belongs to the `bridge-runtime-common` crate, but there we have no // mock runtime. 
Making another one there just for this test, given that both crates // live n single repo is an overkill - let mut ext: sp_io::TestExternalities = SystemConfig::default().build_storage::().unwrap().into(); + let mut ext: sp_io::TestExternalities = + SystemConfig::default().build_storage::().unwrap().into(); ext.execute_with(|| { let bridge = MILLAU_CHAIN_ID; - let call: Call = SystemCall::remark(vec![]).into(); + let call: Call = SystemCall::remark { remark: vec![] }.into(); let dispatch_weight = call.get_dispatch_info().weight; - let dispatch_fee = ::WeightToFee::calc(&dispatch_weight); + let dispatch_fee = ::WeightToFee::calc( + &dispatch_weight, + ); assert!(dispatch_fee > 0); // create relayer account with minimal balance @@ -303,12 +332,13 @@ mod tests { ); // create dispatch account with minimal balance + dispatch fee - let dispatch_account = derive_account_id::<::SourceChainAccountId>( - bridge, - SourceAccount::Root, - ); + let dispatch_account = derive_account_id::< + ::SourceChainAccountId, + >(bridge, SourceAccount::Root); let dispatch_account = - ::AccountIdConverter::convert(dispatch_account); + ::AccountIdConverter::convert( + dispatch_account, + ); let _ = as Currency>::deposit_creating( &dispatch_account, initial_amount + dispatch_fee, @@ -318,10 +348,7 @@ mod tests { FromMillauMessageDispatch::dispatch( &relayer_account, DispatchMessage { - key: MessageKey { - lane_id: Default::default(), - nonce: 0, - }, + key: MessageKey { lane_id: Default::default(), nonce: 0 }, data: DispatchMessageData { payload: Ok(FromBridgedChainMessagePayload:: { spec_version: VERSION.spec_version, @@ -337,11 +364,15 @@ mod tests { // ensure that fee has been transferred from dispatch to relayer account assert_eq!( - as Currency>::free_balance(&relayer_account), + as Currency>::free_balance( + &relayer_account + ), initial_amount + dispatch_fee, ); assert_eq!( - as Currency>::free_balance(&dispatch_account), + as Currency>::free_balance( + &dispatch_account + ), 
initial_amount, ); }); diff --git a/polkadot/bridges/bin/rialto/runtime/src/parachains.rs b/polkadot/bridges/bin/rialto/runtime/src/parachains.rs new file mode 100644 index 0000000000000000000000000000000000000000..332a3387ac69a297d56dd81132d36340e84b42bc --- /dev/null +++ b/polkadot/bridges/bin/rialto/runtime/src/parachains.rs @@ -0,0 +1,160 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Parachains support in Rialto runtime. 
+ +use crate::{AccountId, Balance, Balances, BlockNumber, Event, Origin, Registrar, Runtime, Slots}; + +use frame_support::{parameter_types, weights::Weight}; +use frame_system::EnsureRoot; +use polkadot_primitives::v1::ValidatorIndex; +use polkadot_runtime_common::{paras_registrar, paras_sudo_wrapper, slots}; +use polkadot_runtime_parachains::{ + configuration as parachains_configuration, dmp as parachains_dmp, hrmp as parachains_hrmp, + inclusion as parachains_inclusion, initializer as parachains_initializer, + origin as parachains_origin, paras as parachains_paras, + paras_inherent as parachains_paras_inherent, scheduler as parachains_scheduler, + session_info as parachains_session_info, shared as parachains_shared, ump as parachains_ump, +}; + +/// Special `RewardValidators` that does nothing ;) +pub struct RewardValidators; +impl polkadot_runtime_parachains::inclusion::RewardValidators for RewardValidators { + fn reward_backing(_: impl IntoIterator) {} + fn reward_bitfields(_: impl IntoIterator) {} +} + +// all required parachain modules from `polkadot-runtime-parachains` crate + +impl parachains_configuration::Config for Runtime { + type WeightInfo = parachains_configuration::TestWeightInfo; +} + +impl parachains_dmp::Config for Runtime {} + +impl parachains_hrmp::Config for Runtime { + type Event = Event; + type Origin = Origin; + type Currency = Balances; +} + +impl parachains_inclusion::Config for Runtime { + type Event = Event; + type RewardValidators = RewardValidators; + type DisputesHandler = (); +} + +impl parachains_initializer::Config for Runtime { + type Randomness = pallet_babe::RandomnessFromOneEpochAgo; + type ForceOrigin = EnsureRoot; + type WeightInfo = (); +} + +impl parachains_origin::Config for Runtime {} + +impl parachains_paras::Config for Runtime { + type Origin = Origin; + type Event = Event; + type WeightInfo = parachains_paras::TestWeightInfo; +} + +impl parachains_paras_inherent::Config for Runtime { + type WeightInfo = 
parachains_paras_inherent::TestWeightInfo; +} + +impl parachains_scheduler::Config for Runtime {} + +impl parachains_session_info::Config for Runtime {} + +impl parachains_shared::Config for Runtime {} + +parameter_types! { + pub const FirstMessageFactorPercent: u64 = 100; +} + +impl parachains_ump::Config for Runtime { + type Event = Event; + type UmpSink = (); + type FirstMessageFactorPercent = FirstMessageFactorPercent; + type ExecuteOverweightOrigin = EnsureRoot; +} + +// required onboarding pallets. We're not going to use auctions or crowdloans, so they're missing + +parameter_types! { + pub const ParaDeposit: Balance = 0; + pub const DataDepositPerByte: Balance = 0; +} + +impl paras_registrar::Config for Runtime { + type Event = Event; + type Origin = Origin; + type Currency = Balances; + type OnSwap = Slots; + type ParaDeposit = ParaDeposit; + type DataDepositPerByte = DataDepositPerByte; + type WeightInfo = paras_registrar::TestWeightInfo; +} + +parameter_types! { + pub const LeasePeriod: BlockNumber = 10 * bp_rialto::MINUTES; +} + +impl slots::Config for Runtime { + type Event = Event; + type Currency = Balances; + type Registrar = Registrar; + type LeasePeriod = LeasePeriod; + type WeightInfo = slots::TestWeightInfo; + type LeaseOffset = (); +} + +impl paras_sudo_wrapper::Config for Runtime {} + +pub struct ZeroWeights; + +impl polkadot_runtime_common::paras_registrar::WeightInfo for ZeroWeights { + fn reserve() -> Weight { + 0 + } + fn register() -> Weight { + 0 + } + fn force_register() -> Weight { + 0 + } + fn deregister() -> Weight { + 0 + } + fn swap() -> Weight { + 0 + } +} + +impl polkadot_runtime_common::slots::WeightInfo for ZeroWeights { + fn force_lease() -> Weight { + 0 + } + fn manage_lease_period_start(_c: u32, _t: u32) -> Weight { + 0 + } + fn clear_all_leases() -> Weight { + 0 + } + fn trigger_onboard() -> Weight { + 0 + } +} diff --git a/polkadot/bridges/bin/rialto/runtime/src/rialto_poa.rs 
b/polkadot/bridges/bin/rialto/runtime/src/rialto_poa.rs deleted file mode 100644 index 77bd288e864812a755c11d533a798a20286d9647..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/runtime/src/rialto_poa.rs +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Configuration parameters for the Rialto PoA chain. - -use crate::exchange::EthereumTransactionInclusionProof; - -use bp_eth_poa::{Address, AuraHeader, RawTransaction, U256}; -use bp_header_chain::InclusionProofVerifier; -use frame_support::RuntimeDebug; -use hex_literal::hex; -use pallet_bridge_eth_poa::{ - AuraConfiguration, ChainTime as TChainTime, PruningStrategy as TPruningStrategy, ValidatorsConfiguration, - ValidatorsSource, -}; -use sp_std::prelude::*; - -frame_support::parameter_types! { - pub const FinalityVotesCachingInterval: Option = Some(8); - pub BridgeAuraConfiguration: AuraConfiguration = - aura_configuration(); - pub BridgeValidatorsConfiguration: ValidatorsConfiguration = - validators_configuration(); -} - -/// Max number of finalized headers to keep. -const FINALIZED_HEADERS_TO_KEEP: u64 = 5_000; - -/// Aura engine configuration for Rialto chain. 
-pub fn aura_configuration() -> AuraConfiguration { - AuraConfiguration { - empty_steps_transition: 0xfffffffff, - strict_empty_steps_transition: 0, - validate_step_transition: 0, - validate_score_transition: 0, - two_thirds_majority_transition: u64::MAX, - min_gas_limit: 0x1388.into(), - max_gas_limit: U256::MAX, - maximum_extra_data_size: 0x20, - } -} - -/// Validators configuration for Rialto PoA chain. -pub fn validators_configuration() -> ValidatorsConfiguration { - ValidatorsConfiguration::Single(ValidatorsSource::List(genesis_validators())) -} - -/// Genesis validators set of Rialto PoA chain. -pub fn genesis_validators() -> Vec
{ - vec![ - hex!("005e714f896a8b7cede9d38688c1a81de72a58e4").into(), - hex!("007594304039c2937a12220338aab821d819f5a4").into(), - hex!("004e7a39907f090e19b0b80a277e77b72b22e269").into(), - ] -} - -/// Genesis header of the Rialto PoA chain. -/// -/// To obtain genesis header from a running node, invoke: -/// ```bash -/// $ http localhost:8545 jsonrpc=2.0 id=1 method=eth_getBlockByNumber params:='["earliest", false]' -v -/// ``` -pub fn genesis_header() -> AuraHeader { - AuraHeader { - parent_hash: Default::default(), - timestamp: 0, - number: 0, - author: Default::default(), - transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), - uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").into(), - extra_data: vec![], - state_root: hex!("a992d04c791620ed7ed96555a80cf0568355bb4bee2656f46899a4372f25f248").into(), - receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), - log_bloom: Default::default(), - gas_used: Default::default(), - gas_limit: 0x222222.into(), - difficulty: 0x20000.into(), - seal: vec![vec![0x80], { - let mut vec = vec![0xb8, 0x41]; - vec.resize(67, 0); - vec - }], - } -} - -/// Rialto PoA headers pruning strategy. -/// -/// We do not prune unfinalized headers because exchange module only accepts -/// claims from finalized headers. And if we're pruning unfinalized headers, then -/// some claims may never be accepted. 
-#[derive(Default, RuntimeDebug)] -pub struct PruningStrategy; - -impl TPruningStrategy for PruningStrategy { - fn pruning_upper_bound(&mut self, _best_number: u64, best_finalized_number: u64) -> u64 { - best_finalized_number.saturating_sub(FINALIZED_HEADERS_TO_KEEP) - } -} - -/// `ChainTime` provider -#[derive(Default)] -pub struct ChainTime; - -impl TChainTime for ChainTime { - fn is_timestamp_ahead(&self, timestamp: u64) -> bool { - let now = super::Timestamp::now(); - timestamp > now - } -} - -/// The Rialto PoA Blockchain as seen by the runtime. -pub struct RialtoBlockchain; - -impl InclusionProofVerifier for RialtoBlockchain { - type Transaction = RawTransaction; - type TransactionInclusionProof = EthereumTransactionInclusionProof; - - fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option { - let is_transaction_finalized = - crate::BridgeRialtoPoa::verify_transaction_finalized(proof.block, proof.index, &proof.proof); - - if !is_transaction_finalized { - return None; - } - - proof.proof.get(proof.index as usize).map(|(tx, _)| tx.clone()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn genesis_hash_matches() { - assert_eq!( - genesis_header().compute_hash(), - hex!("1468e1a0fa20d30025a5a0f87e1cced4fdc393b84b7d2850b11ca5863db482cb").into(), - ); - } - - #[test] - fn pruning_strategy_keeps_enough_headers() { - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 1_000), - 0, - "1_000 <= 5_000 => nothing should be pruned yet", - ); - - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 5_000), - 0, - "5_000 <= 5_000 => nothing should be pruned yet", - ); - - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 10_000), - 5_000, - "5_000 <= 10_000 => we're ready to prune first 5_000 headers", - ); - } -} diff --git a/polkadot/bridges/bin/runtime-common/Cargo.toml b/polkadot/bridges/bin/runtime-common/Cargo.toml index 
928523af0256a87b626cf934b6d6770ea376994d..4e693f05451c97de2d7452c9e3137976393dc0f6 100644 --- a/polkadot/bridges/bin/runtime-common/Cargo.toml +++ b/polkadot/bridges/bin/runtime-common/Cargo.toml @@ -8,7 +8,7 @@ repository = "https://github.com/paritytech/parity-bridges-common/" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["derive"] } ed25519-dalek = { version = "1.0", default-features = false, optional = true } hash-db = { version = "0.15.2", default-features = false } scale-info = { version = "1.0", default-features = false, features = ["derive"] } @@ -24,13 +24,13 @@ pallet-bridge-messages = { path = "../../modules/messages", default-features = f # Substrate dependencies -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [features] default = ["std"] diff --git a/polkadot/bridges/bin/runtime-common/src/lib.rs b/polkadot/bridges/bin/runtime-common/src/lib.rs index ae7efb4a4196866f61f94a8e627f603f1afc7fb6..66f2c6c3a01f1e8178a73ecdbc67404d90db6ddf 100644 --- a/polkadot/bridges/bin/runtime-common/src/lib.rs +++ b/polkadot/bridges/bin/runtime-common/src/lib.rs @@ -19,4 +19,5 @@ #![cfg_attr(not(feature = "std"), no_std)] pub mod messages; +pub mod messages_api; pub mod messages_benchmarking; diff --git a/polkadot/bridges/bin/runtime-common/src/messages.rs b/polkadot/bridges/bin/runtime-common/src/messages.rs index 08f766e2368642b2e4c4255512a665987299b003..b34cbb85540d49e2169655d324a0aa614c1e6ad4 100644 --- a/polkadot/bridges/bin/runtime-common/src/messages.rs +++ b/polkadot/bridges/bin/runtime-common/src/messages.rs @@ -32,17 +32,20 @@ use bp_runtime::{ }; use codec::{Decode, Encode}; use frame_support::{ - traits::{Currency, ExistenceRequirement, Instance}, + traits::{Currency, ExistenceRequirement}, weights::{Weight, WeightToFeePolynomial}, RuntimeDebug, }; use hash_db::Hasher; use scale_info::TypeInfo; use sp_runtime::{ - traits::{AtLeast32BitUnsigned, CheckedAdd, CheckedDiv, CheckedMul}, + traits::{AtLeast32BitUnsigned, CheckedAdd, CheckedDiv, CheckedMul, Saturating, Zero}, FixedPointNumber, FixedPointOperand, FixedU128, }; -use sp_std::{cmp::PartialOrd, convert::TryFrom, fmt::Debug, marker::PhantomData, ops::RangeInclusive, vec::Vec}; +use 
sp_std::{ + cmp::PartialOrd, convert::TryFrom, fmt::Debug, marker::PhantomData, ops::RangeInclusive, + vec::Vec, +}; use sp_trie::StorageProof; /// Bidirectional message bridge. @@ -54,16 +57,20 @@ pub trait MessageBridge { const THIS_CHAIN_ID: ChainId; /// Identifier of the Bridged chain. const BRIDGED_CHAIN_ID: ChainId; + /// Name of the paired messages pallet instance at the Bridged chain. + /// + /// Should be the name that is used in the `construct_runtime!()` macro. + const BRIDGED_MESSAGES_PALLET_NAME: &'static str; /// This chain in context of message bridge. type ThisChain: ThisChainWithMessages; /// Bridged chain in context of message bridge. type BridgedChain: BridgedChainWithMessages; - /// Instance of the `pallet-bridge-messages` pallet at the Bridged chain. - type BridgedMessagesInstance: Instance; /// Convert Bridged chain balance into This chain balance. - fn bridged_balance_to_this_balance(bridged_balance: BalanceOf>) -> BalanceOf>; + fn bridged_balance_to_this_balance( + bridged_balance: BalanceOf>, + ) -> BalanceOf>; } /// Chain that has `pallet-bridge-messages` and `dispatch` modules. @@ -73,16 +80,23 @@ pub trait ChainWithMessages { /// Accound id on the chain. type AccountId: Encode + Decode; /// Public key of the chain account that may be used to verify signatures. - type Signer: Decode; + type Signer: Encode + Decode; /// Signature type used on the chain. - type Signature: Decode; + type Signature: Encode + Decode; /// Type of weight that is used on the chain. This would almost always be a regular /// `frame_support::weight::Weight`. But since the meaning of weight on different chains /// may be different, the `WeightOf<>` construct is used to avoid confusion between /// different weights. type Weight: From + PartialOrd; /// Type of balances that is used on the chain. 
- type Balance: Encode + Decode + CheckedAdd + CheckedDiv + CheckedMul + PartialOrd + From + Copy; + type Balance: Encode + + Decode + + CheckedAdd + + CheckedDiv + + CheckedMul + + PartialOrd + + From + + Copy; } /// Message related transaction parameters estimation. @@ -137,30 +151,40 @@ pub trait BridgedChainWithMessages: ChainWithMessages { message_dispatch_weight: WeightOf, ) -> MessageTransaction>; - /// Returns minimal transaction fee that must be paid for given transaction at the Bridged chain. + /// Returns minimal transaction fee that must be paid for given transaction at the Bridged + /// chain. fn transaction_payment(transaction: MessageTransaction>) -> BalanceOf; } -pub(crate) type ThisChain = ::ThisChain; -pub(crate) type BridgedChain = ::BridgedChain; -pub(crate) type HashOf = ::Hash; -pub(crate) type AccountIdOf = ::AccountId; -pub(crate) type SignerOf = ::Signer; -pub(crate) type SignatureOf = ::Signature; -pub(crate) type WeightOf = ::Weight; -pub(crate) type BalanceOf = ::Balance; - -pub(crate) type CallOf = ::Call; +/// This chain in context of message bridge. +pub type ThisChain = ::ThisChain; +/// Bridged chain in context of message bridge. +pub type BridgedChain = ::BridgedChain; +/// Hash used on the chain. +pub type HashOf = ::Hash; +/// Account id used on the chain. +pub type AccountIdOf = ::AccountId; +/// Public key of the chain account that may be used to verify signature. +pub type SignerOf = ::Signer; +/// Signature type used on the chain. +pub type SignatureOf = ::Signature; +/// Type of weight that used on the chain. +pub type WeightOf = ::Weight; +/// Type of balances that is used on the chain. +pub type BalanceOf = ::Balance; +/// Type of call that is used on this chain. +pub type CallOf = ::Call; /// Raw storage proof type (just raw trie nodes). type RawStorageProof = Vec>; /// Compute fee of transaction at runtime where regular transaction payment pallet is being used. 
/// -/// The value of `multiplier` parameter is the expected value of `pallet_transaction_payment::NextFeeMultiplier` -/// at the moment when transaction is submitted. If you're charging this payment in advance (and that's what -/// happens with delivery and confirmation transaction in this crate), then there's a chance that the actual -/// fee will be larger than what is paid in advance. So the value must be chosen carefully. +/// The value of `multiplier` parameter is the expected value of +/// `pallet_transaction_payment::NextFeeMultiplier` at the moment when transaction is submitted. If +/// you're charging this payment in advance (and that's what happens with delivery and confirmation +/// transaction in this crate), then there's a chance that the actual fee will be larger than what +/// is paid in advance. So the value must be chosen carefully. pub fn transaction_payment( base_extrinsic_weight: Weight, per_byte_fee: Balance, @@ -223,7 +247,8 @@ pub mod source { } /// 'Parsed' message delivery proof - inbound lane id and its state. - pub type ParsedMessagesDeliveryProofFromBridgedChain = (LaneId, InboundLaneData>>); + pub type ParsedMessagesDeliveryProofFromBridgedChain = + (LaneId, InboundLaneData>>); /// Message verifier that is doing all basic checks. /// @@ -235,19 +260,30 @@ pub mod source { /// Following checks are made: /// /// - message is rejected if its lane is currently blocked; - /// - message is rejected if there are too many pending (undelivered) messages at the outbound lane; - /// - check that the sender has rights to dispatch the call on target chain using provided dispatch origin; + /// - message is rejected if there are too many pending (undelivered) messages at the outbound + /// lane; + /// - check that the sender has rights to dispatch the call on target chain using provided + /// dispatch origin; /// - check that the sender has paid enough funds for both message delivery and dispatch. 
#[derive(RuntimeDebug)] pub struct FromThisChainMessageVerifier(PhantomData); - pub(crate) const OUTBOUND_LANE_DISABLED: &str = "The outbound message lane is disabled."; - pub(crate) const TOO_MANY_PENDING_MESSAGES: &str = "Too many pending messages at the lane."; - pub(crate) const BAD_ORIGIN: &str = "Unable to match the source origin to expected target origin."; - pub(crate) const TOO_LOW_FEE: &str = "Provided fee is below minimal threshold required by the lane."; - - impl LaneMessageVerifier>, FromThisChainMessagePayload, BalanceOf>> - for FromThisChainMessageVerifier + /// The error message returned from LaneMessageVerifier when outbound lane is disabled. + pub const OUTBOUND_LANE_DISABLED: &str = "The outbound message lane is disabled."; + /// The error message returned from LaneMessageVerifier when too many pending messages at the + /// lane. + pub const TOO_MANY_PENDING_MESSAGES: &str = "Too many pending messages at the lane."; + /// The error message returned from LaneMessageVerifier when call origin is mismatch. + pub const BAD_ORIGIN: &str = "Unable to match the source origin to expected target origin."; + /// The error message returned from LaneMessageVerifier when the message fee is too low. 
+ pub const TOO_LOW_FEE: &str = "Provided fee is below minimal threshold required by the lane."; + + impl + LaneMessageVerifier< + AccountIdOf>, + FromThisChainMessagePayload, + BalanceOf>, + > for FromThisChainMessageVerifier where B: MessageBridge, AccountIdOf>: PartialEq + Clone, @@ -263,7 +299,7 @@ pub mod source { ) -> Result<(), Self::Error> { // reject message if lane is blocked if !ThisChain::::is_outbound_lane_enabled(lane) { - return Err(OUTBOUND_LANE_DISABLED); + return Err(OUTBOUND_LANE_DISABLED) } // reject message if there are too many pending messages at this lane @@ -272,19 +308,20 @@ pub mod source { .latest_generated_nonce .saturating_sub(lane_outbound_data.latest_received_nonce); if pending_messages > max_pending_messages { - return Err(TOO_MANY_PENDING_MESSAGES); + return Err(TOO_MANY_PENDING_MESSAGES) } // Do the dispatch-specific check. We assume that the target chain uses // `Dispatch`, so we verify the message accordingly. - pallet_bridge_dispatch::verify_message_origin(submitter, payload).map_err(|_| BAD_ORIGIN)?; + pallet_bridge_dispatch::verify_message_origin(submitter, payload) + .map_err(|_| BAD_ORIGIN)?; let minimal_fee_in_this_tokens = estimate_message_dispatch_and_delivery_fee::(payload, B::RELAYER_FEE_PERCENT)?; // compare with actual fee paid if *delivery_and_dispatch_fee < minimal_fee_in_this_tokens { - return Err(TOO_LOW_FEE); + return Err(TOO_LOW_FEE) } Ok(()) @@ -306,13 +343,13 @@ pub mod source { ) -> Result<(), &'static str> { let weight_limits = BridgedChain::::message_weight_limits(&payload.call); if !weight_limits.contains(&payload.weight.into()) { - return Err("Incorrect message weight declared"); + return Err("Incorrect message weight declared") } // The maximal size of extrinsic at Substrate-based chain depends on the - // `frame_system::Config::MaximumBlockLength` and `frame_system::Config::AvailableBlockRatio` - // constants. 
This check is here to be sure that the lane won't stuck because message is too - // large to fit into delivery transaction. + // `frame_system::Config::MaximumBlockLength` and + // `frame_system::Config::AvailableBlockRatio` constants. This check is here to be sure that + // the lane won't stuck because message is too large to fit into delivery transaction. // // **IMPORTANT NOTE**: the delivery transaction contains storage proof of the message, not // the message itself. The proof is always larger than the message. But unless chain state @@ -320,16 +357,17 @@ pub mod source { // transaction also contains signatures and signed extensions. Because of this, we reserve // 1/3 of the the maximal extrinsic weight for this data. if payload.call.len() > maximal_message_size::() as usize { - return Err("The message is too large to be sent over the lane"); + return Err("The message is too large to be sent over the lane") } Ok(()) } - /// Estimate delivery and dispatch fee that must be paid for delivering a message to the Bridged chain. + /// Estimate delivery and dispatch fee that must be paid for delivering a message to the Bridged + /// chain. /// - /// The fee is paid in This chain Balance, but we use Bridged chain balance to avoid additional conversions. - /// Returns `None` if overflow has happened. + /// The fee is paid in This chain Balance, but we use Bridged chain balance to avoid additional + /// conversions. Returns `None` if overflow has happened. 
pub fn estimate_message_dispatch_and_delivery_fee( payload: &FromThisChainMessagePayload, relayer_fee_percent: u32, @@ -338,25 +376,23 @@ pub mod source { // // if we're going to pay dispatch fee at the target chain, then we don't include weight // of the message dispatch in the delivery transaction cost - let pay_dispatch_fee_at_target_chain = payload.dispatch_fee_payment == DispatchFeePayment::AtTargetChain; + let pay_dispatch_fee_at_target_chain = + payload.dispatch_fee_payment == DispatchFeePayment::AtTargetChain; let delivery_transaction = BridgedChain::::estimate_delivery_transaction( - &payload.call, + &payload.encode(), pay_dispatch_fee_at_target_chain, - if pay_dispatch_fee_at_target_chain { - 0.into() - } else { - payload.weight.into() - }, + if pay_dispatch_fee_at_target_chain { 0.into() } else { payload.weight.into() }, ); let delivery_transaction_fee = BridgedChain::::transaction_payment(delivery_transaction); // the fee (in This tokens) of all transactions that are made on This chain let confirmation_transaction = ThisChain::::estimate_delivery_confirmation_transaction(); - let confirmation_transaction_fee = ThisChain::::transaction_payment(confirmation_transaction); + let confirmation_transaction_fee = + ThisChain::::transaction_payment(confirmation_transaction); // minimal fee (in This tokens) is a sum of all required fees - let minimal_fee = - B::bridged_balance_to_this_balance(delivery_transaction_fee).checked_add(&confirmation_transaction_fee); + let minimal_fee = B::bridged_balance_to_this_balance(delivery_transaction_fee) + .checked_add(&confirmation_transaction_fee); // before returning, add extra fee that is paid to the relayer (relayer interest) minimal_fee @@ -377,14 +413,14 @@ pub mod source { ) -> Result, &'static str> where ThisRuntime: pallet_bridge_grandpa::Config, - HashOf>: - Into>::BridgedChain>>, + HashOf>: Into< + bp_runtime::HashOf< + >::BridgedChain, + >, + >, { - let FromBridgedChainMessagesDeliveryProof { - 
bridged_header_hash, - storage_proof, - lane, - } = proof; + let FromBridgedChainMessagesDeliveryProof { bridged_header_hash, storage_proof, lane } = + proof; pallet_bridge_grandpa::Pallet::::parse_finalized_storage_proof( bridged_header_hash.into(), StorageProof::new(storage_proof), @@ -392,7 +428,7 @@ pub mod source { // Messages delivery proof is just proof of single storage key read => any error // is fatal. let storage_inbound_lane_data_key = - pallet_bridge_messages::storage_keys::inbound_lane_data_key::(&lane); + pallet_bridge_messages::storage_keys::inbound_lane_data_key(B::BRIDGED_MESSAGES_PALLET_NAME, &lane); let raw_inbound_lane_data = storage .read_value(storage_inbound_lane_data_key.0.as_ref()) .map_err(|_| "Failed to read inbound lane state from storage proof")? @@ -469,14 +505,13 @@ pub mod target { impl FromBridgedChainEncodedMessageCall { /// Create encoded call. pub fn new(encoded_call: Vec) -> Self { - FromBridgedChainEncodedMessageCall { - encoded_call, - _marker: PhantomData::default(), - } + FromBridgedChainEncodedMessageCall { encoded_call, _marker: PhantomData::default() } } } - impl From> for Result { + impl From> + for Result + { fn from(encoded_call: FromBridgedChainEncodedMessageCall) -> Self { DecodedCall::decode(&mut &encoded_call.encoded_call[..]).map_err(drop) } @@ -492,20 +527,24 @@ pub mod target { MessageDispatch>, BalanceOf>> for FromBridgedChainMessageDispatch where - ThisDispatchInstance: frame_support::traits::Instance, - ThisRuntime: pallet_bridge_dispatch::Config - + pallet_transaction_payment::Config, + BalanceOf>: Saturating + FixedPointOperand, + ThisDispatchInstance: 'static, + ThisRuntime: pallet_bridge_dispatch::Config< + ThisDispatchInstance, + BridgeMessageId = (LaneId, MessageNonce), + > + pallet_transaction_payment::Config, ::OnChargeTransaction: - pallet_transaction_payment::OnChargeTransaction>>, + pallet_transaction_payment::OnChargeTransaction< + ThisRuntime, + Balance = BalanceOf>, + >, ThisCurrency: 
Currency>, Balance = BalanceOf>>, - >::Event: From< - pallet_bridge_dispatch::RawEvent<(LaneId, MessageNonce), AccountIdOf>, ThisDispatchInstance>, - >, - pallet_bridge_dispatch::Pallet: bp_message_dispatch::MessageDispatch< - AccountIdOf>, - (LaneId, MessageNonce), - Message = FromBridgedChainMessagePayload, - >, + pallet_bridge_dispatch::Pallet: + bp_message_dispatch::MessageDispatch< + AccountIdOf>, + (LaneId, MessageNonce), + Message = FromBridgedChainMessagePayload, + >, { type DispatchPayload = FromBridgedChainMessagePayload; @@ -526,13 +565,22 @@ pub mod target { message_id, message.data.payload.map_err(drop), |dispatch_origin, dispatch_weight| { - ThisCurrency::transfer( - dispatch_origin, - relayer_account, - ThisRuntime::WeightToFee::calc(&dispatch_weight), - ExistenceRequirement::AllowDeath, - ) - .map_err(drop) + let unadjusted_weight_fee = ThisRuntime::WeightToFee::calc(&dispatch_weight); + let fee_multiplier = + pallet_transaction_payment::Pallet::::next_fee_multiplier(); + let adjusted_weight_fee = + fee_multiplier.saturating_mul_int(unadjusted_weight_fee); + if !adjusted_weight_fee.is_zero() { + ThisCurrency::transfer( + dispatch_origin, + relayer_account, + adjusted_weight_fee, + ExistenceRequirement::AllowDeath, + ) + .map_err(drop) + } else { + Ok(()) + } }, ) } @@ -559,9 +607,11 @@ pub mod target { ) -> Result>>>, &'static str> where ThisRuntime: pallet_bridge_grandpa::Config, - ThisRuntime: pallet_bridge_messages::Config, - HashOf>: - Into>::BridgedChain>>, + HashOf>: Into< + bp_runtime::HashOf< + >::BridgedChain, + >, + >, { verify_messages_proof_with_parser::( proof, @@ -596,12 +646,13 @@ pub mod target { fn from(err: MessageProofError) -> &'static str { match err { MessageProofError::Empty => "Messages proof is empty", - MessageProofError::MessagesCountMismatch => "Declared messages count doesn't match actual value", + MessageProofError::MessagesCountMismatch => + "Declared messages count doesn't match actual value", 
MessageProofError::MissingRequiredMessage => "Message is missing from the proof", - MessageProofError::FailedToDecodeMessage => "Failed to decode message from the proof", - MessageProofError::FailedToDecodeOutboundLaneState => { - "Failed to decode outbound lane data from the proof" - } + MessageProofError::FailedToDecodeMessage => + "Failed to decode message from the proof", + MessageProofError::FailedToDecodeOutboundLaneState => + "Failed to decode outbound lane data from the proof", MessageProofError::Custom(err) => err, } } @@ -624,14 +675,16 @@ pub mod target { { fn read_raw_outbound_lane_data(&self, lane_id: &LaneId) -> Option> { let storage_outbound_lane_data_key = - pallet_bridge_messages::storage_keys::outbound_lane_data_key::(lane_id); - self.storage - .read_value(storage_outbound_lane_data_key.0.as_ref()) - .ok()? + pallet_bridge_messages::storage_keys::outbound_lane_data_key( + B::BRIDGED_MESSAGES_PALLET_NAME, + lane_id, + ); + self.storage.read_value(storage_outbound_lane_data_key.0.as_ref()).ok()? 
} fn read_raw_message(&self, message_key: &MessageKey) -> Option> { - let storage_message_key = pallet_bridge_messages::storage_keys::message_key::( + let storage_message_key = pallet_bridge_messages::storage_keys::message_key( + B::BRIDGED_MESSAGES_PALLET_NAME, &message_key.lane_id, message_key.nonce, ); @@ -646,7 +699,8 @@ pub mod target { build_parser: BuildParser, ) -> Result>>>, MessageProofError> where - BuildParser: FnOnce(HashOf>, RawStorageProof) -> Result, + BuildParser: + FnOnce(HashOf>, RawStorageProof) -> Result, Parser: MessageProofParser, { let FromBridgedChainMessagesProof { @@ -658,18 +712,19 @@ pub mod target { } = proof; // receiving proofs where end < begin is ok (if proof includes outbound lane state) - let messages_in_the_proof = if let Some(nonces_difference) = nonces_end.checked_sub(nonces_start) { - // let's check that the user (relayer) has passed correct `messages_count` - // (this bounds maximal capacity of messages vec below) - let messages_in_the_proof = nonces_difference.saturating_add(1); - if messages_in_the_proof != MessageNonce::from(messages_count) { - return Err(MessageProofError::MessagesCountMismatch); - } + let messages_in_the_proof = + if let Some(nonces_difference) = nonces_end.checked_sub(nonces_start) { + // let's check that the user (relayer) has passed correct `messages_count` + // (this bounds maximal capacity of messages vec below) + let messages_in_the_proof = nonces_difference.saturating_add(1); + if messages_in_the_proof != MessageNonce::from(messages_count) { + return Err(MessageProofError::MessagesCountMismatch) + } - messages_in_the_proof - } else { - 0 - }; + messages_in_the_proof + } else { + 0 + }; let parser = build_parser(bridged_header_hash, storage_proof)?; @@ -683,20 +738,15 @@ pub mod target { let raw_message_data = parser .read_raw_message(&message_key) .ok_or(MessageProofError::MissingRequiredMessage)?; - let message_data = MessageData::>>::decode(&mut &raw_message_data[..]) - .map_err(|_| 
MessageProofError::FailedToDecodeMessage)?; - messages.push(Message { - key: message_key, - data: message_data, - }); + let message_data = + MessageData::>>::decode(&mut &raw_message_data[..]) + .map_err(|_| MessageProofError::FailedToDecodeMessage)?; + messages.push(Message { key: message_key, data: message_data }); } // Now let's check if proof contains outbound lane state proof. It is optional, so we // simply ignore `read_value` errors and missing value. - let mut proved_lane_messages = ProvedLaneMessages { - lane_state: None, - messages, - }; + let mut proved_lane_messages = ProvedLaneMessages { lane_state: None, messages }; let raw_outbound_lane_data = parser.read_raw_outbound_lane_data(&lane); if let Some(raw_outbound_lane_data) = raw_outbound_lane_data { proved_lane_messages.lane_state = Some( @@ -707,7 +757,7 @@ pub mod target { // Now we may actually check if the proof is empty or not. if proved_lane_messages.lane_state.is_none() && proved_lane_messages.messages.is_empty() { - return Err(MessageProofError::Empty); + return Err(MessageProofError::Empty) } // We only support single lane messages in this schema @@ -733,7 +783,8 @@ mod tests { const BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT: Weight = 2048; const BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE: u32 = 1024; - /// Bridge that is deployed on ThisChain and allows sending/receiving messages to/from BridgedChain; + /// Bridge that is deployed on ThisChain and allows sending/receiving messages to/from + /// BridgedChain; #[derive(Debug, PartialEq, Eq)] struct OnThisChainBridge; @@ -741,17 +792,20 @@ mod tests { const RELAYER_FEE_PERCENT: u32 = 10; const THIS_CHAIN_ID: ChainId = *b"this"; const BRIDGED_CHAIN_ID: ChainId = *b"brdg"; + const BRIDGED_MESSAGES_PALLET_NAME: &'static str = ""; type ThisChain = ThisChain; type BridgedChain = BridgedChain; - type BridgedMessagesInstance = pallet_bridge_messages::DefaultInstance; - fn bridged_balance_to_this_balance(bridged_balance: BridgedChainBalance) -> ThisChainBalance { + fn 
bridged_balance_to_this_balance( + bridged_balance: BridgedChainBalance, + ) -> ThisChainBalance { ThisChainBalance(bridged_balance.0 * BRIDGED_CHAIN_TO_THIS_CHAIN_BALANCE_RATE as u32) } } - /// Bridge that is deployed on BridgedChain and allows sending/receiving messages to/from ThisChain; + /// Bridge that is deployed on BridgedChain and allows sending/receiving messages to/from + /// ThisChain; #[derive(Debug, PartialEq, Eq)] struct OnBridgedChainBridge; @@ -759,10 +813,10 @@ mod tests { const RELAYER_FEE_PERCENT: u32 = 20; const THIS_CHAIN_ID: ChainId = *b"brdg"; const BRIDGED_CHAIN_ID: ChainId = *b"this"; + const BRIDGED_MESSAGES_PALLET_NAME: &'static str = ""; type ThisChain = BridgedChain; type BridgedChain = ThisChain; - type BridgedMessagesInstance = pallet_bridge_messages::DefaultInstance; fn bridged_balance_to_this_balance(_this_balance: ThisChainBalance) -> BridgedChainBalance { unreachable!() @@ -886,7 +940,9 @@ mod tests { } fn transaction_payment(transaction: MessageTransaction>) -> BalanceOf { - ThisChainBalance(transaction.dispatch_weight as u32 * THIS_CHAIN_WEIGHT_TO_BALANCE_RATE as u32) + ThisChainBalance( + transaction.dispatch_weight as u32 * THIS_CHAIN_WEIGHT_TO_BALANCE_RATE as u32, + ) } } @@ -907,7 +963,9 @@ mod tests { unreachable!() } - fn transaction_payment(_transaction: MessageTransaction>) -> BalanceOf { + fn transaction_payment( + _transaction: MessageTransaction>, + ) -> BalanceOf { unreachable!() } } @@ -938,7 +996,9 @@ mod tests { unreachable!() } - fn transaction_payment(_transaction: MessageTransaction>) -> BalanceOf { + fn transaction_payment( + _transaction: MessageTransaction>, + ) -> BalanceOf { unreachable!() } } @@ -949,7 +1009,8 @@ mod tests { } fn message_weight_limits(message_payload: &[u8]) -> RangeInclusive { - let begin = std::cmp::min(BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, message_payload.len() as Weight); + let begin = + std::cmp::min(BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, message_payload.len() as Weight); 
begin..=BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT } @@ -965,7 +1026,9 @@ mod tests { } fn transaction_payment(transaction: MessageTransaction>) -> BalanceOf { - BridgedChainBalance(transaction.dispatch_weight as u32 * BRIDGED_CHAIN_WEIGHT_TO_BALANCE_RATE as u32) + BridgedChainBalance( + transaction.dispatch_weight as u32 * BRIDGED_CHAIN_WEIGHT_TO_BALANCE_RATE as u32, + ) } } @@ -976,19 +1039,22 @@ mod tests { #[test] fn message_from_bridged_chain_is_decoded() { // the message is encoded on the bridged chain - let message_on_bridged_chain = source::FromThisChainMessagePayload:: { - spec_version: 1, - weight: 100, - origin: bp_message_dispatch::CallOrigin::SourceRoot, - dispatch_fee_payment: DispatchFeePayment::AtTargetChain, - call: ThisChainCall::Transfer.encode(), - } - .encode(); + let message_on_bridged_chain = + source::FromThisChainMessagePayload:: { + spec_version: 1, + weight: 100, + origin: bp_message_dispatch::CallOrigin::SourceRoot, + dispatch_fee_payment: DispatchFeePayment::AtTargetChain, + call: ThisChainCall::Transfer.encode(), + } + .encode(); // and sent to this chain where it is decoded let message_on_this_chain = - target::FromBridgedChainMessagePayload::::decode(&mut &message_on_bridged_chain[..]) - .unwrap(); + target::FromBridgedChainMessagePayload::::decode( + &mut &message_on_bridged_chain[..], + ) + .unwrap(); assert_eq!( message_on_this_chain, target::FromBridgedChainMessagePayload:: { @@ -1007,7 +1073,8 @@ mod tests { const TEST_LANE_ID: &LaneId = b"test"; const MAXIMAL_PENDING_MESSAGES_AT_TEST_LANE: MessageNonce = 32; - fn regular_outbound_message_payload() -> source::FromThisChainMessagePayload { + fn regular_outbound_message_payload() -> source::FromThisChainMessagePayload + { source::FromThisChainMessagePayload:: { spec_version: 1, weight: 100, @@ -1036,11 +1103,14 @@ mod tests { // let's check if estimation is less than hardcoded, if dispatch is paid at target chain let mut payload_with_pay_on_target = regular_outbound_message_payload(); 
payload_with_pay_on_target.dispatch_fee_payment = DispatchFeePayment::AtTargetChain; - let fee_at_source = source::estimate_message_dispatch_and_delivery_fee::( - &payload_with_pay_on_target, - OnThisChainBridge::RELAYER_FEE_PERCENT, - ) - .expect("estimate_message_dispatch_and_delivery_fee failed for pay-at-target-chain message"); + let fee_at_source = + source::estimate_message_dispatch_and_delivery_fee::( + &payload_with_pay_on_target, + OnThisChainBridge::RELAYER_FEE_PERCENT, + ) + .expect( + "estimate_message_dispatch_and_delivery_fee failed for pay-at-target-chain message", + ); assert!( fee_at_source < EXPECTED_MINIMAL_FEE.into(), "Computed fee {:?} without prepaid dispatch must be less than the fee with prepaid dispatch {}", @@ -1059,16 +1129,14 @@ mod tests { ), Err(source::TOO_LOW_FEE) ); - assert!( - source::FromThisChainMessageVerifier::::verify_message( - &Sender::Root, - &ThisChainBalance(1_000_000), - TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ) - .is_ok(), - ); + assert!(source::FromThisChainMessageVerifier::::verify_message( + &Sender::Root, + &ThisChainBalance(1_000_000), + TEST_LANE_ID, + &test_lane_outbound_data(), + &payload, + ) + .is_ok(),); } #[test] @@ -1103,16 +1171,14 @@ mod tests { ), Err(source::BAD_ORIGIN) ); - assert!( - source::FromThisChainMessageVerifier::::verify_message( - &Sender::Root, - &ThisChainBalance(1_000_000), - TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ) - .is_ok(), - ); + assert!(source::FromThisChainMessageVerifier::::verify_message( + &Sender::Root, + &ThisChainBalance(1_000_000), + TEST_LANE_ID, + &test_lane_outbound_data(), + &payload, + ) + .is_ok(),); } #[test] @@ -1137,16 +1203,14 @@ mod tests { ), Err(source::BAD_ORIGIN) ); - assert!( - source::FromThisChainMessageVerifier::::verify_message( - &Sender::Signed(ThisChainAccountId(1)), - &ThisChainBalance(1_000_000), - TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ) - .is_ok(), - ); + 
assert!(source::FromThisChainMessageVerifier::::verify_message( + &Sender::Signed(ThisChainAccountId(1)), + &ThisChainBalance(1_000_000), + TEST_LANE_ID, + &test_lane_outbound_data(), + &payload, + ) + .is_ok(),); } #[test] @@ -1183,64 +1247,58 @@ mod tests { #[test] fn verify_chain_message_rejects_message_with_too_small_declared_weight() { - assert!( - source::verify_chain_message::(&source::FromThisChainMessagePayload::< - OnThisChainBridge, - > { + assert!(source::verify_chain_message::( + &source::FromThisChainMessagePayload:: { spec_version: 1, weight: 5, origin: bp_message_dispatch::CallOrigin::SourceRoot, dispatch_fee_payment: DispatchFeePayment::AtSourceChain, call: vec![1, 2, 3, 4, 5, 6], - },) - .is_err() - ); + }, + ) + .is_err()); } #[test] fn verify_chain_message_rejects_message_with_too_large_declared_weight() { - assert!( - source::verify_chain_message::(&source::FromThisChainMessagePayload::< - OnThisChainBridge, - > { + assert!(source::verify_chain_message::( + &source::FromThisChainMessagePayload:: { spec_version: 1, weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT + 1, origin: bp_message_dispatch::CallOrigin::SourceRoot, dispatch_fee_payment: DispatchFeePayment::AtSourceChain, call: vec![1, 2, 3, 4, 5, 6], - },) - .is_err() - ); + }, + ) + .is_err()); } #[test] fn verify_chain_message_rejects_message_too_large_message() { - assert!( - source::verify_chain_message::(&source::FromThisChainMessagePayload::< - OnThisChainBridge, - > { + assert!(source::verify_chain_message::( + &source::FromThisChainMessagePayload:: { spec_version: 1, weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, origin: bp_message_dispatch::CallOrigin::SourceRoot, dispatch_fee_payment: DispatchFeePayment::AtSourceChain, call: vec![0; source::maximal_message_size::() as usize + 1], - },) - .is_err() - ); + }, + ) + .is_err()); } #[test] fn verify_chain_message_accepts_maximal_message() { assert_eq!( - source::verify_chain_message::(&source::FromThisChainMessagePayload::< - OnThisChainBridge, 
- > { - spec_version: 1, - weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, - origin: bp_message_dispatch::CallOrigin::SourceRoot, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - call: vec![0; source::maximal_message_size::() as _], - },), + source::verify_chain_message::( + &source::FromThisChainMessagePayload:: { + spec_version: 1, + weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, + origin: bp_message_dispatch::CallOrigin::SourceRoot, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, + call: vec![0; source::maximal_message_size::() as _], + }, + ), Ok(()), ); } @@ -1332,13 +1390,15 @@ mod tests { #[test] fn message_proof_is_rejected_if_required_message_is_missing() { assert_eq!( - target::verify_messages_proof_with_parser::(messages_proof(10), 10, |_, _| Ok( - TestMessageProofParser { + target::verify_messages_proof_with_parser::( + messages_proof(10), + 10, + |_, _| Ok(TestMessageProofParser { failing: false, messages: 1..=5, outbound_lane_data: None, - } - ),), + }), + ), Err(target::MessageProofError::MissingRequiredMessage), ); } @@ -1346,13 +1406,15 @@ mod tests { #[test] fn message_proof_is_rejected_if_message_decode_fails() { assert_eq!( - target::verify_messages_proof_with_parser::(messages_proof(10), 10, |_, _| Ok( - TestMessageProofParser { + target::verify_messages_proof_with_parser::( + messages_proof(10), + 10, + |_, _| Ok(TestMessageProofParser { failing: true, messages: 1..=10, outbound_lane_data: None, - } - ),), + }), + ), Err(target::MessageProofError::FailedToDecodeMessage), ); } @@ -1360,8 +1422,10 @@ mod tests { #[test] fn message_proof_is_rejected_if_outbound_lane_state_decode_fails() { assert_eq!( - target::verify_messages_proof_with_parser::(messages_proof(0), 0, |_, _| Ok( - TestMessageProofParser { + target::verify_messages_proof_with_parser::( + messages_proof(0), + 0, + |_, _| Ok(TestMessageProofParser { failing: true, messages: no_messages_range(), outbound_lane_data: Some(OutboundLaneData { @@ -1369,8 +1433,8 @@ mod 
tests { latest_received_nonce: 1, latest_generated_nonce: 1, }), - } - ),), + }), + ), Err(target::MessageProofError::FailedToDecodeOutboundLaneState), ); } @@ -1378,13 +1442,15 @@ mod tests { #[test] fn message_proof_is_rejected_if_it_is_empty() { assert_eq!( - target::verify_messages_proof_with_parser::(messages_proof(0), 0, |_, _| Ok( - TestMessageProofParser { + target::verify_messages_proof_with_parser::( + messages_proof(0), + 0, + |_, _| Ok(TestMessageProofParser { failing: false, messages: no_messages_range(), outbound_lane_data: None, - } - ),), + }), + ), Err(target::MessageProofError::Empty), ); } @@ -1392,8 +1458,10 @@ mod tests { #[test] fn non_empty_message_proof_without_messages_is_accepted() { assert_eq!( - target::verify_messages_proof_with_parser::(messages_proof(0), 0, |_, _| Ok( - TestMessageProofParser { + target::verify_messages_proof_with_parser::( + messages_proof(0), + 0, + |_, _| Ok(TestMessageProofParser { failing: false, messages: no_messages_range(), outbound_lane_data: Some(OutboundLaneData { @@ -1401,8 +1469,8 @@ mod tests { latest_received_nonce: 1, latest_generated_nonce: 1, }), - } - ),), + }), + ), Ok(vec![( Default::default(), ProvedLaneMessages { @@ -1422,8 +1490,10 @@ mod tests { #[test] fn non_empty_message_proof_is_accepted() { assert_eq!( - target::verify_messages_proof_with_parser::(messages_proof(1), 1, |_, _| Ok( - TestMessageProofParser { + target::verify_messages_proof_with_parser::( + messages_proof(1), + 1, + |_, _| Ok(TestMessageProofParser { failing: false, messages: 1..=1, outbound_lane_data: Some(OutboundLaneData { @@ -1431,8 +1501,8 @@ mod tests { latest_received_nonce: 1, latest_generated_nonce: 1, }), - } - ),), + }), + ), Ok(vec![( Default::default(), ProvedLaneMessages { @@ -1442,14 +1512,8 @@ mod tests { latest_generated_nonce: 1, }), messages: vec![Message { - key: MessageKey { - lane_id: Default::default(), - nonce: 1 - }, - data: MessageData { - payload: 1u64.encode(), - fee: BridgedChainBalance(0) - }, + 
key: MessageKey { lane_id: Default::default(), nonce: 1 }, + data: MessageData { payload: 1u64.encode(), fee: BridgedChainBalance(0) }, }], }, )] @@ -1488,10 +1552,7 @@ mod tests { 10, FixedU128::zero(), |weight| weight, - MessageTransaction { - size: 50, - dispatch_weight: 777 - }, + MessageTransaction { size: 50, dispatch_weight: 777 }, ), 100 + 50 * 10, ); @@ -1507,10 +1568,7 @@ mod tests { 10, FixedU128::one(), |weight| weight, - MessageTransaction { - size: 50, - dispatch_weight: 777 - }, + MessageTransaction { size: 50, dispatch_weight: 777 }, ), 100 + 50 * 10 + 777, ); diff --git a/polkadot/bridges/bin/runtime-common/src/messages_api.rs b/polkadot/bridges/bin/runtime-common/src/messages_api.rs new file mode 100644 index 0000000000000000000000000000000000000000..b09a88e62795982c64ffc99a07debff08e9eb24c --- /dev/null +++ b/polkadot/bridges/bin/runtime-common/src/messages_api.rs @@ -0,0 +1,51 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Helpers for implementing various message-related runtime API mthods. + +use crate::messages::{source::FromThisChainMessagePayload, MessageBridge}; + +use bp_messages::{LaneId, MessageDetails, MessageNonce}; +use codec::Decode; +use sp_std::vec::Vec; + +/// Implementation of the `To*OutboundLaneApi::message_details`. 
+pub fn outbound_message_details( + lane: LaneId, + begin: MessageNonce, + end: MessageNonce, +) -> Vec> +where + Runtime: pallet_bridge_messages::Config, + MessagesPalletInstance: 'static, + BridgeConfig: MessageBridge, +{ + (begin..=end) + .filter_map(|nonce| { + let message_data = + pallet_bridge_messages::Pallet::::outbound_message_data(lane, nonce)?; + let decoded_payload = + FromThisChainMessagePayload::::decode(&mut &message_data.payload[..]).ok()?; + Some(MessageDetails { + nonce, + dispatch_weight: decoded_payload.weight, + size: message_data.payload.len() as _, + delivery_and_dispatch_fee: message_data.fee, + dispatch_fee_payment: decoded_payload.dispatch_fee_payment, + }) + }) + .collect() +} diff --git a/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs b/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs index 3785f4a4607f0af545eca7583b86d3219c76541f..217560e114344c61e502d888d92f130e09732db2 100644 --- a/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs +++ b/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs @@ -20,8 +20,8 @@ #![cfg(feature = "runtime-benchmarks")] use crate::messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, AccountIdOf, BalanceOf, - BridgedChain, HashOf, MessageBridge, ThisChain, + source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, + AccountIdOf, BalanceOf, BridgedChain, HashOf, MessageBridge, ThisChain, }; use bp_messages::{LaneId, MessageData, MessageKey, MessagePayload}; @@ -29,13 +29,16 @@ use bp_runtime::ChainId; use codec::Encode; use ed25519_dalek::{PublicKey, SecretKey, Signer, KEYPAIR_LENGTH, SECRET_KEY_LENGTH}; use frame_support::weights::Weight; -use pallet_bridge_messages::benchmarking::{MessageDeliveryProofParams, MessageProofParams, ProofSize}; +use pallet_bridge_messages::benchmarking::{ + MessageDeliveryProofParams, MessageProofParams, ProofSize, +}; use sp_core::Hasher; use 
sp_runtime::traits::Header; use sp_std::prelude::*; use sp_trie::{record_all_keys, trie_types::TrieDBMut, Layout, MemoryDB, Recorder, TrieMut}; -/// Generate ed25519 signature to be used in `pallet_brdige_call_dispatch::CallOrigin::TargetAccount`. +/// Generate ed25519 signature to be used in +/// `pallet_brdige_call_dispatch::CallOrigin::TargetAccount`. /// /// Returns public key of the signer and the signature itself. pub fn ed25519_sign( @@ -47,8 +50,8 @@ pub fn ed25519_sign( ) -> ([u8; 32], [u8; 64]) { // key from the repo example (https://docs.rs/ed25519-dalek/1.0.1/ed25519_dalek/struct.SecretKey.html) let target_secret = SecretKey::from_bytes(&[ - 157, 097, 177, 157, 239, 253, 090, 096, 186, 132, 074, 244, 146, 236, 044, 196, 068, 073, 197, 105, 123, 050, - 105, 025, 112, 059, 172, 003, 028, 174, 127, 096, + 157, 097, 177, 157, 239, 253, 090, 096, 186, 132, 074, 244, 146, 236, 044, 196, 068, 073, + 197, 105, 123, 050, 105, 025, 112, 059, 172, 003, 028, 174, 127, 096, ]) .expect("harcoded key is valid"); let target_public: PublicKey = (&target_secret).into(); @@ -56,7 +59,8 @@ pub fn ed25519_sign( let mut target_pair_bytes = [0u8; KEYPAIR_LENGTH]; target_pair_bytes[..SECRET_KEY_LENGTH].copy_from_slice(&target_secret.to_bytes()); target_pair_bytes[SECRET_KEY_LENGTH..].copy_from_slice(&target_public.to_bytes()); - let target_pair = ed25519_dalek::Keypair::from_bytes(&target_pair_bytes).expect("hardcoded pair is valid"); + let target_pair = + ed25519_dalek::Keypair::from_bytes(&target_pair_bytes).expect("hardcoded pair is valid"); let signature_message = pallet_bridge_dispatch::account_ownership_digest( target_call, @@ -92,11 +96,8 @@ where MH: Fn(H::Out) -> ::Header, { // prepare Bridged chain storage with messages and (optionally) outbound lane state - let message_count = params - .message_nonces - .end() - .saturating_sub(*params.message_nonces.start()) - + 1; + let message_count = + params.message_nonces.end().saturating_sub(*params.message_nonces.start()) + 
1; let mut storage_keys = Vec::with_capacity(message_count as usize + 1); let mut root = Default::default(); let mut mdb = MemoryDB::default(); @@ -105,10 +106,7 @@ where // insert messages for nonce in params.message_nonces.clone() { - let message_key = MessageKey { - lane_id: params.lane, - nonce, - }; + let message_key = MessageKey { lane_id: params.lane, nonce }; let message_data = MessageData { fee: BalanceOf::>::from(0), payload: message_payload.clone(), @@ -220,7 +218,7 @@ fn grow_trie(mut root: H::Out, mdb: &mut MemoryDB, trie_size: Proo .expect("record_all_keys should not fail in benchmarks"); let size: usize = proof_recorder.drain().into_iter().map(|n| n.data.len()).sum(); if size > minimal_trie_size as _ { - return root; + return root } let mut trie = TrieDBMut::::from_existing(mdb, &mut root) diff --git a/polkadot/bridges/ci.Dockerfile b/polkadot/bridges/ci.Dockerfile index d1cab4f92f22d5a7d98dfd77ddd08ee7d6679377..b419f6be54d2a2c12d22111bfb0bc4bdd2ed7558 100644 --- a/polkadot/bridges/ci.Dockerfile +++ b/polkadot/bridges/ci.Dockerfile @@ -24,7 +24,7 @@ USER user WORKDIR /home/user -ARG PROJECT=ethereum-poa-relay +ARG PROJECT=substrate-relay COPY --chown=user:user ./${PROJECT} ./ COPY --chown=user:user ./bridge-entrypoint.sh ./ diff --git a/polkadot/bridges/deny.toml b/polkadot/bridges/deny.toml index e754b8e9bd36286f3c1650d094655bf8df27b36e..d22897182af29127f2ee0994549dd1b70af2fc04 100644 --- a/polkadot/bridges/deny.toml +++ b/polkadot/bridges/deny.toml @@ -48,27 +48,21 @@ notice = "warn" # A list of advisory IDs to ignore. Note that ignored advisories will still # output a note when they are encountered. ignore = [ - # generic-array lifetime errasure. If all upstream crates upgrade to >=0.14.0 - # we can remove this. - "RUSTSEC-2020-0146", # yaml-rust < clap. Not feasible to upgrade and also not possible to trigger in practice. 
"RUSTSEC-2018-0006", - # Comes from wasmtime via Substrate: 'cranelift-codegen' - "RUSTSEC-2021-0067", - # Comes from libp2p via Substrate: 'aes-soft', 'aesni', 'block-cipher', 'stream-cipher' - "RUSTSEC-2021-0060", - "RUSTSEC-2021-0059", - "RUSTSEC-2020-0057", - "RUSTSEC-2021-0064", - # Comes from jsonrpc via Substrate: 'failure', 'net2', 'lock_api' - "RUSTSEC-2020-0036", - "RUSTSEC-2020-0077", - "RUSTSEC-2019-0036", "RUSTSEC-2020-0070", # Comes from honggfuzz via storage-proof-fuzzer: 'memmap' "RUSTSEC-2020-0077", # Comes from time: 'stweb' (will be fixed in upcoming time 0.3) - "RUSTSEC-2020-0056" + "RUSTSEC-2020-0056", + # net2 (origin: Substrate RPC crates) + "RUSTSEC-2020-0016", + # Wasmtime (origin: Substrate executor crates) + "RUSTSEC-2021-0110", + # time (origin: Substrate RPC + benchmarking crates) + "RUSTSEC-2020-0071", + # chrono (origin: Substrate benchmarking + cli + ...) + "RUSTSEC-2020-0159", ] # Threshold for security vulnerabilities, any vulnerability with a CVSS score # lower than the range specified will be ignored. Note that ignored advisories @@ -85,7 +79,7 @@ ignore = [ # https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html [licenses] # The lint level for crates which do not have a detectable license -unlicensed = "deny" +unlicensed = "allow" # List of explictly allowed licenses # See https://spdx.org/licenses/ for list of possible licenses # [possible values: any SPDX 3.7 short identifier (+ optional exception)]. diff --git a/polkadot/bridges/deployments/README.md b/polkadot/bridges/deployments/README.md index d553fca611a61b8a8986bb091605bd7b30c0da25..920935d5fdb81134c6d7dd2461cf0c85de320cb1 100644 --- a/polkadot/bridges/deployments/README.md +++ b/polkadot/bridges/deployments/README.md @@ -44,16 +44,16 @@ the monitoring Compose file is _not_ optional, and must be included for bridge d ### Running and Updating Deployments We currently support two bridge deployments -1. Ethereum PoA to Rialto Substrate -2. 
Rialto Substrate to Millau Substrate +1. Rialto Substrate to Millau Substrate +2. Westend Substrate to Millau Substrate These bridges can be deployed using our [`./run.sh`](./run.sh) script. The first argument it takes is the name of the bridge you want to run. Right now we only support two -bridges: `poa-rialto` and `rialto-millau`. +bridges: `rialto-millau` and `westend-millau`. ```bash -./run.sh poa-rialto +./run.sh rialto-millau ``` If you add a second `update` argument to the script it will pull the latest images from Docker Hub @@ -66,7 +66,7 @@ and restart the deployment. You can also bring down a deployment using the script with the `stop` argument. ```bash -./run.sh poa-rialto stop +./run.sh rialto-millau stop ``` ### Adding Deployments @@ -80,7 +80,6 @@ not strictly required. ## General Notes Rialto authorities are named: `Alice`, `Bob`, `Charlie`, `Dave`, `Eve`. -Rialto-PoA authorities are named: `Arthur`, `Bertha`, `Carlos`. Millau authorities are named: `Alice`, `Bob`, `Charlie`, `Dave`, `Eve`. Both authorities and following accounts have enough funds (for test purposes) on corresponding Substrate chains: @@ -89,11 +88,11 @@ Both authorities and following accounts have enough funds (for test purposes) on - on Millau: `Ferdie`, `George`, `Harry`. Names of accounts on Substrate (Rialto and Millau) chains may be prefixed with `//` and used as -seeds for the `sr25519` keys. This seed may also be used in the signer argument in Substrate -and PoA relays. Example: +seeds for the `sr25519` keys. This seed may also be used in the signer argument in Substrate relays. +Example: ```bash -./substrate-relay relay-headers RialtoToMillau \ +./substrate-relay relay-headers rialto-to-millau \ --source-host rialto-node-alice \ --source-port 9944 \ --target-host millau-node-alice \ @@ -105,13 +104,6 @@ and PoA relays. Example: Some accounts are used by bridge components. 
Using these accounts to sign other transactions is not recommended, because this may lead to nonces conflict. -Following accounts are used when `poa-rialto` bridge is running: - -- Rialto's `Alice` signs relay transactions with new Rialto-PoA headers; -- Rialto's `Bob` signs relay transactions with Rialto-PoA -> Rialto currency exchange proofs. -- Rialto-PoA's `Arthur`: signs relay transactions with new Rialto headers; -- Rialto-PoA's `Bertha`: signs currency exchange transactions. - Following accounts are used when `rialto-millau` bridge is running: - Millau's `Charlie` signs complex headers+messages relay transactions on Millau chain; @@ -121,7 +113,9 @@ Following accounts are used when `rialto-millau` bridge is running: - Millau's `Eve` signs relay transactions with message delivery confirmations (lane 00000001) from Rialto to Millau; - Rialto's `Eve` signs relay transactions with messages (lane 00000001) from Millau to Rialto; - Millau's `Ferdie` signs relay transactions with messages (lane 00000001) from Rialto to Millau; -- Rialto's `Ferdie` signs relay transactions with message delivery confirmations (lane 00000001) from Millau to Rialto. +- Rialto's `Ferdie` signs relay transactions with message delivery confirmations (lane 00000001) from Millau to Rialto; +- Millau's `RialtoMessagesOwner` signs relay transactions with updated Rialto -> Millau conversion rate; +- Rialto's `MillauMessagesOwner` signs relay transactions with updated Millau -> Rialto conversion rate. 
Following accounts are used when `westend-millau` bridge is running: @@ -131,10 +125,10 @@ Following accounts are used when `westend-millau` bridge is running: When the network is running you can query logs from individual nodes using: ```bash -docker logs rialto_poa-node-bertha_1 -f +docker logs rialto_millau-node-charlie_1 -f ``` -To kill all left over containers and start the network from scratch next time: +To kill all leftover containers and start the network from scratch next time: ```bash docker ps -a --format "{{.ID}}" | xargs docker rm # This removes all containers! ``` @@ -188,7 +182,6 @@ Here are the arguments currently supported: - `PROJECT`: Project to build withing bridges repo. Can be one of: - `rialto-bridge-node` - `millau-bridge-node` - - `ethereum-poa-relay` - `substrate-relay` ### GitHub Actions diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/Front-end.Dockerfile b/polkadot/bridges/deployments/bridges/poa-rialto/Front-end.Dockerfile deleted file mode 100644 index 25c49cf56c15723dc92efc1fcadb14db5d7df740..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/Front-end.Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -FROM docker.io/library/node:12 as build-deps - -# install tools and dependencies -RUN set -eux; \ - apt-get install -y git - -# clone UI repo -RUN cd /usr/src/ && git clone https://github.com/paritytech/bridge-ui.git -WORKDIR /usr/src/bridge-ui -RUN yarn -ARG SUBSTRATE_PROVIDER -ARG ETHEREUM_PROVIDER -ARG EXPECTED_ETHEREUM_NETWORK_ID - -ENV SUBSTRATE_PROVIDER $SUBSTRATE_PROVIDER -ENV ETHEREUM_PROVIDER $ETHEREUM_PROVIDER -ENV EXPECTED_ETHEREUM_NETWORK_ID $EXPECTED_ETHEREUM_NETWORK_ID - -RUN yarn build:docker - -# Stage 2 - the production environment -FROM docker.io/library/nginx:1.12 -COPY --from=build-deps /usr/src/bridge-ui/nginx/*.conf /etc/nginx/conf.d/ -COPY --from=build-deps /usr/src/bridge-ui/dist /usr/share/nginx/html -EXPOSE 80 -CMD ["nginx", "-g", "daemon off;"] diff --git 
a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-exchange-dashboard.json b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-exchange-dashboard.json deleted file mode 100644 index 7e197bb882f8cc924ba200a611fb8feb10b2ab30..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-exchange-dashboard.json +++ /dev/null @@ -1,474 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 7, - "x": 0, - "y": 0 - }, - "id": 2, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_best_block_numbers", - "instant": true, - "interval": "", - "legendFormat": "Best {{type}} block", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Best finalized blocks", - "type": "stat" - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { 
- "h": 7, - "w": 5, - "x": 7, - "y": 0 - }, - "id": 12, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_processed_blocks", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Number of processed blocks since last restart", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 6, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_system_average_load", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": null - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Average System Load", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 
1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 6, - "x": 18, - "y": 0 - }, - "id": 8, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_process_cpu_usage_percentage", - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay Process CPU Usage", - "type": "gauge" - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 7 - }, - "id": 14, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_processed_transactions", - "instant": true, - "interval": "", - "legendFormat": "{{type}} transactions", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Number of processed transactions since last restart", - "type": 
"stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 9 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_process_memory_usage_bytes / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory Usage for Relay Process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Ethereum PoA to Rialto Exchange Dashboard", - "uid": "relay-poa-to-rialto-exchange", - "version": 1 -} diff --git 
a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-headers-dashboard.json b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-headers-dashboard.json deleted file mode 100644 index 05d06e949819166e25d63d461eb56915ab8806e4..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-headers-dashboard.json +++ /dev/null @@ -1,694 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "5m", - "handler": 1, - "message": "", - "name": "Synced Header Difference is Over 5 (Ethereum PoA to Rialto)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "Shows how many headers behind the target chain is from the source chain.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - 
"pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"source\"}) - max(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"target\"})", - "format": "table", - "instant": false, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Difference Between Source and Target Headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "2m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "3m", - "frequency": "5m", - "handler": 1, - "name": "No New Headers (Ethereum PoA to Rialto)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "How many headers has the relay synced from the source node in the last 2 mins?", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 
12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 16, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max_over_time(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"source\"}[2m])", - "interval": "", - "legendFormat": "Number of new Headers on Ethereum PoA (Last 2 Mins)", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Headers Synced on Rialto (Last 2 Mins)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": { - "align": null - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 8 - }, - "id": 2, - "interval": "5s", - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": 
"auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Sync_best_block_numbers", - "format": "time_series", - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Best Known Header on {{node}} Node", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Best Blocks according to Relay", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 6, - "x": 12, - "y": 8 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Ethereum_to_Substrate_Sync_system_average_load", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": null - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Average System Load", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": 
null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 18, - "y": 8 - }, - "id": 12, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "avg_over_time(Ethereum_to_Substrate_Sync_process_cpu_usage_percentage[1m])", - "instant": true, - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay Process CPU Usage ", - "type": "gauge" - }, - { - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 14 - }, - "id": 4, - "options": { - "displayMode": "gradient", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showUnfilled": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Sync_blocks_in_state", - "instant": true, - "interval": "", - "legendFormat": "{{state}}", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Queued Headers in Relay", - "type": "bargauge" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", 
- "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 16 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Ethereum_to_Substrate_Sync_process_memory_usage_bytes / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory Usage for Relay Process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Ethereum PoA to Rialto Header Sync Dashboard", - "uid": "relay-poa-to-rialto-headers", - "version": 1 -} diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-rialto-to-poa-headers-dashboard.json 
b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-rialto-to-poa-headers-dashboard.json deleted file mode 100644 index 149c637fcb15641b5a73e0d84661bbbd2dfa58b6..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/relay-rialto-to-poa-headers-dashboard.json +++ /dev/null @@ -1,694 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "5m", - "handler": 1, - "message": "", - "name": "Synced Header Difference is Over 5 (Rialto to Ethereum PoA)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "Shows how many headers behind the target chain is from the source chain.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": 
false, - "steppedLine": false, - "targets": [ - { - "expr": "max(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"source\"}) - max(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"target\"})", - "format": "table", - "instant": false, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Difference Between Source and Target Headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "2m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "3m", - "frequency": "5m", - "handler": 1, - "name": "No New Headers (Rialto to Ethereum PoA)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "How many headers has the relay synced from the source node in the last 2 mins?", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 16, - "legend": { - "avg": false, - "current": false, 
- "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max_over_time(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"source\"}[2m])", - "interval": "", - "legendFormat": "Number of new Headers on Rialto (Last 2 Mins)", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Headers Synced on Ethereum PoA (Last 2 Mins)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": { - "align": null - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 8 - }, - "id": 2, - "interval": "5s", - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - 
}, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Substrate_to_Ethereum_Sync_best_block_numbers", - "format": "time_series", - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Best Known Header on {{node}} Node", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Best Blocks according to Relay", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 6, - "x": 12, - "y": 8 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Substrate_to_Ethereum_Sync_system_average_load", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": null - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Average System Load", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - 
"alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 18, - "y": 8 - }, - "id": 12, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "avg_over_time(Substrate_to_Ethereum_Sync_process_cpu_usage_percentage[1m])", - "instant": true, - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay Process CPU Usage ", - "type": "gauge" - }, - { - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 14 - }, - "id": 4, - "options": { - "displayMode": "gradient", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showUnfilled": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Substrate_to_Ethereum_Sync_blocks_in_state", - "instant": true, - "interval": "", - "legendFormat": "{{state}}", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Queued Headers in Relay", - "type": "bargauge" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 
0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 16 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Substrate_to_Ethereum_Sync_process_memory_usage_bytes / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory Usage for Relay Process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Rialto to Ethereum PoA Header Sync Dashboard", - "uid": "relay-rialto-to-poa-headers", - "version": 1 -} diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/prometheus/targets.yml b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/prometheus/targets.yml deleted file mode 100644 index 
b0038008ef6dce91697b6dc22bf0845665bdb646..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/prometheus/targets.yml +++ /dev/null @@ -1,4 +0,0 @@ -- targets: - - relay-headers-poa-to-rialto:9616 - - relay-poa-exchange-rialto:9616 - - relay-headers-rialto-to-poa:9616 diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/docker-compose.yml b/polkadot/bridges/deployments/bridges/poa-rialto/docker-compose.yml deleted file mode 100644 index 6bdcb2301242047c06ae3b5c99d8c8ad0e0dcf4c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/docker-compose.yml +++ /dev/null @@ -1,94 +0,0 @@ -# This Compose file should be built using the Rialto and Eth-PoA node -# compose files. Otherwise it won't work. -# -# Exposed ports: 9616, 9716, 9816, 9916, 8080 - -version: '3.5' -services: - # We override these nodes to make sure we have the correct chain config for this network. - poa-node-arthur: &poa-node - volumes: - - ./bridges/poa-rialto/poa-config:/config - poa-node-bertha: - <<: *poa-node - poa-node-carlos: - <<: *poa-node - - # We provide an override for this particular node since this is a public facing - # node which we use to connect from things like Polkadot JS Apps. 
- rialto-node-charlie: - environment: - VIRTUAL_HOST: rialto.bridges.test-installations.parity.io,wss.rialto.brucke.link - VIRTUAL_PORT: 9944 - LETSENCRYPT_HOST: rialto.bridges.test-installations.parity.io,wss.rialto.brucke.link - LETSENCRYPT_EMAIL: admin@parity.io - - relay-headers-poa-to-rialto: ð-poa-relay - image: paritytech/ethereum-poa-relay - entrypoint: /entrypoints/relay-headers-poa-to-rialto-entrypoint.sh - volumes: - - ./bridges/poa-rialto/entrypoints:/entrypoints - environment: - RUST_LOG: rpc=trace,bridge=trace - ports: - - "9616:9616" - depends_on: &all-nodes - - poa-node-arthur - - poa-node-bertha - - poa-node-carlos - - rialto-node-alice - - rialto-node-bob - - rialto-node-charlie - - rialto-node-dave - - rialto-node-eve - - relay-poa-exchange-rialto: - <<: *eth-poa-relay - entrypoint: /entrypoints/relay-poa-exchange-rialto-entrypoint.sh - ports: - - "9716:9616" - - relay-headers-rialto-to-poa: - <<: *eth-poa-relay - entrypoint: /entrypoints/relay-headers-rialto-to-poa-entrypoint.sh - ports: - - "9816:9616" - - poa-exchange-tx-generator: - <<: *eth-poa-relay - entrypoint: /entrypoints/poa-exchange-tx-generator-entrypoint.sh - environment: - EXCHANGE_GEN_MIN_AMOUNT_FINNEY: ${EXCHANGE_GEN_MIN_AMOUNT_FINNEY:-1} - EXCHANGE_GEN_MAX_AMOUNT_FINNEY: ${EXCHANGE_GEN_MAX_AMOUNT_FINNEY:-100000} - EXCHANGE_GEN_MAX_SUBMIT_DELAY_S: ${EXCHANGE_GEN_MAX_SUBMIT_DELAY_S:-60} - ports: - - "9916:9616" - depends_on: - - relay-headers-poa-to-rialto - - relay-headers-rialto-to-poa - - front-end: - build: - context: . - dockerfile: ./bridges/poa-rialto/Front-end.Dockerfile - args: - SUBSTRATE_PROVIDER: ${UI_SUBSTRATE_PROVIDER:-ws://localhost:9944} - ETHEREUM_PROVIDER: ${UI_ETHEREUM_PROVIDER:-http://localhost:8545} - EXPECTED_ETHEREUM_NETWORK_ID: ${UI_EXPECTED_ETHEREUM_NETWORK_ID:-105} - ports: - - "8080:80" - - # Note: These are being overridden from the top level `monitoring` compose file. 
- prometheus-metrics: - volumes: - - ./bridges/poa-rialto/dashboard/prometheus/targets.yml:/etc/prometheus/targets-poa-rialto.yml - depends_on: *all-nodes - - grafana-dashboard: - volumes: - - ./bridges/poa-rialto/dashboard/grafana:/etc/grafana/dashboards/poa-rialto:ro - environment: - VIRTUAL_HOST: dashboard.rialto.bridges.test-installations.parity.io,grafana.rialto.brucke.link - VIRTUAL_PORT: 3000 - LETSENCRYPT_HOST: dashboard.rialto.bridges.test-installations.parity.io,grafana.rialto.brucke.link - LETSENCRYPT_EMAIL: admin@parity.io diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh b/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh deleted file mode 100755 index 9af373b0216f36bdbb7ed3f332111963514f2d3f..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh +++ /dev/null @@ -1,99 +0,0 @@ -#!/bin/bash - -# THIS SCRIPT IS NOT INTENDED FOR USE IN PRODUCTION ENVIRONMENT -# -# This scripts periodically calls relay binary to generate PoA -> Substrate -# exchange transaction from hardcoded PoA senders (assuming they have -# enough funds) to hardcoded Substrate recipients. - -set -eu - -# Path to relay binary -RELAY_BINARY_PATH=${RELAY_BINARY_PATH:-./ethereum-poa-relay} -# Ethereum node host -ETH_HOST=${ETH_HOST:-poa-node-arthur} -# Ethereum node websocket port -ETH_PORT=${ETH_PORT:-8546} -# Ethereum chain id -ETH_CHAIN_ID=${ETH_CHAIN_ID:-105} - -# All possible Substrate recipients (hex-encoded public keys) -SUB_RECIPIENTS=( - # Alice (5GrwvaEF...) - "d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d"\ - # Bob (5FHneW46...) - "8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48"\ - # Charlie (5FLSigC9...) - "90b5ab205c6974c9ea841be688864633dc9ca8a357843eeacf2314649965fe22"\ - # Dave (5DAAnrj7...) 
- "306721211d5404bd9da88e0204360a1a9ab8b87c66c1bc2fcdd37f3c2222cc20"\ - # Eve (5HGjWAeF...) - "e659a7a1628cdd93febc04a4e0646ea20e9f5f0ce097d9a05290d4a9e054df4e"\ - # Ferdie (5CiPPseX...) - "1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c" -) -# All possible Ethereum signers (hex-encoded private keys) -# (note that we're tracking nonce here => sender must not send concurrent transactions) -ETH_SIGNERS=( - # Bertha account (0x007594304039c2937a12220338aab821d819f5a4) and its current nonce (unknown by default) - "bc10e0f21e33456ade82182dd1ebdbdd89bca923d4e4adbd90fb5b44d7098cbe" "" -) -# Minimal exchange amount (in finney) -MIN_EXCHANGE_AMOUNT_FINNEY=${EXCHANGE_GEN_MIN_AMOUNT_FINNEY:-1} # 0.1 ETH -# Maximal exchange amount (in finney) -MAX_EXCHANGE_AMOUNT_FINNEY=${EXCHANGE_GEN_MAX_AMOUNT_FINNEY:-100000} # 100 ETH -# Max delay before submitting transactions (s) -MAX_SUBMIT_DELAY_S=${EXCHANGE_GEN_MAX_SUBMIT_DELAY_S:-60} - -while true -do - # sleep some time - SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1` - echo "Sleeping $SUBMIT_DELAY_S seconds..." 
- sleep $SUBMIT_DELAY_S - - # select recipient - SUB_RECIPIENTS_MAX_INDEX=$((${#SUB_RECIPIENTS[@]} - 1)) - SUB_RECIPIENT_INDEX=`shuf -i 0-$SUB_RECIPIENTS_MAX_INDEX -n 1` - SUB_RECIPIENT=${SUB_RECIPIENTS[$SUB_RECIPIENT_INDEX]} - - # select signer - ETH_SIGNERS_MAX_INDEX=$(((${#ETH_SIGNERS[@]} - 1) / 2)) - ETH_SIGNERS_INDEX=`shuf -i 0-$ETH_SIGNERS_MAX_INDEX -n 1` - ETH_SIGNER_INDEX=$(($ETH_SIGNERS_INDEX * 2)) - ETH_SIGNER_NONCE_INDEX=$(($ETH_SIGNER_INDEX + 1)) - ETH_SIGNER=${ETH_SIGNERS[$ETH_SIGNER_INDEX]} - ETH_SIGNER_NONCE=${ETH_SIGNERS[$ETH_SIGNER_NONCE_INDEX]} - if [ -z $ETH_SIGNER_NONCE ]; then - ETH_SIGNER_NONCE_ARG= - else - ETH_SIGNER_NONCE_ARG=`printf -- "--eth-nonce=%s" $ETH_SIGNER_NONCE` - fi - - # select amount - EXCHANGE_AMOUNT_FINNEY=`shuf -i $MIN_EXCHANGE_AMOUNT_FINNEY-$MAX_EXCHANGE_AMOUNT_FINNEY -n 1` - EXCHANGE_AMOUNT_ETH=`printf "%s000" $EXCHANGE_AMOUNT_FINNEY` - - # submit transaction - echo "Sending $EXCHANGE_AMOUNT_ETH from PoA:$ETH_SIGNER to Substrate:$SUB_RECIPIENT. 
Nonce: $ETH_SIGNER_NONCE" - set -x - SUBMIT_OUTPUT=`$RELAY_BINARY_PATH 2>&1 eth-submit-exchange-tx \ - --sub-recipient=$SUB_RECIPIENT \ - --eth-host=$ETH_HOST \ - --eth-port=$ETH_PORT \ - --eth-chain-id=$ETH_CHAIN_ID \ - --eth-signer=$ETH_SIGNER \ - --eth-amount=$EXCHANGE_AMOUNT_ETH \ - $ETH_SIGNER_NONCE_ARG` - set +x - - # update sender nonce - SUBMIT_OUTPUT_RE='nonce: ([0-9]+)' - if [[ $SUBMIT_OUTPUT =~ $SUBMIT_OUTPUT_RE ]]; then - ETH_SIGNER_NONCE=${BASH_REMATCH[1]} - ETH_SIGNERS[$ETH_SIGNER_NONCE_INDEX]=$(($ETH_SIGNER_NONCE + 1)) - else - echo "Missing nonce in relay response: $SUBMIT_OUTPUT" - exit 1 - fi -done diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh b/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh deleted file mode 100755 index 432cdd6b72c5abb01fba0c3fc76260bf51cf341c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 20 -curl -v http://poa-node-arthur:8545/api/health -curl -v http://poa-node-bertha:8545/api/health -curl -v http://poa-node-carlos:8545/api/health -curl -v http://rialto-node-alice:9933/health -curl -v http://rialto-node-bob:9933/health -curl -v http://rialto-node-charlie:9933/health - -/home/user/ethereum-poa-relay eth-to-sub \ - --sub-host rialto-node-alice \ - --eth-host poa-node-arthur \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh b/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh deleted file mode 100755 index 1677cc1accde5788bb0c8fb6c38551ee5307bc42..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh +++ 
/dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 20 - -curl -v http://rialto-node-bob:9933/health -curl -v http://poa-node-bertha:8545/api/health - -# Try to deploy contracts first -# networkID = 0x69 -# Arthur's key. -/home/user/ethereum-poa-relay eth-deploy-contract \ - --eth-chain-id 105 \ - --eth-signer 0399dbd15cf6ee8250895a1f3873eb1e10e23ca18e8ed0726c63c4aea356e87d \ - --sub-host rialto-node-bob \ - --eth-host poa-node-bertha || echo "Failed to deploy contracts." - -sleep 10 -echo "Starting SUB -> ETH relay" -/home/user/ethereum-poa-relay sub-to-eth \ - --eth-contract c9a61fb29e971d1dabfd98657969882ef5d0beee \ - --eth-chain-id 105 \ - --eth-signer 0399dbd15cf6ee8250895a1f3873eb1e10e23ca18e8ed0726c63c4aea356e87d \ - --sub-host rialto-node-bob \ - --eth-host poa-node-bertha \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh b/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh deleted file mode 100755 index 131a31ffbea9590b0570a74870f1545e74aadbf9..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 20 -curl -v http://poa-node-arthur:8545/api/health -curl -v http://poa-node-bertha:8545/api/health -curl -v http://poa-node-carlos:8545/api/health -curl -v http://rialto-node-alice:9933/health -curl -v http://rialto-node-bob:9933/health -curl -v http://rialto-node-charlie:9933/health - -/home/user/ethereum-poa-relay eth-exchange-sub \ - --sub-host rialto-node-alice \ - --sub-signer //Bob \ - --eth-host poa-node-arthur \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/address_book.json b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/address_book.json deleted file mode 
100644 index 9e26dfeeb6e641a33dae4961196235bdb965b21b..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/address_book.json +++ /dev/null @@ -1 +0,0 @@ -{} \ No newline at end of file diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/arthur.json b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/arthur.json deleted file mode 100644 index fa59a46480c27c74d5d675908885b70e37c68330..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/arthur.json +++ /dev/null @@ -1 +0,0 @@ -{"id":"dd04f316-bc9d-2deb-4a34-51014cd5f34f","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"aa91e6f0e6cf48208be4a1bcf15c6f30"},"ciphertext":"6e057599b13a87e8181bb39a40e14848fdc97958d493ddfa6bb1260350f69328","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"79dd8c09c5c066b830179a2558a51efca6d97c0db2c4128090a01835786823c5"},"mac":"8f8b8e2c9de29ec8eefc54a60055e30ae7ff4dd4a367eaf38880edb887da771e"},"address":"005e714f896a8b7cede9d38688c1a81de72a58e4","name":"","meta":"{}"} \ No newline at end of file diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/bertha.json b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/bertha.json deleted file mode 100644 index 7168ec4f71f73133dadc461a4c8dac0fe029bc8e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/bertha.json +++ /dev/null @@ -1 +0,0 @@ 
-{"id":"6d1e690f-0b52-35f7-989b-46100e7c65ed","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"a5b4d0466834e75c9fd29c6cbbac57ad"},"ciphertext":"102ac328cbe66d8cb8515c42e3268776a9be4419a5cb7b79852860b1e691c15b","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"e8daf2e70086b0cacf925d368fd3f60cada1285e39a42c4cc73c135368cfdbef"},"mac":"1bc3b750900a1143c64ba9e677d69e1093aab47cb003ba09f3cd595a3b422db5"},"address":"007594304039c2937a12220338aab821d819f5a4","name":"","meta":"{}"} \ No newline at end of file diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/carlos.json b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/carlos.json deleted file mode 100644 index 2f9759f7bdfe36634675b9a0123a4e6f16da2258..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/carlos.json +++ /dev/null @@ -1 +0,0 @@ -{"id":"ffaebba1-f1b9-8758-7034-0314040b1396","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"97f124bc8a7bf55d00eb2755c2b50364"},"ciphertext":"b87827816f33d2bef2dc3102a8a7744b86912f8ace10e45cb282a13487769ed2","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"3114c67a05bff7831d112083f566b176bfc874aea160eebadbe5564e406ee85c"},"mac":"e9bfe8fd6f612bc036bb57659297fc03db022264f5086a1b5726972d3ab6f64a"},"address":"004e7a39907f090e19b0b80a277e77b72b22e269","name":"","meta":"{}"} \ No newline at end of file diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/diego.json b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/diego.json deleted file mode 100644 index f1df56b841364039d3a325418bd9195cb87b5f91..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/diego.json +++ /dev/null @@ -1 +0,0 @@ 
-{"id":"ef9eb431-dc73-cf31-357e-736f64febe68","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"7077f1c4170d9fc2e05c5956be32fb51"},"ciphertext":"a053be448768d984257aeb8f9c7913e3f54c6e6e741accad9f09dd70c2d9828c","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"12580aa4624040970301e7474d3f9b2a93552bfe9ea2517f7119ccf8e91ebd0d"},"mac":"796dbb48adcfc09041fe39121632801d9f950d3c73dd47105180d8097d4f4491"},"address":"00eed42bf93b498f28acd21d207427a14074defe","name":"","meta":"{}"} \ No newline at end of file diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/pass b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/pass deleted file mode 100644 index f3097ab13082b70f67202aab7dd9d1b35b7ceac2..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/pass +++ /dev/null @@ -1 +0,0 @@ -password diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/poa-node-config b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/poa-node-config deleted file mode 100644 index 2b3c56453d7b3d8535cb2c8853e883e34e48e088..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/poa-node-config +++ /dev/null @@ -1,20 +0,0 @@ -[parity] -chain = "/config/poa.json" -keys_path = "/config/keys" -no_persistent_txqueue = true - -[account] -password = ["/config/pass"] - -[network] -reserved_peers = "/config/reserved" - -[rpc] -apis = ["all"] -cors = ["moz-extension://*", "chrome-extension://*"] - -[mining] -force_sealing = true - -[misc] -unsafe_expose = true diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/poa.json b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/poa.json deleted file mode 100644 index 12a8a58f263bb08c81f0e0994dfb3d21865db46d..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/poa.json +++ /dev/null @@ 
-1,184 +0,0 @@ -{ - "name": "BridgePoa", - "engine": { - "authorityRound": { - "params": { - "stepDuration": 10, - "validators": { - "list": [ - "0x005e714f896a8b7cede9d38688c1a81de72a58e4", - "0x007594304039c2937a12220338aab821d819f5a4", - "0x004e7a39907f090e19b0b80a277e77b72b22e269" - ] - }, - "validateScoreTransition": 0, - "validateStepTransition": 0, - "maximumUncleCountTransition": 0, - "maximumUncleCount": 0, - "emptyStepsTransition": "0xfffffffff", - "maximumEmptySteps": 1 - } - } - }, - "params": { - "accountStartNonce": "0x0", - "eip1014Transition": "0x0", - "eip1052Transition": "0x0", - "eip140Transition": "0x0", - "eip145Transition": "0x0", - "eip150Transition": "0x0", - "eip155Transition": "0x0", - "eip160Transition": "0x0", - "eip161abcTransition": "0x0", - "eip161dTransition": "0x0", - "eip211Transition": "0x0", - "eip214Transition": "0x0", - "eip658Transition": "0x0", - "eip98Transition": "0x7fffffffffffff", - "gasLimitBoundDivisor": "0x0400", - "maxCodeSize": 24576, - "maxCodeSizeTransition": "0x0", - "maximumExtraDataSize": "0x20", - "minGasLimit": "0x1388", - "networkID" : "0x69", - "validateChainIdTransition": "0x0", - "validateReceiptsTransition": "0x0" - }, - "genesis": { - "seal": { - "authorityRound": { - "step": "0x0", - "signature": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" - } - }, - "difficulty": "0x20000", - "author": "0x0000000000000000000000000000000000000000", - "timestamp": "0x00", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "extraData": "0x", - "gasLimit": "0x222222" - }, - "accounts": { - "0000000000000000000000000000000000000001": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, - "0000000000000000000000000000000000000002": { "balance": "1", "nonce": "1048576", "builtin": { "name": "sha256", "pricing": { "linear": { 
"base": 60, "word": 12 } } } }, - "0000000000000000000000000000000000000003": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, - "0000000000000000000000000000000000000004": { "balance": "1", "nonce": "1048576", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, - "0000000000000000000000000000000000000005": { "balance": "1", "builtin": { "name": "modexp", "activate_at": 0, "pricing": { "modexp": { "divisor": 20 } } } }, - "0000000000000000000000000000000000000006": { - "balance": "1", - "builtin": { - "name": "alt_bn128_add", - "pricing": { - "0": { - "price": { "alt_bn128_const_operations": { "price": 500 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_const_operations": { "price": 150 }} - } - } - } - }, - "0000000000000000000000000000000000000007": { - "balance": "1", - "builtin": { - "name": "alt_bn128_mul", - "pricing": { - "0": { - "price": { "alt_bn128_const_operations": { "price": 40000 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_const_operations": { "price": 6000 }} - } - } - } - }, - "0000000000000000000000000000000000000008": { - "balance": "1", - "builtin": { - "name": "alt_bn128_pairing", - "pricing": { - "0": { - "price": { "alt_bn128_pairing": { "base": 100000, "pair": 80000 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_pairing": { "base": 45000, "pair": 34000 }} - } - } - } - }, - "0x0000000000000000000000000000000000000009": { - "builtin": { - "name": "blake2_f", - "activate_at": "0xd751a5", - "pricing": { - "blake2_f": { - "gas_per_round": 1 - } - } - } - }, - "0x0000000000000000000000000000000000000010": { - "builtin": { - "name": "parse_substrate_header", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000011": { - "builtin": { - "name": 
"get_substrate_header_signal", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000012": { - "builtin": { - "name": "verify_substrate_finality_proof", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000013": { - "builtin": { - "name": "my_test", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x005e714f896a8b7cede9d38688c1a81de72a58e4": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - }, - "0x007594304039c2937a12220338aab821d819f5a4": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - }, - "0x004e7a39907f090e19b0b80a277e77b72b22e269": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - }, - "0x00eed42bf93b498f28acd21d207427a14074defe": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - } - } -} diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/reserved b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/reserved deleted file mode 100644 index 209d71b7fb30f9e49a635192ffac5775a8188e58..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/reserved +++ /dev/null @@ -1,3 +0,0 @@ -enode://543d0874df46dff238d62547160f9d11e3d21897d7041bbbe46a04d2ee56d9eaf108f2133c0403159624f7647198e224d0755d23ad0e1a50c0912973af6e8a8a@poa-node-arthur:30303 -enode://710de70733e88a24032e53054985f7239e37351f5f3335a468a1a78a3026e9f090356973b00262c346a6608403df2c7107fc4def2cfe4995ea18a41292b9384f@poa-node-bertha:30303 -enode://943525f415b9482f1c49bd39eb979e4e2b406f4137450b0553bffa5cba2928e25ff89ef70f7325aad8a75dbb5955eaecc1aee7ac55d66bcaaa07c8ea58adb23a@poa-node-carlos:30303 diff --git 
a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json index 69396162bbaa8d436c303fa334330f809d5a4b0a..32f3e53d6671232a49f7eac990c59932cb6e554c 100644 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json +++ b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json @@ -471,7 +471,7 @@ } ], "executionErrorState": "alerting", - "for": "5m", + "for": "7m", "frequency": "1m", "handler": 1, "name": "Messages from Millau to Rialto are not being delivered", @@ -896,7 +896,7 @@ } ], "executionErrorState": "alerting", - "for": "5m", + "for": "7m", "frequency": "1m", "handler": 1, "name": "Messages (00000001) from Millau to Rialto are not being delivered", @@ -967,8 +967,7 @@ "fill": true, "line": true, "op": "lt", - "value": 1, - "yaxis": "left" + "value": 1 } ], "timeFrom": null, @@ -1155,6 +1154,249 @@ "alignLevel": null } }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Token swap messages from Millau to Rialto are not being delivered", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 38 + }, + "hiddenSeries": false, + "id": 23, + 
"legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_73776170_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Rialto\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + }, + { + "expr": "increase(Millau_to_Rialto_MessageLane_73776170_lane_state_nonces{type=\"target_latest_received\"}[20m])", + "hide": true, + "interval": "", + "legendFormat": "Messages generated in last 5 minutes", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1 + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Delivery race (73776170)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + 
"fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 38 + }, + "hiddenSeries": false, + "id": 24, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_73776170_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Rialto to Millau\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + }, + { + "expr": "increase(Millau_to_Rialto_MessageLane_73776170_lane_state_nonces{type=\"source_latest_confirmed\"}[10m])", + "hide": true, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Confirmations race (73776170)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, { "datasource": "Prometheus", "fieldConfig": { @@ -1181,7 +1423,7 @@ "h": 8, "w": 8, "x": 0, - "y": 38 + "y": 49 }, "id": 16, "options": { @@ -1199,7 +1441,7 @@ "pluginVersion": "7.1.3", "targets": [ { - "expr": 
"avg_over_time(Millau_to_Rialto_MessageLane_00000000_process_cpu_usage_percentage[1m])", + "expr": "avg_over_time(process_cpu_usage_percentage{instance='relay-millau-rialto:9616'}[1m])", "instant": true, "interval": "", "legendFormat": "1 CPU = 100", @@ -1230,7 +1472,7 @@ "h": 8, "w": 8, "x": 8, - "y": 38 + "y": 49 }, "hiddenSeries": false, "id": 18, @@ -1257,7 +1499,7 @@ "steppedLine": false, "targets": [ { - "expr": "Millau_to_Rialto_MessageLane_00000000_system_average_load", + "expr": "system_average_load{instance='relay-millau-rialto:9616'}", "interval": "", "legendFormat": "Average system load in last {{over}}", "refId": "A" @@ -1323,7 +1565,7 @@ "h": 8, "w": 8, "x": 16, - "y": 38 + "y": 49 }, "hiddenSeries": false, "id": 20, @@ -1350,7 +1592,7 @@ "steppedLine": false, "targets": [ { - "expr": "Millau_to_Rialto_MessageLane_00000000_process_memory_usage_bytes / 1024 / 1024", + "expr": "process_memory_usage_bytes{instance='relay-millau-rialto:9616'} / 1024 / 1024", "interval": "", "legendFormat": "Process memory, MB", "refId": "A" diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json index 29691e0a060c695deeb99b786ac7e33924cac478..eaca8610aec7af3ad079b683a4c7b55755a7e5ca 100644 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json +++ b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json @@ -462,7 +462,7 @@ } ], "executionErrorState": "alerting", - "for": "5m", + "for": "7m", "frequency": "1m", "handler": 1, "name": "Messages from Rialto to Millau are not being delivered", @@ -887,7 +887,7 @@ } ], "executionErrorState": "alerting", - "for": "5m", + "for": "7m", "frequency": "1m", "handler": 1, "name": "Messages (00000001) from Rialto to Millau 
are not being delivered", @@ -1190,7 +1190,7 @@ "pluginVersion": "7.1.3", "targets": [ { - "expr": "avg_over_time(Rialto_to_Millau_MessageLane_00000000_process_cpu_usage_percentage[1m])", + "expr": "avg_over_time(process_cpu_usage_percentage{instance='relay-millau-rialto:9616'}[1m])", "instant": true, "interval": "", "legendFormat": "1 CPU = 100", @@ -1248,7 +1248,7 @@ "steppedLine": false, "targets": [ { - "expr": "Rialto_to_Millau_MessageLane_00000000_system_average_load", + "expr": "system_average_load{instance='relay-millau-rialto:9616'}", "interval": "", "legendFormat": "Average system load in last {{over}}", "refId": "A" @@ -1341,7 +1341,7 @@ "steppedLine": false, "targets": [ { - "expr": "Rialto_to_Millau_MessageLane_00000000_process_memory_usage_bytes / 1024 / 1024", + "expr": "process_memory_usage_bytes{instance='relay-millau-rialto:9616'} / 1024 / 1024", "interval": "", "legendFormat": "Process memory, MB", "refId": "A" diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json index 61ff281cc2a9b259f362d3b9f8d91331bbef510e..5280da748502e1aca960095d41d757e7cc95848e 100644 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json +++ b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json @@ -65,7 +65,7 @@ "targets": [ { "exemplar": true, - "expr": "Rialto_to_Millau_MessageLane_00000000_rialto_storage_proof_overhead", + "expr": "rialto_storage_proof_overhead{instance='relay-millau-rialto:9616'}", "interval": "", "legendFormat": "Actual overhead", "refId": "A" @@ -169,14 +169,14 @@ "targets": [ { "exemplar": true, - "expr": "Westend_to_Millau_Sync_kusama_to_base_conversion_rate / Westend_to_Millau_Sync_polkadot_to_base_conversion_rate", + "expr": 
"kusama_to_base_conversion_rate{instance='relay-millau-rialto:9616'} / polkadot_to_base_conversion_rate{instance='relay-millau-rialto:9616'}", "interval": "", "legendFormat": "Outside of runtime (actually Polkadot -> Kusama)", "refId": "A" }, { "exemplar": true, - "expr": "Rialto_to_Millau_MessageLane_00000000_rialto_millau_to_rialto_conversion_rate", + "expr": "Millau_Rialto_to_Millau_conversion_rate{instance='relay-millau-rialto:9616'}", "hide": false, "interval": "", "legendFormat": "At runtime", @@ -187,7 +187,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Rialto: Millau -> Rialto conversion rate", + "title": "Millau: Rialto -> Millau conversion rate", "tooltip": { "shared": true, "sort": 0, @@ -273,7 +273,7 @@ "targets": [ { "exemplar": true, - "expr": "Millau_to_Rialto_MessageLane_00000000_millau_storage_proof_overhead", + "expr": "millau_storage_proof_overhead{instance='relay-millau-rialto:9616'}", "interval": "", "legendFormat": "Actual overhead", "refId": "A" @@ -377,14 +377,14 @@ "targets": [ { "exemplar": true, - "expr": "Westend_to_Millau_Sync_polkadot_to_base_conversion_rate / Westend_to_Millau_Sync_kusama_to_base_conversion_rate", + "expr": "polkadot_to_base_conversion_rate{instance='relay-millau-rialto:9616'} / kusama_to_base_conversion_rate{instance='relay-millau-rialto:9616'}", "interval": "", "legendFormat": "Outside of runtime (actually Kusama -> Polkadot)", "refId": "A" }, { "exemplar": true, - "expr": "Millau_to_Rialto_MessageLane_00000000_millau_rialto_to_millau_conversion_rate", + "expr": "Rialto_Millau_to_Rialto_conversion_rate{instance='relay-millau-rialto:9616'}", "hide": false, "interval": "", "legendFormat": "At runtime", @@ -395,7 +395,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Millau: Rialto -> Millau conversion rate", + "title": "Rialto: Millau -> Rialto conversion rate", "tooltip": { "shared": true, "sort": 0, diff --git 
a/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml b/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml index 5f00e449c3b0bdbe491d291daf0c76f8f11d1ecf..1ff93869de1cb46f3af31f93bbe606f77d6ba0a1 100644 --- a/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml +++ b/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml @@ -1,4 +1,4 @@ -# Exposed ports: 10016, 10116, 10216, 10316, 10416 +# Exposed ports: 10016, 10116, 10216, 10316, 10416, 10516, 10716 version: '3.5' services: @@ -52,6 +52,7 @@ services: relay-messages-millau-to-rialto-generator: <<: *sub-bridge-relay environment: + RUST_LOG: bridge=trace MSG_EXCHANGE_GEN_SECONDARY_LANE: "00000001" entrypoint: /entrypoints/relay-messages-to-rialto-generator-entrypoint.sh ports: @@ -59,13 +60,24 @@ services: depends_on: - relay-millau-rialto + relay-messages-millau-to-rialto-resubmitter: + <<: *sub-bridge-relay + environment: + RUST_LOG: bridge=trace + entrypoint: /entrypoints/relay-messages-to-rialto-resubmitter-entrypoint.sh + ports: + - "10316:9616" + depends_on: + - relay-messages-millau-to-rialto-generator + relay-messages-rialto-to-millau-lane-00000001: <<: *sub-bridge-relay environment: + RUST_LOG: bridge=trace MSG_EXCHANGE_GEN_LANE: "00000001" entrypoint: /entrypoints/relay-messages-rialto-to-millau-entrypoint.sh ports: - - "10316:9616" + - "10416:9616" depends_on: - relay-millau-rialto @@ -75,7 +87,15 @@ services: MSG_EXCHANGE_GEN_SECONDARY_LANE: "00000001" entrypoint: /entrypoints/relay-messages-to-millau-generator-entrypoint.sh ports: - - "10416:9616" + - "10516:9616" + depends_on: + - relay-millau-rialto + + relay-token-swap-generator: + <<: *sub-bridge-relay + entrypoint: /entrypoints/relay-token-swap-generator-entrypoint.sh + ports: + - "10716:9616" depends_on: - relay-millau-rialto diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh 
b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh index 26be814b6941fad513461bf18104d57d8d566ad2..758dce2515aa8586dcddbd2738259f08fb2b4fd6 100755 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh +++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh @@ -1,13 +1,13 @@ #!/bin/bash set -xeu -sleep 20 +sleep 60 curl -v http://millau-node-bob:9933/health curl -v http://rialto-node-bob:9933/health MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} -/home/user/substrate-relay relay-messages MillauToRialto \ +/home/user/substrate-relay relay-messages millau-to-rialto \ --lane $MESSAGE_LANE \ --source-host millau-node-bob \ --source-port 9944 \ diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh index 04bde07ad971b39c1dd495f7015586f303ebb2f8..e0731e9058d1dea3af074a93b34d105db36443f5 100755 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh +++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh @@ -1,13 +1,13 @@ #!/bin/bash set -xeu -sleep 20 +sleep 60 curl -v http://millau-node-bob:9933/health curl -v http://rialto-node-bob:9933/health MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} -/home/user/substrate-relay relay-messages RialtoToMillau \ +/home/user/substrate-relay relay-messages rialto-to-millau \ --lane $MESSAGE_LANE \ --source-host rialto-node-bob \ --source-port 9944 \ diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh 
b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh index 96676bad85b03ccd5f6c7233d799b4dfa9255447..b8d051a13122bf0388e1ab5ec37dbc774a26e676 100755 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh +++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh @@ -14,7 +14,7 @@ SECONDARY_MESSAGE_LANE=${MSG_EXCHANGE_GEN_SECONDARY_LANE} MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE=1024 FERDIE_ADDR=5oSLwptwgySxh5vz1HdvznQJjbQVgwYSvHEpYYeTXu1Ei8j7 -SHARED_CMD="/home/user/substrate-relay send-message RialtoToMillau" +SHARED_CMD="/home/user/substrate-relay send-message rialto-to-millau" SHARED_HOST="--source-host rialto-node-bob --source-port 9944" DAVE_SIGNER="--source-signer //Dave --target-signer //Dave" @@ -25,6 +25,8 @@ rand_sleep() { SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1` echo "Sleeping $SUBMIT_DELAY_S seconds..." 
sleep $SUBMIT_DELAY_S + NOW=`date "+%Y-%m-%d %H:%M:%S"` + echo "Woke up at $NOW" } # start sending large messages immediately @@ -32,6 +34,10 @@ LARGE_MESSAGES_TIME=0 # start sending message packs in a hour BUNCH_OF_MESSAGES_TIME=3600 +# give conversion rate updater some time to update Millau->Rialto conversion rate in Rialto +# (initially rate=1 and rational relayer won't deliver any messages if it'll be changed to larger value) +sleep 180 + while true do rand_sleep @@ -46,6 +52,7 @@ do $SEND_MESSAGE \ --lane $SECONDARY_MESSAGE_LANE \ --origin Target \ + --dispatch-fee-payment at-target-chain \ remark fi diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh index c24ec8ea7f40a7e8825b9419b5ee61964425ba8a..0365ebe1d8b46b8d79ce4ce01d01ff4bfc049cc7 100755 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh +++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh @@ -14,7 +14,7 @@ SECONDARY_MESSAGE_LANE=${MSG_EXCHANGE_GEN_SECONDARY_LANE} MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE=128 FERDIE_ADDR=6ztG3jPnJTwgZnnYsgCDXbbQVR82M96hBZtPvkN56A9668ZC -SHARED_CMD=" /home/user/substrate-relay send-message MillauToRialto" +SHARED_CMD=" /home/user/substrate-relay send-message millau-to-rialto" SHARED_HOST="--source-host millau-node-bob --source-port 9944" DAVE_SIGNER="--target-signer //Dave --source-signer //Dave" @@ -25,6 +25,8 @@ rand_sleep() { SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1` echo "Sleeping $SUBMIT_DELAY_S seconds..." 
sleep $SUBMIT_DELAY_S + NOW=`date "+%Y-%m-%d %H:%M:%S"` + echo "Woke up at $NOW" } # start sending large messages immediately @@ -32,6 +34,10 @@ LARGE_MESSAGES_TIME=0 # start sending message packs in a hour BUNCH_OF_MESSAGES_TIME=3600 +# give conversion rate updater some time to update Rialto->Millau conversion rate in Millau +# (initially rate=1 and rational relayer won't deliver any messages if it'll be changed to larger value) +sleep 180 + while true do rand_sleep @@ -46,6 +52,7 @@ do $SEND_MESSAGE \ --lane $SECONDARY_MESSAGE_LANE \ --origin Target \ + --dispatch-fee-payment at-target-chain \ remark fi diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-resubmitter-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-resubmitter-entrypoint.sh new file mode 100755 index 0000000000000000000000000000000000000000..ca4c9f03a8bb80672ea3d37684c63c039ddbbf10 --- /dev/null +++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-resubmitter-entrypoint.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -xeu + +sleep 20 +curl -v http://millau-node-alice:9933/health + +# //Dave is signing Millau -> Rialto message-send transactions, which are causing problems. +# +# When large message is being sent from Millau to Rialto AND other transactions are +# blocking it from being mined, we'll see something like this in logs: +# +# Millau transaction priority with tip=0: 17800827994. Target priority: +# 526186677695 +# +# So since fee multiplier in Millau is `1` and `WeightToFee` is `IdentityFee`, then +# we need tip around `526186677695 - 17800827994 = 508_385_849_701`. Let's round it +# up to `1_000_000_000_000`. 
+ +/home/user/substrate-relay resubmit-transactions millau \ + --target-host millau-node-alice \ + --target-port 9944 \ + --target-signer //Dave \ + --stalled-blocks 5 \ + --tip-limit 1000000000000 \ + --tip-step 1000000000 \ + make-it-best-transaction diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh index 4b50ac086a882b25be4920a10d49163cd5a12852..c87591fb6dbb75d6f8b2c26ed30e2c2d55f184f6 100755 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh +++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh @@ -1,18 +1,18 @@ #!/bin/bash set -xeu -sleep 20 +sleep 60 curl -v http://millau-node-alice:9933/health curl -v http://rialto-node-alice:9933/health -/home/user/substrate-relay init-bridge MillauToRialto \ +/home/user/substrate-relay init-bridge millau-to-rialto \ --source-host millau-node-alice \ --source-port 9944 \ --target-host rialto-node-alice \ --target-port 9944 \ --target-signer //Alice -/home/user/substrate-relay init-bridge RialtoToMillau \ +/home/user/substrate-relay init-bridge rialto-to-millau \ --source-host rialto-node-alice \ --source-port 9944 \ --target-host millau-node-alice \ @@ -26,8 +26,11 @@ sleep 6 --millau-host millau-node-alice \ --millau-port 9944 \ --millau-signer //Charlie \ + --millau-messages-pallet-owner=//RialtoMessagesOwner \ --rialto-host rialto-node-alice \ --rialto-port 9944 \ --rialto-signer //Charlie \ + --rialto-messages-pallet-owner=//MillauMessagesOwner \ --lane=00000000 \ + --lane=73776170 \ --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-token-swap-generator-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-token-swap-generator-entrypoint.sh new file mode 100755 index 
0000000000000000000000000000000000000000..95bbe1e38fb295d80a83c30279c3c144ebd8a38c --- /dev/null +++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-token-swap-generator-entrypoint.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# THIS SCRIPT IS NOT INTENDED FOR USE IN PRODUCTION ENVIRONMENT +# +# This scripts periodically calls the Substrate relay binary to generate messages. These messages +# are sent from the Millau network to the Rialto network. + +set -eu + +# Max delay before submitting transactions (s) +MAX_SUBMIT_DELAY_S=60 +SOURCE_HOST=millau-node-charlie +SOURCE_PORT=9944 +TARGET_HOST=rialto-node-charlie +TARGET_PORT=9944 + +# Sleep a bit between messages +rand_sleep() { + SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1` + echo "Sleeping $SUBMIT_DELAY_S seconds..." + sleep $SUBMIT_DELAY_S + NOW=`date "+%Y-%m-%d %H:%M:%S"` + echo "Woke up at $NOW" +} + +# give conversion rate updater some time to update Rialto->Millau conversion rate in Millau +# (initially rate=1 and rational relayer won't deliver any messages if it'll be changed to larger value) +sleep 180 + +while true +do + rand_sleep + echo "Initiating token-swap between Rialto and Millau" + /home/user/substrate-relay \ + swap-tokens \ + millau-to-rialto \ + --source-host $SOURCE_HOST \ + --source-port $SOURCE_PORT \ + --source-signer //WithRialtoTokenSwap \ + --source-balance 100000 \ + --target-host $TARGET_HOST \ + --target-port $TARGET_PORT \ + --target-signer //WithMillauTokenSwap \ + --target-balance 200000 \ + lock-until-block \ + --blocks-before-expire 32 +done diff --git a/polkadot/bridges/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json b/polkadot/bridges/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json index e73ddea40f1a6bc079b860174977f9f0c47de15e..1a3603512fdf056e58f2ddcfae4b4b86f7cdeec2 100644 --- 
a/polkadot/bridges/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json +++ b/polkadot/bridges/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json @@ -24,7 +24,7 @@ { "evaluator": { "params": [ - 5 + 32 ], "type": "gt" }, @@ -46,11 +46,11 @@ } ], "executionErrorState": "alerting", - "for": "5m", + "for": "60m", "frequency": "5m", "handler": 1, "message": "", - "name": "Synced Header Difference is Over 5 (Westend to Millau)", + "name": "Synced Header Difference is Over 32 (Westend to Millau)", "noDataState": "no_data", "notifications": [] }, @@ -163,7 +163,7 @@ { "evaluator": { "params": [ - 5 + 32 ], "type": "lt" }, @@ -185,7 +185,7 @@ } ], "executionErrorState": "alerting", - "for": "3m", + "for": "60m", "frequency": "5m", "handler": 1, "name": "No New Headers (Westend to Millau)", @@ -237,9 +237,9 @@ "steppedLine": false, "targets": [ { - "expr": "max_over_time(Westend_to_Millau_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Westend_to_Millau_Sync_best_block_numbers{node=\"source\"}[2m])", + "expr": "max_over_time(Westend_to_Millau_Sync_best_block_numbers{node=\"source\"}[10m])-min_over_time(Westend_to_Millau_Sync_best_block_numbers{node=\"source\"}[10m])", "interval": "", - "legendFormat": "Number of new Headers on Westend (Last 2 Mins)", + "legendFormat": "Number of new Headers on Westend (Last 10 Mins)", "refId": "A" } ], @@ -401,7 +401,7 @@ "steppedLine": false, "targets": [ { - "expr": "Westend_to_Millau_Sync_system_average_load", + "expr": "system_average_load{instance='relay-headers-westend-to-millau:9616'}", "interval": "", "legendFormat": "Average system load in last {{over}}", "refId": "A" @@ -500,7 +500,7 @@ "pluginVersion": "7.1.3", "targets": [ { - "expr": "avg_over_time(Westend_to_Millau_Sync_process_cpu_usage_percentage[1m])", + "expr": "avg_over_time(process_cpu_usage_percentage{instance='relay-headers-westend-to-millau:9616'}[1m])", 
"instant": true, "interval": "", "legendFormat": "1 CPU = 100", @@ -615,7 +615,7 @@ "steppedLine": false, "targets": [ { - "expr": "Westend_to_Millau_Sync_process_memory_usage_bytes / 1024 / 1024", + "expr": "process_memory_usage_bytes{instance='relay-headers-westend-to-millau:9616'} / 1024 / 1024", "interval": "", "legendFormat": "Process memory, MB", "refId": "A" diff --git a/polkadot/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh b/polkadot/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh index 4a96ade6ec85302a936e493791fcb2f4213c7574..d3b6932983fba343392ce35568ace2885b6a498c 100755 --- a/polkadot/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh +++ b/polkadot/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh @@ -1,11 +1,11 @@ #!/bin/bash set -xeu -sleep 20 +sleep 60 curl -v http://millau-node-alice:9933/health curl -v https://westend-rpc.polkadot.io:443/health -/home/user/substrate-relay init-bridge WestendToMillau \ +/home/user/substrate-relay init-bridge westend-to-millau \ --source-host westend-rpc.polkadot.io \ --source-port 443 \ --source-secure \ @@ -15,11 +15,12 @@ curl -v https://westend-rpc.polkadot.io:443/health # Give chain a little bit of time to process initialization transaction sleep 6 -/home/user/substrate-relay relay-headers WestendToMillau \ +/home/user/substrate-relay relay-headers westend-to-millau \ --source-host westend-rpc.polkadot.io \ --source-port 443 \ --source-secure \ --target-host millau-node-alice \ --target-port 9944 \ --target-signer //George \ + --target-transactions-mortality=4\ --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/dev/poa-config/keys/BridgePoa/address_book.json b/polkadot/bridges/deployments/dev/poa-config/keys/BridgePoa/address_book.json deleted file mode 100644 index 
9e26dfeeb6e641a33dae4961196235bdb965b21b..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/dev/poa-config/keys/BridgePoa/address_book.json +++ /dev/null @@ -1 +0,0 @@ -{} \ No newline at end of file diff --git a/polkadot/bridges/deployments/dev/poa-config/keys/BridgePoa/arthur.json b/polkadot/bridges/deployments/dev/poa-config/keys/BridgePoa/arthur.json deleted file mode 100644 index fa59a46480c27c74d5d675908885b70e37c68330..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/dev/poa-config/keys/BridgePoa/arthur.json +++ /dev/null @@ -1 +0,0 @@ -{"id":"dd04f316-bc9d-2deb-4a34-51014cd5f34f","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"aa91e6f0e6cf48208be4a1bcf15c6f30"},"ciphertext":"6e057599b13a87e8181bb39a40e14848fdc97958d493ddfa6bb1260350f69328","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"79dd8c09c5c066b830179a2558a51efca6d97c0db2c4128090a01835786823c5"},"mac":"8f8b8e2c9de29ec8eefc54a60055e30ae7ff4dd4a367eaf38880edb887da771e"},"address":"005e714f896a8b7cede9d38688c1a81de72a58e4","name":"","meta":"{}"} \ No newline at end of file diff --git a/polkadot/bridges/deployments/dev/poa-config/pass b/polkadot/bridges/deployments/dev/poa-config/pass deleted file mode 100644 index f3097ab13082b70f67202aab7dd9d1b35b7ceac2..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/dev/poa-config/pass +++ /dev/null @@ -1 +0,0 @@ -password diff --git a/polkadot/bridges/deployments/dev/poa-config/poa-node-config b/polkadot/bridges/deployments/dev/poa-config/poa-node-config deleted file mode 100644 index 146bbac17cf9e12bff3f7ecb12515cb2642bc5b9..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/dev/poa-config/poa-node-config +++ /dev/null @@ -1,17 +0,0 @@ -[parity] -chain = "./deployments/dev/poa-config/poa.json" -keys_path = "./deployments/dev/poa-config/keys" -no_persistent_txqueue = true - -[account] -password = 
["./deployments/dev/poa-config/pass"] - -[rpc] -apis = ["all"] -cors = ["moz-extension://*", "chrome-extension://*"] - -[mining] -force_sealing = true - -[misc] -unsafe_expose = true diff --git a/polkadot/bridges/deployments/dev/poa-config/poa.json b/polkadot/bridges/deployments/dev/poa-config/poa.json deleted file mode 100644 index ecc21766b035907ac5cfcc61bcf70752d93c4ee6..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/dev/poa-config/poa.json +++ /dev/null @@ -1,178 +0,0 @@ -{ - "name": "BridgePoa", - "engine": { - "authorityRound": { - "params": { - "stepDuration": 10, - "validators": { - "list": [ - "0x005e714f896a8b7cede9d38688c1a81de72a58e4" - ] - }, - "validateScoreTransition": 0, - "validateStepTransition": 0, - "maximumUncleCountTransition": 0, - "maximumUncleCount": 0, - "emptyStepsTransition": "0xfffffffff", - "maximumEmptySteps": 1 - } - } - }, - "params": { - "accountStartNonce": "0x0", - "eip1014Transition": "0x0", - "eip1052Transition": "0x0", - "eip140Transition": "0x0", - "eip145Transition": "0x0", - "eip150Transition": "0x0", - "eip155Transition": "0x0", - "eip160Transition": "0x0", - "eip161abcTransition": "0x0", - "eip161dTransition": "0x0", - "eip211Transition": "0x0", - "eip214Transition": "0x0", - "eip658Transition": "0x0", - "eip98Transition": "0x7fffffffffffff", - "gasLimitBoundDivisor": "0x0400", - "maxCodeSize": 24576, - "maxCodeSizeTransition": "0x0", - "maximumExtraDataSize": "0x20", - "minGasLimit": "0x1388", - "networkID" : "0x69", - "validateChainIdTransition": "0x0", - "validateReceiptsTransition": "0x0" - }, - "genesis": { - "seal": { - "authorityRound": { - "step": "0x0", - "signature": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" - } - }, - "difficulty": "0x20000", - "author": "0x0000000000000000000000000000000000000000", - "timestamp": "0x00", - "parentHash": 
"0x0000000000000000000000000000000000000000000000000000000000000000", - "extraData": "0x", - "gasLimit": "0x222222" - }, - "accounts": { - "0000000000000000000000000000000000000001": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, - "0000000000000000000000000000000000000002": { "balance": "1", "nonce": "1048576", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } }, - "0000000000000000000000000000000000000003": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, - "0000000000000000000000000000000000000004": { "balance": "1", "nonce": "1048576", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, - "0000000000000000000000000000000000000005": { "balance": "1", "builtin": { "name": "modexp", "activate_at": 0, "pricing": { "modexp": { "divisor": 20 } } } }, - "0000000000000000000000000000000000000006": { - "balance": "1", - "builtin": { - "name": "alt_bn128_add", - "pricing": { - "0": { - "price": { "alt_bn128_const_operations": { "price": 500 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_const_operations": { "price": 150 }} - } - } - } - }, - "0000000000000000000000000000000000000007": { - "balance": "1", - "builtin": { - "name": "alt_bn128_mul", - "pricing": { - "0": { - "price": { "alt_bn128_const_operations": { "price": 40000 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_const_operations": { "price": 6000 }} - } - } - } - }, - "0000000000000000000000000000000000000008": { - "balance": "1", - "builtin": { - "name": "alt_bn128_pairing", - "pricing": { - "0": { - "price": { "alt_bn128_pairing": { "base": 100000, "pair": 80000 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_pairing": { "base": 45000, "pair": 34000 }} 
- } - } - } - }, - "0x0000000000000000000000000000000000000009": { - "builtin": { - "name": "blake2_f", - "activate_at": "0xd751a5", - "pricing": { - "blake2_f": { - "gas_per_round": 1 - } - } - } - }, - "0x0000000000000000000000000000000000000010": { - "builtin": { - "name": "parse_substrate_header", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000011": { - "builtin": { - "name": "get_substrate_header_signal", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000012": { - "builtin": { - "name": "verify_substrate_finality_proof", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000013": { - "builtin": { - "name": "my_test", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x005e714f896a8b7cede9d38688c1a81de72a58e4": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - }, - "0x007594304039c2937a12220338aab821d819f5a4": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - }, - "0x004e7a39907f090e19b0b80a277e77b72b22e269": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - } - } -} diff --git a/polkadot/bridges/deployments/local-scripts/relay-headers-rococo-to-wococo.sh b/polkadot/bridges/deployments/local-scripts/relay-headers-rococo-to-wococo.sh index 2736243c5a48fab7cdc2c2c5bac963e1dbdac4a3..61028e1756b3cad0f46f59f8523c2b806e7e7e13 100755 --- a/polkadot/bridges/deployments/local-scripts/relay-headers-rococo-to-wococo.sh +++ b/polkadot/bridges/deployments/local-scripts/relay-headers-rococo-to-wococo.sh @@ -8,14 +8,14 @@ set -xeu -RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge RococoToWococo \ +RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge 
rococo-to-wococo \ --source-host 127.0.0.1 \ --source-port 9955 \ --target-host 127.0.0.1 \ --target-port 9944 \ --target-signer //Alice -RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers RococoToWococo \ +RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers rococo-to-wococo \ --source-host 127.0.0.1 \ --source-port 9955 \ --target-host 127.0.0.1 \ diff --git a/polkadot/bridges/deployments/local-scripts/relay-headers-wococo-to-rococo.sh b/polkadot/bridges/deployments/local-scripts/relay-headers-wococo-to-rococo.sh index b3a7e383d9b950c0d18e378f87e20a570df86950..c57db2086fb412338efd7af1efd27c229d15f175 100755 --- a/polkadot/bridges/deployments/local-scripts/relay-headers-wococo-to-rococo.sh +++ b/polkadot/bridges/deployments/local-scripts/relay-headers-wococo-to-rococo.sh @@ -8,14 +8,14 @@ set -xeu -RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge WococoToRococo \ +RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge wococo-to-rococo \ --source-host 127.0.0.1 \ --source-port 9944 \ --target-host 127.0.0.1 \ --target-port 9955 \ --target-signer //Alice -RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers WococoToRococo \ +RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers wococo-to-rococo \ --source-host 127.0.0.1 \ --source-port 9944 \ --target-host 127.0.0.1 \ diff --git a/polkadot/bridges/deployments/local-scripts/relay-messages-millau-to-rialto.sh b/polkadot/bridges/deployments/local-scripts/relay-messages-millau-to-rialto.sh index 5b298a149f8a30a6346db4b187b6d1315cfb0ed9..d420dc56c263f66a95401fd49a276bdcfe68bd9c 100755 --- a/polkadot/bridges/deployments/local-scripts/relay-messages-millau-to-rialto.sh +++ b/polkadot/bridges/deployments/local-scripts/relay-messages-millau-to-rialto.sh @@ -9,7 +9,7 @@ MILLAU_PORT="${MILLAU_PORT:-9945}" RIALTO_PORT="${RIALTO_PORT:-9944}" RUST_LOG=bridge=debug \ 
-./target/debug/substrate-relay relay-messages MillauToRialto \ +./target/debug/substrate-relay relay-messages millau-to-rialto \ --lane 00000000 \ --source-host localhost \ --source-port $MILLAU_PORT \ diff --git a/polkadot/bridges/deployments/local-scripts/relay-messages-rialto-to-millau.sh b/polkadot/bridges/deployments/local-scripts/relay-messages-rialto-to-millau.sh index 616697192b961e33c9ec530e1ddb9f0a6659b1c6..0cd73c00454d9ff5d5c408f49da8c66d4a68a0c3 100755 --- a/polkadot/bridges/deployments/local-scripts/relay-messages-rialto-to-millau.sh +++ b/polkadot/bridges/deployments/local-scripts/relay-messages-rialto-to-millau.sh @@ -9,7 +9,7 @@ MILLAU_PORT="${MILLAU_PORT:-9945}" RIALTO_PORT="${RIALTO_PORT:-9944}" RUST_LOG=bridge=debug \ -./target/debug/substrate-relay relay-messages RialtoToMillau \ +./target/debug/substrate-relay relay-messages rialto-to-millau \ --lane 00000000 \ --source-host localhost \ --source-port $RIALTO_PORT \ diff --git a/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh b/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh index 59c75de3899fb482eb39e8c155e2381b11156c0c..8b18cff2b53c22081d06732005ea8ecb50dc4528 100755 --- a/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh +++ b/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh @@ -9,7 +9,7 @@ MILLAU_PORT="${MILLAU_PORT:-9945}" RIALTO_PORT="${RIALTO_PORT:-9944}" RUST_LOG=bridge=debug \ -./target/debug/substrate-relay init-bridge MillauToRialto \ +./target/debug/substrate-relay init-bridge millau-to-rialto \ --source-host localhost \ --source-port $MILLAU_PORT \ --target-host localhost \ @@ -18,7 +18,7 @@ RUST_LOG=bridge=debug \ sleep 5 RUST_LOG=bridge=debug \ -./target/debug/substrate-relay relay-headers MillauToRialto \ +./target/debug/substrate-relay relay-headers millau-to-rialto \ --source-host localhost \ --source-port $MILLAU_PORT \ --target-host localhost \ diff --git 
a/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh b/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh index 6382cdca82374bafd22f52e88db0a4b1cd7c4cd4..c66c994f06ab675420b4863b1090e3dae7302de6 100755 --- a/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh +++ b/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh @@ -9,7 +9,7 @@ MILLAU_PORT="${MILLAU_PORT:-9945}" RIALTO_PORT="${RIALTO_PORT:-9944}" RUST_LOG=bridge=debug \ -./target/debug/substrate-relay init-bridge RialtoToMillau \ +./target/debug/substrate-relay init-bridge rialto-to-millau \ --target-host localhost \ --target-port $MILLAU_PORT \ --source-host localhost \ @@ -18,7 +18,7 @@ RUST_LOG=bridge=debug \ sleep 5 RUST_LOG=bridge=debug \ -./target/debug/substrate-relay relay-headers RialtoToMillau \ +./target/debug/substrate-relay relay-headers rialto-to-millau \ --target-host localhost \ --target-port $MILLAU_PORT \ --source-host localhost \ diff --git a/polkadot/bridges/deployments/networks/OpenEthereum.Dockerfile b/polkadot/bridges/deployments/networks/OpenEthereum.Dockerfile deleted file mode 100644 index 15fe72d295eb6e5dfc034b14362e101bda30229a..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/networks/OpenEthereum.Dockerfile +++ /dev/null @@ -1,91 +0,0 @@ -FROM docker.io/library/ubuntu:xenial AS builder - -# show backtraces -ENV RUST_BACKTRACE 1 - -ENV LAST_DEPS_UPDATE 2020-06-19 - -# install tools and dependencies -RUN set -eux; \ - apt-get update && \ - apt-get install -y file curl jq ca-certificates && \ - apt-get install -y cmake pkg-config libssl-dev git clang libclang-dev - -ENV LAST_CERTS_UPDATE 2020-06-19 - -RUN update-ca-certificates && \ - curl https://sh.rustup.rs -sSf | sh -s -- -y - -ENV PATH="/root/.cargo/bin:${PATH}" -ENV LAST_RUST_UPDATE="2020-09-09" -RUN rustup update stable && \ - rustup install nightly && \ - rustup target add wasm32-unknown-unknown --toolchain nightly - -RUN rustc -vV 
&& \ - cargo -V && \ - gcc -v && \ - g++ -v && \ - cmake --version - -WORKDIR /openethereum - -### Build from the repo -ARG ETHEREUM_REPO=https://github.com/paritytech/openethereum.git -ARG ETHEREUM_HASH=344991dbba2bc8657b00916f0e4b029c66f159e8 -RUN git clone $ETHEREUM_REPO /openethereum && git checkout $ETHEREUM_HASH - -### Build locally. Make sure to set the CONTEXT to main directory of the repo. -# ADD openethereum /openethereum - -WORKDIR /parity-bridges-common - -### Build from the repo -# Build using `master` initially. -ARG BRIDGE_REPO=https://github.com/paritytech/parity-bridges-common -RUN git clone $BRIDGE_REPO /parity-bridges-common && git checkout master - -WORKDIR /openethereum -RUN cargo build --release --verbose || true - -# Then rebuild by switching to a different branch to only incrementally -# build the changes. -WORKDIR /parity-bridges-common -ARG BRIDGE_HASH=master -RUN git checkout . && git fetch && git checkout $BRIDGE_HASH -### Build locally. Make sure to set the CONTEXT to main directory of the repo. -# ADD . 
/parity-bridges-common - -WORKDIR /openethereum -RUN cargo build --release --verbose -RUN strip ./target/release/openethereum - -FROM docker.io/library/ubuntu:xenial - -# show backtraces -ENV RUST_BACKTRACE 1 - -RUN set -eux; \ - apt-get update && \ - apt-get install -y curl - -RUN groupadd -g 1000 openethereum \ - && useradd -u 1000 -g openethereum -s /bin/sh -m openethereum - -# switch to user openethereum here -USER openethereum - -WORKDIR /home/openethereum - -COPY --chown=openethereum:openethereum --from=builder /openethereum/target/release/openethereum ./ -# Solve issues with custom --keys-path -RUN mkdir -p ~/.local/share/io.parity.ethereum/keys/ -# check if executable works in this container -RUN ./openethereum --version - -EXPOSE 8545 8546 30303/tcp 30303/udp - -HEALTHCHECK --interval=2m --timeout=5s \ - CMD curl -f http://localhost:8545/api/health || exit 1 - -ENTRYPOINT ["/home/openethereum/openethereum"] diff --git a/polkadot/bridges/deployments/networks/entrypoints/rialto-chainspec-exporter-entrypoint.sh b/polkadot/bridges/deployments/networks/entrypoints/rialto-chainspec-exporter-entrypoint.sh new file mode 100755 index 0000000000000000000000000000000000000000..0898978096d33111f53d2a72119fa75dc2e1ca7d --- /dev/null +++ b/polkadot/bridges/deployments/networks/entrypoints/rialto-chainspec-exporter-entrypoint.sh @@ -0,0 +1,14 @@ +#!/bin/bash +set -xeu + +/home/user/rialto-bridge-node build-spec \ + --chain local \ + --raw \ + --disable-default-bootnode \ + > /rialto-share/rialto-relaychain-spec-raw.json + +# we're using local driver + tmpfs for shared `/rialto-share` volume, which is populated +# by the container running this script. If this script ends, the volume will be detached +# and our chain spec will be lost when it'll go online again. Hence the never-ending +# script which keeps volume online until container is stopped. 
+tail -f /dev/null diff --git a/polkadot/bridges/deployments/networks/entrypoints/rialto-parachain-registrar-entrypoint.sh b/polkadot/bridges/deployments/networks/entrypoints/rialto-parachain-registrar-entrypoint.sh new file mode 100755 index 0000000000000000000000000000000000000000..172502327c9a071a3581a1a60dff955667f6e29f --- /dev/null +++ b/polkadot/bridges/deployments/networks/entrypoints/rialto-parachain-registrar-entrypoint.sh @@ -0,0 +1,13 @@ +#!/bin/bash +set -xeu + +sleep 60 +curl -v http://rialto-node-alice:9933/health +curl -v http://rialto-parachain-collator-alice:9933/health + +/home/user/substrate-relay register-parachain rialto-parachain \ + --parachain-host rialto-parachain-collator-alice \ + --parachain-port 9944 \ + --relaychain-host rialto-node-alice \ + --relaychain-port 9944 \ + --relaychain-signer //Alice diff --git a/polkadot/bridges/deployments/networks/eth-poa.yml b/polkadot/bridges/deployments/networks/eth-poa.yml deleted file mode 100644 index 7291a2ccfd70b30f7b5e4dd0bf8a28490cbf8683..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/networks/eth-poa.yml +++ /dev/null @@ -1,46 +0,0 @@ -# Compose file for quickly spinning up a local instance of an Ethereum PoA network. -# -# Note that this PoA network is only used for testing, so the configuration settings you see here -# are *not* recommended for a production environment. -# -# For example, do *not* keep your account key in version control, and unless you're _really_ sure -# you want to provide public access to your nodes do *not* publicly expose RPC methods. 
-version: '3.5' -services: - poa-node-arthur: &poa-node - image: hcastano/openethereum-bridge-builtins - entrypoint: - - /home/openethereum/openethereum - - --config=/config/poa-node-config - - --node-key=arthur - - --engine-signer=0x005e714f896a8b7cede9d38688c1a81de72a58e4 - environment: - RUST_LOG: rpc=trace,txqueue=trace,bridge-builtin=trace - ports: - - "8545:8545" - - "8546:8546" - - "30303:30303" - - poa-node-bertha: - <<: *poa-node - entrypoint: - - /home/openethereum/openethereum - - --config=/config/poa-node-config - - --node-key=bertha - - --engine-signer=0x007594304039c2937a12220338aab821d819f5a4 - ports: - - "8645:8545" - - "8646:8546" - - "31303:30303" - - poa-node-carlos: - <<: *poa-node - entrypoint: - - /home/openethereum/openethereum - - --config=/config/poa-node-config - - --node-key=carlos - - --engine-signer=0x004e7a39907f090e19b0b80a277e77b72b22e269 - ports: - - "8745:8545" - - "8746:8546" - - "32303:30303" diff --git a/polkadot/bridges/deployments/networks/millau.yml b/polkadot/bridges/deployments/networks/millau.yml index 54790579f1c165d1991bbcd83b2eadf44986421f..d42c1d7d07cb6e9748eb5e55e8d04cfcbb5523ee 100644 --- a/polkadot/bridges/deployments/networks/millau.yml +++ b/polkadot/bridges/deployments/networks/millau.yml @@ -20,7 +20,7 @@ services: - --unsafe-rpc-external - --unsafe-ws-external environment: - RUST_LOG: runtime=trace,rpc=debug,txpool=trace,runtime::bridge=trace + RUST_LOG: runtime=trace,rpc=debug,txpool=trace,runtime::bridge=trace,sc_basic_authorship=trace,beefy=debug ports: - "19933:9933" - "19944:9944" diff --git a/polkadot/bridges/deployments/networks/rialto-parachain.yml b/polkadot/bridges/deployments/networks/rialto-parachain.yml new file mode 100644 index 0000000000000000000000000000000000000000..b2d2188f1babb2406d98c95cb1f5cc4e78a1ef47 --- /dev/null +++ b/polkadot/bridges/deployments/networks/rialto-parachain.yml @@ -0,0 +1,90 @@ +# Compose file for quickly spinning up a local instance of the Rialto Parachain network. 
+# +# Since Rialto Parachain is unusable without Rialto, this file depends on some Rialto +# network nodes. +version: '3.5' +services: + rialto-parachain-collator-alice: &rialto-parachain-collator + image: paritytech/rialto-parachain-collator + entrypoint: > + /home/user/rialto-parachain-collator + --alice + --collator + --force-authoring + --parachain-id 2000 + --rpc-port 9933 + --ws-port 9944 + --rpc-cors=all + --unsafe-rpc-external + --unsafe-ws-external + -- + --execution wasm + --chain /rialto-share/rialto-relaychain-spec-raw.json + --rpc-port 9934 + --ws-port 9945 + volumes: + - rialto-share:/rialto-share:z + environment: + RUST_LOG: runtime=trace,rpc=trace,txpool=trace,parachain=trace,parity_ws=trace + depends_on: + - rialto-chainspec-exporter + ports: + - "20433:9933" + - "20444:9944" + + rialto-parachain-collator-bob: + <<: *rialto-parachain-collator + entrypoint: > + /home/user/rialto-parachain-collator + --bob + --collator + --force-authoring + --parachain-id 2000 + --rpc-port 9933 + --ws-port 9944 + --rpc-cors=all + --unsafe-rpc-external + --unsafe-ws-external + -- + --execution wasm + --chain /rialto-share/rialto-relaychain-spec-raw.json + --rpc-port 9934 + --ws-port 9945 + ports: + - "20533:9933" + - "20544:9944" + + rialto-parachain-collator-charlie: + <<: *rialto-parachain-collator + entrypoint: > + /home/user/rialto-parachain-collator + --charlie + --collator + --force-authoring + --parachain-id 2000 + --rpc-port 9933 + --ws-port 9944 + --rpc-cors=all + --unsafe-rpc-external + --unsafe-ws-external + -- + --execution wasm + --chain /rialto-share/rialto-relaychain-spec-raw.json + --rpc-port 9934 + --ws-port 9945 + ports: + - "20633:9933" + - "20644:9944" + + rialto-parachain-registrar: + image: paritytech/substrate-relay + entrypoint: /entrypoints/rialto-parachain-registrar-entrypoint.sh + volumes: + - ./networks/entrypoints:/entrypoints + - rialto-share:/rialto-share:z + environment: + RUST_LOG: bridge=trace + depends_on: + - rialto-node-alice + - 
rialto-parachain-collator-alice + diff --git a/polkadot/bridges/deployments/networks/rialto.yml b/polkadot/bridges/deployments/networks/rialto.yml index 3039d7c33bcd41bac0397b7ac96ea4f1aa21bec0..0a484b2dad7511fa34b1b7447cbfd7dd0f0f92db 100644 --- a/polkadot/bridges/deployments/networks/rialto.yml +++ b/polkadot/bridges/deployments/networks/rialto.yml @@ -20,7 +20,7 @@ services: - --unsafe-rpc-external - --unsafe-ws-external environment: - RUST_LOG: runtime=trace,rpc=debug,txpool=trace,runtime::bridge=trace + RUST_LOG: runtime=trace,rpc=debug,txpool=trace,runtime::bridge=trace,beefy=debug ports: - "9933:9933" - "9944:9944" @@ -85,3 +85,20 @@ services: ports: - "10333:9933" - "10344:9944" + + rialto-chainspec-exporter: + image: paritytech/rialto-bridge-node + entrypoint: /entrypoints/rialto-chainspec-exporter-entrypoint.sh + volumes: + - ./networks/entrypoints:/entrypoints + - rialto-share:/rialto-share:z + +# we're using `/rialto-share` to expose Rialto chain spec to those who are interested. Right +# now it is Rialto Parachain collator nodes. Local + tmpfs combination allows sharing writable +# in-memory volumes, which are dropped when containers are stopped. +volumes: + rialto-share: + driver: local + driver_opts: + type: "tmpfs" + device: "tmpfs" diff --git a/polkadot/bridges/deployments/run.sh b/polkadot/bridges/deployments/run.sh index a79638352a383e11c5e40823d353594480ccd5ab..5c1cded1e832102575adad53cc48f8ae05e886b2 100755 --- a/polkadot/bridges/deployments/run.sh +++ b/polkadot/bridges/deployments/run.sh @@ -4,7 +4,7 @@ # # To deploy a network you can run this script with the name of the bridge (or multiple bridges) you want to run. # -# `./run.sh poa-rialto rialto-millau` +# `./run.sh westend-millau rialto-millau` # # To update a deployment to use the latest images available from the Docker Hub add the `update` # argument after the bridge name. 
@@ -30,22 +30,22 @@ function show_help () { echo Error: $1 echo " " echo "Usage:" - echo " ./run.sh poa-rialto [stop|update] Run PoA <> Rialto Networks & Bridge" echo " ./run.sh rialto-millau [stop|update] Run Rialto <> Millau Networks & Bridge" echo " ./run.sh westend-millau [stop|update] Run Westend -> Millau Networks & Bridge" echo " " echo "Options:" echo " --no-monitoring Disable monitoring" + echo " --no-ui Disable UI" echo " " echo "You can start multiple bridges at once by passing several bridge names:" - echo " ./run.sh poa-rialto rialto-millau westend-millau [stop|update]" + echo " ./run.sh rialto-millau westend-millau [stop|update]" exit 1 } -RIALTO=' -f ./networks/rialto.yml' +RIALTO=' -f ./networks/rialto.yml -f ./networks/rialto-parachain.yml' MILLAU=' -f ./networks/millau.yml' -ETH_POA=' -f ./networks/eth-poa.yml' MONITORING=' -f ./monitoring/docker-compose.yml' +UI=' -f ./ui/docker-compose.yml' BRIDGES=() NETWORKS='' @@ -58,13 +58,10 @@ do shift continue ;; - poa-rialto) - BRIDGES+=($i) - NETWORKS+=${RIALTO} - RIALTO='' - NETWORKS+=${ETH_POA} - ETH_POA='' + --no-ui) + UI="" shift + continue ;; rialto-millau) BRIDGES+=($i) @@ -94,7 +91,7 @@ if [ ${#BRIDGES[@]} -eq 0 ]; then show_help "Missing bridge name." fi -COMPOSE_FILES=$NETWORKS$MONITORING +COMPOSE_FILES=$NETWORKS$MONITORING$UI # Compose looks for .env files in the the current directory by default, we don't want that COMPOSE_ARGS="--project-directory ." 
diff --git a/polkadot/bridges/deployments/types-millau.json b/polkadot/bridges/deployments/types-millau.json index a15527f59d794d5f2025a18808e5f999b71fd78d..6d651b4c7cf733f01ee93c26bc0d981ffccf28b0 100644 --- a/polkadot/bridges/deployments/types-millau.json +++ b/polkadot/bridges/deployments/types-millau.json @@ -1,5 +1,7 @@ { "--1": "Millau Types", + "MillauAddress": "AccountId", + "MillauLookupSource": "AccountId", "MillauBalance": "u64", "MillauBlockHash": "H512", "MillauBlockNumber": "u64", @@ -25,6 +27,8 @@ } }, "--2": "Rialto Types", + "RialtoAddress": "MultiAddress", + "RialtoLookupSource": "MultiAddress", "RialtoBalance": "u128", "RialtoBlockHash": "H256", "RialtoBlockNumber": "u32", @@ -50,8 +54,6 @@ } }, "--3": "Common types", - "Address": "AccountId", - "LookupSource": "AccountId", "AccountSigner": "MultiSigner", "SpecVersion": "u32", "RelayerId": "AccountId", @@ -70,7 +72,7 @@ "ChainId": "Id", "LaneId": "Id", "MessageNonce": "u64", - "MessageId": "(Id, u64)", + "BridgeMessageId": "(Id, u64)", "MessageKey": { "lane_id": "LaneId", "nonce:": "MessageNonce" @@ -90,9 +92,9 @@ "dispatch_results": "BitVec" }, "OutboundLaneData": { - "latest_generated_nonce": "MessageNonce", + "oldest_unpruned_nonce": "MessageNonce", "latest_received_nonce": "MessageNonce", - "oldest_unpruned_nonce": "MessageNonce" + "latest_generated_nonce": "MessageNonce" }, "MessageData": { "payload": "MessagePayload", @@ -172,6 +174,8 @@ "commit": "Commit", "votes_ancestries": "Vec" }, + "Address": "MillauAddress", + "LookupSource": "MillauLookupSource", "Fee": "MillauBalance", "Balance": "MillauBalance", "Hash": "MillauBlockHash", diff --git a/polkadot/bridges/deployments/types-rialto.json b/polkadot/bridges/deployments/types-rialto.json index 5375e43aea451dde9082ab2791f38db27e908d9b..a574e1178936cfffc1b12dba32cd47b522459f9f 100644 --- a/polkadot/bridges/deployments/types-rialto.json +++ b/polkadot/bridges/deployments/types-rialto.json @@ -1,5 +1,7 @@ { "--1": "Millau Types", + 
"MillauAddress": "AccountId", + "MillauLookupSource": "AccountId", "MillauBalance": "u64", "MillauBlockHash": "H512", "MillauBlockNumber": "u64", @@ -25,6 +27,8 @@ } }, "--2": "Rialto Types", + "RialtoAddress": "MultiAddress", + "RialtoLookupSource": "MultiAddress", "RialtoBalance": "u128", "RialtoBlockHash": "H256", "RialtoBlockNumber": "u32", @@ -50,8 +54,6 @@ } }, "--3": "Common types", - "Address": "AccountId", - "LookupSource": "AccountId", "AccountSigner": "MultiSigner", "SpecVersion": "u32", "RelayerId": "AccountId", @@ -70,7 +72,7 @@ "ChainId": "Id", "LaneId": "Id", "MessageNonce": "u64", - "MessageId": "(Id, u64)", + "BridgeMessageId": "(Id, u64)", "MessageKey": { "lane_id": "LaneId", "nonce:": "MessageNonce" @@ -90,9 +92,9 @@ "dispatch_results": "BitVec" }, "OutboundLaneData": { - "latest_generated_nonce": "MessageNonce", + "oldest_unpruned_nonce": "MessageNonce", "latest_received_nonce": "MessageNonce", - "oldest_unpruned_nonce": "MessageNonce" + "latest_generated_nonce": "MessageNonce" }, "MessageData": { "payload": "MessagePayload", @@ -172,6 +174,8 @@ "commit": "Commit", "votes_ancestries": "Vec" }, + "Address": "RialtoAddress", + "LookupSource": "RialtoLookupSource", "Fee": "RialtoBalance", "Balance": "RialtoBalance", "BlockHash": "RialtoBlockHash", @@ -183,5 +187,6 @@ "_enum": { "RialtoToMillauConversionRate": "u128" } - } + }, + "ValidationCodeHash": "H256" } diff --git a/polkadot/bridges/deployments/types-rococo.json b/polkadot/bridges/deployments/types-rococo.json index 6490266809f59425d9ee22c70f517bd6dcae7fe0..6f4592a8d5733712551b3e350f4b037ac148f9bc 100644 --- a/polkadot/bridges/deployments/types-rococo.json +++ b/polkadot/bridges/deployments/types-rococo.json @@ -1,17 +1,19 @@ { "--1": "Rococo Types", + "RococoAddress": "AccountId", + "RococoLookupSource": "AccountId", "RococoBalance": "u128", "RococoBlockHash": "H256", "RococoBlockNumber": "u32", "RococoHeader": "Header", "--2": "Wococo Types", + "WococoAddress": "AccountId", + 
"WococoLookupSource": "AccountId", "WococoBalance": "RococoBalance", "WococoBlockHash": "RococoBlockHash", "WococoBlockNumber": "RococoBlockNumber", "WococoHeader": "RococoHeader", "--3": "Common types", - "Address": "AccountId", - "LookupSource": "AccountId", "AccountSigner": "MultiSigner", "SpecVersion": "u32", "RelayerId": "AccountId", @@ -30,7 +32,7 @@ "ChainId": "Id", "LaneId": "Id", "MessageNonce": "u64", - "MessageId": "(Id, u64)", + "BridgeMessageId": "(Id, u64)", "MessageKey": { "lane_id": "LaneId", "nonce:": "MessageNonce" @@ -50,9 +52,9 @@ "dispatch_results": "BitVec" }, "OutboundLaneData": { - "latest_generated_nonce": "MessageNonce", + "oldest_unpruned_nonce": "MessageNonce", "latest_received_nonce": "MessageNonce", - "oldest_unpruned_nonce": "MessageNonce" + "latest_generated_nonce": "MessageNonce" }, "MessageData": { "payload": "MessagePayload", @@ -132,6 +134,8 @@ "commit": "Commit", "votes_ancestries": "Vec" }, + "Address": "RococoAddress", + "LookupSource": "RococoLookupSource", "Fee": "RococoBalance", "Balance": "RococoBalance", "BlockHash": "RococoBlockHash", diff --git a/polkadot/bridges/deployments/types-wococo.json b/polkadot/bridges/deployments/types-wococo.json index 1a4084e94cfcadd6b908165d70c2ece083b4c114..562f08afa9c01fe491fdd458bc122daff0cedcf2 100644 --- a/polkadot/bridges/deployments/types-wococo.json +++ b/polkadot/bridges/deployments/types-wococo.json @@ -1,17 +1,19 @@ { "--1": "Rococo Types", + "RococoAddress": "AccountId", + "RococoLookupSource": "AccountId", "RococoBalance": "u128", "RococoBlockHash": "H256", "RococoBlockNumber": "u32", "RococoHeader": "Header", "--2": "Wococo Types", + "WococoAddress": "AccountId", + "WococoLookupSource": "AccountId", "WococoBalance": "RococoBalance", "WococoBlockHash": "RococoBlockHash", "WococoBlockNumber": "RococoBlockNumber", "WococoHeader": "RococoHeader", "--3": "Common types", - "Address": "AccountId", - "LookupSource": "AccountId", "AccountSigner": "MultiSigner", "SpecVersion": "u32", 
"RelayerId": "AccountId", @@ -30,7 +32,7 @@ "ChainId": "Id", "LaneId": "Id", "MessageNonce": "u64", - "MessageId": "(Id, u64)", + "BridgeMessageId": "(Id, u64)", "MessageKey": { "lane_id": "LaneId", "nonce:": "MessageNonce" @@ -50,9 +52,9 @@ "dispatch_results": "BitVec" }, "OutboundLaneData": { - "latest_generated_nonce": "MessageNonce", + "oldest_unpruned_nonce": "MessageNonce", "latest_received_nonce": "MessageNonce", - "oldest_unpruned_nonce": "MessageNonce" + "latest_generated_nonce": "MessageNonce" }, "MessageData": { "payload": "MessagePayload", @@ -132,6 +134,8 @@ "commit": "Commit", "votes_ancestries": "Vec" }, + "Address": "WococoAddress", + "LookupSource": "WococoLookupSource", "Fee": "WococoBalance", "Balance": "WococoBalance", "Hash": "WococoBlockHash", diff --git a/polkadot/bridges/deployments/types/common.json b/polkadot/bridges/deployments/types/common.json index d3395ea687fddd98baeb72ff9a1abda7e28d795d..4e129f7132beddb0bf1e724846099bf84e3903fe 100644 --- a/polkadot/bridges/deployments/types/common.json +++ b/polkadot/bridges/deployments/types/common.json @@ -1,7 +1,5 @@ { "--3": "Common types", - "Address": "AccountId", - "LookupSource": "AccountId", "AccountSigner": "MultiSigner", "SpecVersion": "u32", "RelayerId": "AccountId", @@ -20,7 +18,7 @@ "ChainId": "Id", "LaneId": "Id", "MessageNonce": "u64", - "MessageId": "(Id, u64)", + "BridgeMessageId": "(Id, u64)", "MessageKey": { "lane_id": "LaneId", "nonce:": "MessageNonce" @@ -40,9 +38,10 @@ "dispatch_results": "BitVec" }, "OutboundLaneData": { - "latest_generated_nonce": "MessageNonce", + "oldest_unpruned_nonce": "MessageNonce", "latest_received_nonce": "MessageNonce", - "oldest_unpruned_nonce": "MessageNonce" + "latest_generated_nonce": "MessageNonce" + }, "MessageData": { "payload": "MessagePayload", diff --git a/polkadot/bridges/deployments/types/millau.json b/polkadot/bridges/deployments/types/millau.json index f738701263d533bc81825d5deb64c879f540aa95..589d5619df453162c3d61c03824b5796244da6d6 
100644 --- a/polkadot/bridges/deployments/types/millau.json +++ b/polkadot/bridges/deployments/types/millau.json @@ -1,4 +1,6 @@ { + "Address": "MillauAddress", + "LookupSource": "MillauLookupSource", "Fee": "MillauBalance", "Balance": "MillauBalance", "Hash": "MillauBlockHash", diff --git a/polkadot/bridges/deployments/types/rialto-millau.json b/polkadot/bridges/deployments/types/rialto-millau.json index 96efb84fc3bb9cc50ca9034c1c381b2a98225944..971cf666d479e9cb0e6d3f4dd85f4e47d1ae5a13 100644 --- a/polkadot/bridges/deployments/types/rialto-millau.json +++ b/polkadot/bridges/deployments/types/rialto-millau.json @@ -1,5 +1,7 @@ { "--1": "Millau Types", + "MillauAddress": "AccountId", + "MillauLookupSource": "AccountId", "MillauBalance": "u64", "MillauBlockHash": "H512", "MillauBlockNumber": "u64", @@ -25,6 +27,8 @@ } }, "--2": "Rialto Types", + "RialtoAddress": "MultiAddress", + "RialtoLookupSource": "MultiAddress", "RialtoBalance": "u128", "RialtoBlockHash": "H256", "RialtoBlockNumber": "u32", diff --git a/polkadot/bridges/deployments/types/rialto.json b/polkadot/bridges/deployments/types/rialto.json index fe1ba31e8aa3596a54ab76339fc08959886aa90c..77c30b7cc2d7b052bbc635e900a583a8aa6dc55c 100644 --- a/polkadot/bridges/deployments/types/rialto.json +++ b/polkadot/bridges/deployments/types/rialto.json @@ -1,4 +1,6 @@ { + "Address": "RialtoAddress", + "LookupSource": "RialtoLookupSource", "Fee": "RialtoBalance", "Balance": "RialtoBalance", "BlockHash": "RialtoBlockHash", @@ -10,5 +12,6 @@ "_enum": { "RialtoToMillauConversionRate": "u128" } - } + }, + "ValidationCodeHash": "H256" } diff --git a/polkadot/bridges/deployments/types/rococo-wococo.json b/polkadot/bridges/deployments/types/rococo-wococo.json index b1c4cfa21b92cf2acb45c90c34aeaf2168d1e4ee..e0864c2ffb0b3181408624d154617f66048287be 100644 --- a/polkadot/bridges/deployments/types/rococo-wococo.json +++ b/polkadot/bridges/deployments/types/rococo-wococo.json @@ -1,10 +1,14 @@ { "--1": "Rococo Types", + 
"RococoAddress": "AccountId", + "RococoLookupSource": "AccountId", "RococoBalance": "u128", "RococoBlockHash": "H256", "RococoBlockNumber": "u32", "RococoHeader": "Header", "--2": "Wococo Types", + "WococoAddress": "AccountId", + "WococoLookupSource": "AccountId", "WococoBalance": "RococoBalance", "WococoBlockHash": "RococoBlockHash", "WococoBlockNumber": "RococoBlockNumber", diff --git a/polkadot/bridges/deployments/types/rococo.json b/polkadot/bridges/deployments/types/rococo.json index 4576378fd47920ea2a979fdbb0480abbb23f624c..fa1bf2750095d976404089d5f09773d9096df42f 100644 --- a/polkadot/bridges/deployments/types/rococo.json +++ b/polkadot/bridges/deployments/types/rococo.json @@ -1,4 +1,6 @@ { + "Address": "RococoAddress", + "LookupSource": "RococoLookupSource", "Fee": "RococoBalance", "Balance": "RococoBalance", "BlockHash": "RococoBlockHash", diff --git a/polkadot/bridges/deployments/types/wococo.json b/polkadot/bridges/deployments/types/wococo.json index cc01a6ccecfb9e613893b8cc77b57e2375cc2a65..7c7b4ff27688ee34db6763e153dd910f3f4aadc9 100644 --- a/polkadot/bridges/deployments/types/wococo.json +++ b/polkadot/bridges/deployments/types/wococo.json @@ -1,4 +1,6 @@ { + "Address": "WococoAddress", + "LookupSource": "WococoLookupSource", "Fee": "WococoBalance", "Balance": "WococoBalance", "Hash": "WococoBlockHash", diff --git a/polkadot/bridges/deployments/ui/README.md b/polkadot/bridges/deployments/ui/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ad946fc699bf89cc05d675267013fc77ff4db36a --- /dev/null +++ b/polkadot/bridges/deployments/ui/README.md @@ -0,0 +1,23 @@ +# bridges-ui + +This is a Bridges UI docker configuration file. The source of the Bridges UI code +can be found in [the repository](https://github.com/paritytech/parity-bridges-ui). +The CI should create and publish a docker image that is used by this configuration +file, so that the code is always using the latest version. 
+The UI is configured to point to local Rialto and Millau nodes to retrieve the require +data. + +This image can be used together with `nginx-proxy` to expose the UI externally. See +`VIRTUAL_*` and `LETSENCRYPT_*` environment variables. + +After start the UI is available at `http://localhost:8080` + +## How to? + +In current directory: +```bash +docker-compose up -d +``` + +Then start `rialto` & `millau` networks with the same command (one folder up) or +run the full setup by using `../run.sh` script. diff --git a/polkadot/bridges/deployments/ui/docker-compose.yml b/polkadot/bridges/deployments/ui/docker-compose.yml new file mode 100644 index 0000000000000000000000000000000000000000..8b3f8178c36e58c3370c8525fdad2910dbe542a6 --- /dev/null +++ b/polkadot/bridges/deployments/ui/docker-compose.yml @@ -0,0 +1,13 @@ +version: '3.5' +services: + bridges-ui: + image: paritytech/parity-bridges-ui + environment: + VIRTUAL_HOST: ui.brucke.link + VIRTUAL_PORT: 80 + LETSENCRYPT_HOST: ui.brucke.link + LETSENCRYPT_EMAIL: admin@parity.io + CHAIN_1_SUBSTRATE_PROVIDER: ${UI_CHAIN_1:-ws://localhost:9944} + CHAIN_2_SUBSTRATE_PROVIDER: ${UI_CHAIN_2:-ws://localhost:19944} + ports: + - "8080:80" diff --git a/polkadot/bridges/diagrams/ARCHITECTURE.md b/polkadot/bridges/diagrams/ARCHITECTURE.md deleted file mode 100644 index 6da88c448c95eddd726952c75ad3a49d458e0e76..0000000000000000000000000000000000000000 --- a/polkadot/bridges/diagrams/ARCHITECTURE.md +++ /dev/null @@ -1,13 +0,0 @@ -# Bridge Architecture Diagrams - -## Bridge Relay -![General Overview](general-overview.svg) -![Bridge Relay Node](bridge-relay.svg) - -## Runtime Modules -![Ethereum Pallet](ethereum-pallet.svg) -![Currency Exchange Pallet](currency-exchange-pallet.svg) - -## Usage -![Cross Chain Fund Transfer](cross-chain-fund-transfer.svg) -![Parachain](parachain.svg) diff --git a/polkadot/bridges/diagrams/bridge-architecture-diagrams.drawio b/polkadot/bridges/diagrams/bridge-architecture-diagrams.drawio deleted file mode 
100644 index bf073129c2973eaa597162dbab8328e1ca56d52c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/diagrams/bridge-architecture-diagrams.drawio +++ /dev/null @@ -1 +0,0 @@ -5VjZctowFP0aHul4wYAfCVm70DR0JklfOootbE1kiQg52P36XmF5lSctaWkmDQ9gHclXvuccpGsN3HmSnQm0jj/xENOBY4XZwD0eOI499X34UUheII7tjQokEiTUo2pgSX5gDVoaTUmIN62BknMqyboNBpwxHMgWhoTg2/awFaftWdcowgawDBA10WsSyrhAp55V4+eYRHE5s23pnjsU3EeCp0zPxzjDRU+CyjB66CZGId82IPdk4M4F57K4SrI5porXkjH/4b1k2SKJtxfn4/lksTr3vgyLYKf73FIlJzCTzw6dffh0tZiFF6v01sZe9PCwHrLhpAj9iGiqmdS5yrykFtJeq8sgp4SFWAzco21MJF6uUaDwLVgKsFgmFFo2XN4pOnH48a4CKpI/pxKiYI1vpOD3lWBA6NFvZqoZecRC4qzhAJ35GeYJliKHIbp3OC2dmpfAVAPb2i/ORGNxwytuCSLt0aiKXrMNF5rwPcgfvxXy3ZclvzcLxyD/CFY2WGUc6wpTlBtSiIJYzd4vVDDJXRFK55xysYvmrnafQ5LuWU6bdNvyTNZtzzNZt+1Dse4arC85JSGR6smXCRISfuecSYFgl3jtEkymHd9PehRwrB7fe39BgO/Xs+D92bfTW9j6bvJ08fVKXvbYHkoCFq4RgEcpoXJ4wf4/3seTf8h771pvGbyfyBgLnCaAXvIZfC+w3HJxb7APWcs+iktKdyVLm2UNIUoipnYPIHi3dygOCVRNM92RkDBU0/RqWqtuHVSsUUesUc/mYLs9YjmHEss2xFqmd0A6kmp7WED5/NZEcgyR/JcWyVzJGv+oq5RJkii14GUnpaZer21Jc/zuVjLuWdL6BBgfSoCevbzxL/n/FRj3FbH/VIGRocA8FZB4oJ77JAtixKI3ooX10lpMDS0MljELZ+qsQy32FG02JNixCUWvCTf4BvJEfqNX+F3jVjXeeWXzOGt2HudlKyPyph4JrdtGT32TapT37KfYhqciwE+Qoo8XIMUIPxVPc4XD1imPqX9DXe+J6k3AC5wkj+2zoT7F9QyXnDDZsFdVLlbvq37HNkXm+r7moYsRyu2E8rslZkGOEQosoV5Cq2FrNWDz5EN7nZl8r+PqImbt8YrZ59veP7zt7f1sv5+FX5s17e6rzfOtaXdruoNZ03joP7QmNOtj12J4fa7tnvwE7Vtdd5s4EP01Pmf3oTlI4suPsZOm3U27aewmbd8wKIYNRi6I2O6vX2EEBklZY5/g4Lh5iTSIQbp3GM2McA8NZ8ur2Jn7n4iHwx7UvGUPXfQgBJreZ/8yyaqQGHoumcaBx2UbwSj4hYuBXJoGHk5qAykhIQ3mdaFLogi7tCZz4pgs6sMeSFh/6tyZYkkwcp1Qlt4HHvVzqQ2tjfwDDqZ+8WRg8hVPHPdxGpM04s+LSITzKzOnUMPXmPiORxYVEbrsoWFMCM1bs+UQhxmuBWL3H1f34fWjefXXl+Sn83Xw9/jz3btc2ftdbikXF+OI7q/aH7vjxXl4i6O7wSNBqO8u+S3akxOmHEm+VroqoF2DgzMlWg8NFn5A8WjuuNnVBbMmJvPpLGQ9wJoJjcljSQGDaNBw7nyNTzimeFnhlK/lCpMZpvGKDSmuIj53brJQ5/3Fhn+jz2V+hXtkcaHDbW5a6t6gxxocwB3ARBKYI5LGLpYgZZY0z5ruKgwYtvF2XCc5CdeTUlDa7T8pZVowl7cGd78Ot23JcKvQBqAttHUJ7bETTzF9C2gboGtoG7JtryKXSa4JmUuQs2VSlWMYkpAw/C/WXhYNHoIwFEROGEy
jjCyG6JqqDLSAOfpzfmEWeF72GCWHdV/VFjum4HkMmRxTQQ5sixtruxPHkXeebbMZsqGTJIFbp2c3qLBX241loKo+WIFEIYtx6NDgqb6Hq+DhT7ghAZve5i0x9DOjRoUpYpysHTC/r7o5blVlia8SXXsXSdWasXLp+5NonyiJEG1FvimJsirJHlomsb+dxI2LAvuEU1WX2YPoYf3X5r5v9WuIIk12drqlMA+9LW8H2o9ZDw6yXjdbRSgLVK8g0FoDGbw5kA1D6xrIUAK5SBi0YRjgSI5ljwxzYNcxt5ACdKAAHbWGuZw2nOo+a5svts9aYk7d8j4L5HTkNFhUxKki9PuHvJJBtM2inLjc3gwlIt92Mgmsuou0gSLAOmQ2CeRM5PRYMeyusdIgtdjm8fAyoN8y4Nh7n/e+V65cLDmm685qL4Bz39MgeO+IOxWrmhJ/jbdEKCgSA5iWXSlsEK8fg3l0a7dFonmIrDY1DxMar2se8tFEnEZ/lHwk6aQHmWotnweTYOr/eWI+X6+TDRQ1d6Aqurfm86Gcq9RYYxwJrDEeT401EzWgzbIOSZsqOTHDjAMveGLNadYcpRNGg0OzZL92lJKPZE+uDD4xToFQqrEVlB40+oJmE0ovqY9jnM5+MyrH02bXGG1waHbspfq+Imc5aKkeNjjUOnKQ9dcHWc4Mb2Li4iRhwk8sRiAea9ziJA1pcmJuBwnBAVKk8Yf1O8WEKmx9nXt5GFB8v8JZOz2yrDPxHExBFzIOSZecV5d0lWc1v+nim7ySLnh2UMLg9i2njUIIAzdeVW7Kut+r1za3rXstFVD4XlAmhNvCn44WWox9Cy26kMTrtqDomUILswdnVRk2zwYkkjW+QCkGyaWYgxjoCxtaxwxIL851C96NfQ1IVNTvnAG9wAn2EXu4IqfY6uL6nTZQY99jW90UFDX8xO15A204YVv733mZwonZruOLT2afP67ebbw4f2E8a+SI7Ptuxv7UuvkBJ++CMfn3Uvvxzf14p/g5RRnTv8YHPVIw1/Q9fL6Oo9VZaPODHtbd/LgmZ2jz6yV0+R8=7Vxbd6I6FP41PtoFhOuj2jr1zFjbOr3oy1mIUahIHMCqfTi//SQQlJsa26qo0zWrJRsIw97f/vbOTkIJ1MbzH64+MZuoD+2SwPXnJXBdEgSeEzX8h0gWkUQSQ8nQtfpUthK0rQ8YXUilU6sPvcSFPkK2b02SQgM5DjT8hEx3XTRLXjZAdvKpE30IM4K2odtZ6YvV981QqkrcSn4LraEZPZnn6JmeboyGLpo69HkOcmB4ZqxH3dBLPVPvo1lMBG5KoOYi5IdH43kN2kSvkcY6wsNtxX2ZP92Zzdn0X69lvw3LYWf1XW5ZvpwLHf/TXT8/NH/fPFbE22Zt6nTrlvHDmtNbuHfdnlJN0nf1F5FqA+VA0glXAtWZafmwPdENcnaG0YRlpj+2cYvHhwPLtmvIRi5uB6oEVfoE6PpwnjLWlvfhl0rGwIVoDH13ge+jvQiRCRcpY89iAFCozIwbPzKoTkE3XPa9Uh8+oBrM12bj58/RU/WpbjxKgLt2TH5SeY0MtUmb0OlXCN5xy7B1z7OMpALh3PJfY8cdovcribau59QMQWNBG7uoE/ul7g6hv+EtlPA62E+4XNYOMT1LOWqOZC60dd96TzpqnurpE+6RhV9jaWZeSpoZiCnreWjqGpDeFcd/qqOlH0cdSamOQsVkOgqQsHztL4BDYkAH1nmbNpHrm2iIHN2+WUmrSW9cXfMLoQmFzRv0/QWlaH3qoyTAdgNLqNwNbwUYQSUygooZLV9yVJAxRWM8wcrEsrY1dLB+Be4W6n3obqBDfjsd7qbsDEeu5z5VSmCZl7Pct6S5uFPKe6M++RzRLTKiWyoUusX16C4yqEUtSdAFALVyjqCWGEEtFwrU2ehZw3olmH7WbYsQtu6Q3w3nnbZDrHvFBLusFg7sWiHAjvXsLl5XWS9pdqKslzRWOXDQ+lwSvNV
JZEYn4fnv9pL87FXjk3hR2JJXPNbQF7HLJuQCb8NjuFRiwaUGlensXFA23oAPwv/Ct6bSYhaqdygXrL/0HrSTAMPsMHTwsYGBgkMhqBKntQzdrtATY6vfD7EMPetD7wX9EYhR5eHOpWpJus4F3WbXyg6CaUGEPqUUryzk0QZ3xalASWi8TN30swCLLkGDgQf3MvIRGKoMpxdGmRmCK1QcFdWMMTowGySL4zxL9HzZecrcFR45yUV3l+xI6jrIZ2YQ/xpYGPIE4STfWQS5wZosx0Tj3tQ7XoajiqkMB+RkOOCg9TnhLEswCiMRgWLxUBbmBeehCDzfwkOprKnonASyFbOSINtYX9W+9Y4Ph+SQJGGhED8kJj+Gk0WF9FXxvBM7k19IL8jYg9Wh1e926PwcX9SUqyRcgXbY0nm2DNPATLEs1RYt8oHUIApw2cin5gQ+dV9xD5xlHUtldBPtQG6SqslL3GGdJJvXL6u+T453irMaonLsmpgoHiV0rQlD/GHDkMbqX6wTxl+dCVZSM/6ZqeA9e1i26nSS8yrHd6vI9ucVj6JCzxFqxl8r/GfLc49wprskXLSnvbHl+0WFNs+liwxHn0aJoHxm2GZdPMQLhcK2kJ0LP25ETwwrV+H9MBGd3YrfXitaN30EUiGdZwvpu85rZeapwCHmqfhsnIuVcl1oQKK64Hwgjmq5oJ6l22MPajGLpmpWefVc8aD1XPHIznzUKlHEtMXNzzNBdt8rLLOh97erGyMs+oHQKaQzIl+0dEYElxwvWVe3HszF5FS+Cw5cZOKz8wD3U8fyTCyr6qfoYsIeR8NPHnRbvTey4UXg7HAWi9z6DF1rsEiVDeypYfUNU3f98JYGtYNvdpvC3POvX7yH8MrNtXI59v7UpDEjxFScZwH8b4Acnzomz9N2tKWjRABHfrDcw3dazhBLAWn5LhrBFpH5RLM8t7s/7lApBHLCiKoCMkbU5KwNFWFnG+JmzIwbzVozYRBsBlhXAleZYtpziTawU5BZOa5m6s4wmuNcZ+4yNvft80ipdM1XFnMrF2BuoIgFNPdmO/6H7fgxrvm1udBnsaPKasd45OTVHAvm2ybJvrpr0FuCILo1ons4rvjRxqVoYxeJvsvdTD0bYfAHorplL2k8tqPpiuMAL8RnY1dzs/eYEbFpyFR+MgEoc7HbchKB9I3fnAEorKu8lE/smlpuQkyMliLhbqnC+gExewrwCaqzBviXA2fLwWtptUSJTNGSfZypNUrrHAa+dZu/P0Yv/zy3OiwOo10A8UnRkOaEiM/GdpSu1cZb76HMYEeVY7XjX+I7JPGxLkJRWKfXz4P4GJP24ajbNLtQaz5UZBYnEL5CZusQLnGnQnOpndzROoAtLBctZT4Gy4nYwI7VgvPH4Q2LgcGFsJysiaIIBFVTT4vvVNZqqsK6Le48+C53hcM6pzDsbtPSfk0rr9OfLE4hHiiFC66jHfNbuS64A2M8K4n1PQh+9lmoUlKkyOdNt4hZXEWlkf2wYsPBJ+J44DD8EClx+MglH2nZCBAPdZuPd3B0bzoeC0CkC8jxZYk/uRy/iu340pOa7WlbYLEjc03yxKPfacW8KHXfvtCc9fsG5xHzmro7KsX2j5PnClydljX6m12jNek2u8q8/CJe1Vlc4xLqt4qqnRzFPWM7jv80fjVuJn9Y7Pi3fltEilNZP3ahXlb99t6dku/cca2gPnvLUp/VsEM8d64mPX94x+IQl1Cf1YTkXvxTIDbjT7fZK5uPVqs1Z7Cj9rc+W0hiY94keFn12bBeEWx1qgaGx+ymB8tTSkI1mcWx0V4Hu4v8Xgei+MQyH6/xrO5ylpVcnudSKzNOoJbLYxPXXzv3//hig8XEzMX6y2TEZQlY40T1SOzIvHeNdSvOebDjXTBTv4cJLI15fuMsaU9OrcqMvoZ5VNK7cbaUKna18ZdKFSdvY16W0itHD2llLFp9ajwkhdW33MHN/w==5VpdV6M8EP41vXRPAqU
tl9rWj3P86K67x1dv9qSQQjQQDMG2/vo3gSCf1lqrrWe9kUyGgTzzzGQmtGMOg8UJR5F/wVxMOwZwFx1z1DEMCLq2/Kcky1xidTOJx4mrZYXgmjzjXFFLE+LiuKIoGKOCRFWhw8IQO6IiQ5yzeVVtxmj1qRHycENw7SDalN4QV/iZdGCBQn6KiefnT4ZAz0yR8+BxloT6eSELcTYToNyMVo195LJ5SWSOO+aQMyayq2AxxFThmiN2PT4fJ/dD93Hi9/2jx8mjMWEHmbHj99zysjiOQ7Gx6cNxd+CeUfE4t9njYhksEXIODL00scyRxK4EVg8ZFz7zWIjouJAepWhhZRXIUaFzzlgkhVAK77EQS80SlAgmRb4IqJ6Vq+DL//T96eBWDX5Y+XC0KE+Olnq0Jgwarpgl3NGLurzD4Z+Zc3N7B/4KByx/To2hhgsIxD0sVmCk7SlgSmTTIJ9gFmD5klKBY4oEeaoyEmliey96hYvkhfZSOvwTY341vVfhYQCKpphmt/7mKIyRIwgLM5s0cYjr+IiLTP1Me9h7uLvw77B98fOwl2nmvn99/VWulHhQ8tfcJwJfRyhFcy4zSIUESgNxR3vbUo6asVDoMYR6PGSU8dS0CdI/KY+lTRJ6UmqqkeDsAV8pmVAuh+D9Tn/CXODFSjfls3lY65RnQo3ZvMgfthb5pdTRHbzbsXJY8u07orO3i+jcPMpa17BulFlfFGXrIa/f+gnRRD9pgngsdwbQiMaSd6pB0RY2G+O8PrFNWCU27IEGsV/IX2Z2rrd1MM3dbjLFvnJbntv6JrNq83iT/mCv6N9t0P9WVndtXjzP9qky8ogSL5TXjkQQy5R/pLhLZK12qCcC4rqZk3FMntE0taewjxgJRboW66hjjVq9sYphjSB5KTj1QyqVW1vwHIAfEPT7lQDKi6S1odfWJ2o1JRU2m8WSA3XfvLzE5u6CO9kovlmEwa3Xcemth5yjZUlBc7hJgzw996vpuduvle01fQgGg1U3yIvsFbbLqH4jA1yyPU4AeQRsJQPYwLYrkO9/Amh46yzOYFPPZzP1EqqtJjK+ZTi7qvGlzHnomMfNIsZnwTSJd1jAAKuK/sBqFjBmSwED4WdVMHDHJcxX9cmrKvPvkmCtbq2xA/YX5Evrm1VMcJsl06BnGd8rYTbddYqeVHs3kzGrUieiHCNXLXOKcahQoYgEKpr3MGN27VrL121p+UBbxgSfljFBA6bvd3bRXzPz2dtOfB+Cvlm6jXDEYqIiS8YXS1/EIRHBWbA5joRdNFm9D4cZg+6+HWbAnRyZb5nY9s629A9hbzeY/QvPEVfl7HUyDYhQu+de8hgCo9rFfSaRX/uQMA7dzNb7PyCsYMe/+wUB1JJTW6PyhZ8Q7Mu+Efds/3QijJuDoTGZLY5bDrIPFRicOGm1g9NecM4SmvaEShZKWSqdSbRljvBJs46NfRSpS1miUpaIt+MqwlxWT2lxm980KUSf5qJe7bClv2bQDbawe+C/1IPPEb8ySASdE352FrHdfG/dfPNoXcO6X3T6u9o7Vr11KRCGLFRMUkznLMgIj1W/JYv9hEuwnGVRMVEmaVuZ2cdtps73Ly2XWmFvnky9co74z56lrGLrm7FT9OCGNr1pz52fntRbSbvGjGyJ+q6CHA1Djcq9bijDYIWhj5wCyGHxw5xMvfjlkzn+Hw==5VhdU6MwFP01POoAgaqPtlZ3R3d0ts7sc4BbyBhIJ4R++Os3KeEzONZVnLr2BXJJbsg599x7i4Vm6faG41Xyi0VALdeOtha6slzXsb0LeVGWXWXxvdIScxJpW2NYkGeoJmprQSLIOxMFY1SQVdcYsiyDUHRsmHO26U5bMtrddYVjMAyLEFPT+odEIimt577d2H8AiZNqZ8fWTwIcPsWcFZneL2MZlE9SXLnRU/MER2zTMqG5hWacMVHepdsZUIVrhdht+pM4cjMes+y68Cfp+vH2pHR2/ZYl9eE4ZOJjXbul6zW
mhUZyLhLgUKT6zGJXQSyPv1K34Y6SLAJuoekmIQIWKxwq+0aGlrQlIqVy5MjbQMEK0V1QG2qw7wshvYC2H3g+jcMauIBti3d93htgKQi+k1P0Uxdp6nRYu+d6vGlipI6EpB0f1UKs4zKufTcIyxsN8hsARwbgiyLIBccC/gfEvcnRIe4ZiF+GgvEX0S5SWk5AU3VuIpPMHQ6APrCcCMIyOSVgQrBUTqDqwbTGeMaoWie9oeX+1/JxSUms1grW44yVxMzqzGiPyI/jd/nxfZMfNEDPZCx2JgY7BjGQRZeqRCglUJznJOwC+DasIOpUEhOpFhL+ABKVjQPFgqy79WcIHr3DAyPy9RoiLnpEuD2Ec1bwEPSqdlrvOXK9riOv70hgHoMwHO3Zqo/97wRWHUCLQedUjheSNnm5Y+GTvDxuTVolEXthddnEWiehZBP4gIBSEkXKx5RDTp5xsPenNLNSR9wf2p9a/tXhkVHFYF9FdbuiN+nU/SF12ae263Xz3/uCpZrClsscxqHPMehzFX3zlKgXmK9Bh81QqsyYgAOK0gs1KML86V6uIkKBJbHzP7EVqPXXLkzOgN7Px8p8jtl7TWVvLbOTa/+W8bIzYOdlddfwvYL6WECiXr7x7QEg3c8sIY7ZU32PGoLcV1L/oTUE9boCrx/0Y9cQs0dDp3sZ4OjFJHQ89aMOwA8oIM7ki9UP/5uKz+uLr6+ZQ8XnnfUc9f8JjS0+swX3Wg2c6tzsB87Y8pgVWEXhuxV4IiXYbRSOXoFnBn++4u+G4/0bXMumITe4+4ItXP/bwpgtnBw2H/ZKppovp2j+Fw==3ZnLcpswFIafxstmEEIOLGPHSWbaTtx60aY7GVRgAsgjhI3z9BVG4ibqOB5fszL6EQfrO7+kI3sAx3H+yPAi+E49Eg1Mw8sH8H5gmsCwHPFRKGulIKtUfBZ6UquFWfhGVEepZqFH0lZHTmnEw0VbdGmSEJe3NMwYXbW7/aVR+60L7BNNmLk40tVfoceDUrWRUetPJPQD9WZgyDtz7L76jGaJfF9CE1LeibEKI7umAfboqiHByQCOGaW8vIrzMYkKrorY16c0c//APGEPy2ecvC3Wyx9fymAPH3mkGhwjCT9saLMMvcRRJklOafQqxsnlmPlaIRbDXxSX7joKE4+wARytgpCT2QK7hb4S1hJawONItIC4nBdYifdtXgkV7OeMiyhE6juOT3JYEsZJ3si7HO8joTHhbC26yLu2zJx0tXmLyvaqtogJZZ+gYQ+oRCxt6Veha8DiQjL+AG+o8f4MnC0TtUEr8E3QqA+0eSzQlgZ6wgPCSBYL9WeW8DAW89wQS2EWES0HrCQqsb2D/2hUrTZVC/VQBX1UjwUVaVBn2TzlDHNytVQRODfVoUZ1nDExbLf41pPcDXDiXy9f6Jybr/3+mksS766oQoplN8JpGrqCUMox47q8N8OUZswl72/G4rU+2RZPbiLEa9VEekYaxPsWX6UxEmEeLtuVVF8W5BumNBTDrRIOUXuXRUYnkeW45VPNAqUTyDLagar1TgUqwWiBNqaohr2/T5ytq9sIp2JqmcY4wGEiPu8+w8Ztd5Cb+lwFdo91zKPt22pl2DELo8+QBWA47TRA6/x5AFoeGgXUlN7VSbj+BHSnwbCn0jo1f/1kNhIn77IQEAv2Or3M7d8GHSv3HAXAsAclsI6GcodD154FAMlD/ltcGzeOM5Ttl00bQUu27wsyhmqsG40pYaKe48UU2WgHLifgjuWEstqF1BPDzmJoWc4N2q+i6M7rnlBHrimAfg49mPcS8dVK8yHVfGneq423aTWd13hMOXarWw/sTLW5XJs17Y41h90zy87G7ARCJy51gX6SP7ctD22xXQ9TzmU5DP2nEvyow5xuZe+c2GH6rxqH3nRBe8M96QK2q7tUNXQh9gJGpzyzbvf0FzCGnUjdUnhvg4lm/adK2b3+1wpO/gE= \ No newline at end of file 
diff --git a/polkadot/bridges/diagrams/bridge-relay.svg b/polkadot/bridges/diagrams/bridge-relay.svg deleted file mode 100644 index 2907a7c7fce9d99b60158af33633db9e6b871d34..0000000000000000000000000000000000000000 --- a/polkadot/bridges/diagrams/bridge-relay.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
Source
Source
Target
Target
Sync Loop
Sync Loop
Source Client
Source Client
RPC
RPC
RPC
RPC
run(source=sub, target=eth)
run(source=sub, target=eth)
run(source=eth, target=sub)
run(source=eth, target=sub)
Substrate Sync Loop
Substrate Sync Loop
Ethereum Sync Loop
Ethereum Sync Loop
Process Method Results
Process Method Results
Update Target Methods
Update Target Methods
Update Source Methods
Update Source Methods
Target Client
Target Client
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/bridges/diagrams/cross-chain-fund-transfer.svg b/polkadot/bridges/diagrams/cross-chain-fund-transfer.svg deleted file mode 100644 index 5fd9ced1d436773b94e060b9eacdc1f4bb0e7e33..0000000000000000000000000000000000000000 --- a/polkadot/bridges/diagrams/cross-chain-fund-transfer.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
Ethereum
Ethereum
Substrate
Substrate
Actor
Actor
1. Send Lock Tx
1. Send Lock Tx
2. Emit Event
2. Emit Event
Bridge Relay
Bridge Relay
3. Read Event
3. Read Event
4. Send Tx Proof
4. Send Tx Proof
5. Grant Funds
5. Grant Funds
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/bridges/diagrams/currency-exchange-pallet.svg b/polkadot/bridges/diagrams/currency-exchange-pallet.svg deleted file mode 100644 index 1f1b2ef7b5ce98da060efd829fcce4628f50c21b..0000000000000000000000000000000000000000 --- a/polkadot/bridges/diagrams/currency-exchange-pallet.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
Transaction
Transaction
Parse Transaction
Parse Transaction
Yes
Yes
No
No
Is part of a finalized block?
Is part of a finalize...
Yes
Yes
Have funds already been claimed?
Have funds alrea...
Deposit into recipient account
Deposit into recipie...
Reward Submitter
Reward Submitter
End
End
A price feed would be needed for this
A price feed would b...
Convert from foreign currency into local currency
Convert from foreign...
No
No
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/bridges/diagrams/ethereum-pallet.svg b/polkadot/bridges/diagrams/ethereum-pallet.svg deleted file mode 100644 index 934255be226084145acf75b55808d8d28bd1fa3e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/diagrams/ethereum-pallet.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
Import Signed Header
Import Signed Header
Import Header
Import Header
Count Valid and Invalid Headers
Count Valid and Inva...
No
No
Yes
Yes
Did we finalize any headers
Did we finalize any h...
Yes
Yes
No
No
Is Signed
Is Signed
Import Unsigned Header
Import Unsigned Head...
Import Header
Import Header
Reward Submitter
Reward Submitter
Did we receive valid headers?
Did we receive valid he...
Track Good Submitter
Track Good Submitter
Punish Bad Submitter
Punish Bad Submitter
Verify Header
Verify Header
Check for Authority Set Changes
Check for Authori...
Check if new header finalizes old headers
Check if new head...
Header
Header
Import Header
Import Header
Insert Header into Storage
Insert Header int...
Mark Headers as Finalized
Mark Headers as F...
Prune Old Headers
Prune Old Headers
Imported Block Hash + Finalized Headers
Imported Block Ha...
New Header
New Header
End
End
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/bridges/diagrams/general-overview.svg b/polkadot/bridges/diagrams/general-overview.svg deleted file mode 100644 index d7706893ab9d147bc741ec879e8ef3996261c44d..0000000000000000000000000000000000000000 --- a/polkadot/bridges/diagrams/general-overview.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
Bridge Relay
Bridge Relay
Solidity Smart Contract
Solidity Smart Contract
Grandpa Built-In
Grandpa Built-In
Ethereum PoA Network
Ethereum PoA Network
Substrate Node
Substrate Node
Ethereum Runtime Module
Ethereum Runtime Module
Substrate Runtime Module
Substrate Runtime Module
Currency Exchange Runtime Module
Currency Exchange Runtime Module
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/bridges/diagrams/parachain.svg b/polkadot/bridges/diagrams/parachain.svg deleted file mode 100644 index a1a15f172cf03a4704a90c8a40160809ca188e5a..0000000000000000000000000000000000000000 --- a/polkadot/bridges/diagrams/parachain.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
Polkadot
Polkadot
Ethereum Runtime Module
Ethereum Runtime Module
Substrate Runtime Module
Substrate Runtime Module
Currency Exchange Runtime Module
Currency Exchange Runtime Module
Substrate Based Chain A
Substrate Based Chain A
Substrate Based Chain B
Substrate Based Chain B
Ethereum PoA Chain
Ethereum PoA Chain
Bridge Relays
Bridge Relays
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/bridges/docs/high-level-overview.md b/polkadot/bridges/docs/high-level-overview.md index 9ca3ca42ff5e51eab3c1cbd5ee9181a5384f9ed0..2642c20c86abbc723f385d96b1f02e8179020f12 100644 --- a/polkadot/bridges/docs/high-level-overview.md +++ b/polkadot/bridges/docs/high-level-overview.md @@ -48,33 +48,21 @@ High level sequence charts of the process can be found in [a separate document]( ### Substrate (GRANDPA) Header Sync -The header sync pallet (`pallet-substrate-bridge`) is an on-chain light client for chains which use -GRANDPA finality. It is part of the target chain's runtime, and accepts headers from the source -chain. Its main goals are to accept valid headers, track GRANDPA finality set changes, and verify -GRANDPA finality proofs (a.k.a justifications). +The header sync pallet (`pallet-bridge-grandpa`) is an on-chain light client for chains which use +GRANDPA finality. It is part of the target chain's runtime, and accepts finality proofs from the source +chain. Verify GRANDPA finality proofs (a.k.a justifications) and track GRANDPA finality set changes. The pallet does not care about what block production mechanism is used for the source chain -(e.g Aura or BABE) as long as it uses the GRANDPA finality gadget. Due to this it is possible for -the pallet to import (but not necessarily finalize) headers which are _not_ valid according to the -source chain's block production mechanism. +(e.g Aura or BABE) as long as it uses the GRANDPA finality gadget. In fact the pallet does not +necessarily store all produced headers, we only import headers with valid GRANDPA justifications. -The pallet has support for tracking forks and uses the longest chain rule to determine what the -canonical chain is. 
The pallet allows headers to be imported on a different fork from the canonical -one as long as the headers being imported don't conflict with already finalized headers (for -example, it will not allow importing a header at a lower height than the best finalized header). - -When tracking authority set changes, the pallet - unlike the full GRANDPA protocol - does not -support tracking multiple authority set changes across forks. Each fork can have at most one pending -authority set change. This is done to prevent DoS attacks if GRANDPA on the source chain were to -stall for a long time (the pallet would have to do a lot of expensive ancestry checks to catch up). - -Referer to the [pallet documentation](../modules/substrate/src/lib.rs) for more details. +Referer to the [pallet documentation](../modules/grandpa/src/lib.rs) for more details. #### Header Relayer strategy There is currently no reward strategy for the relayers at all. They also are not required to be staked or registered on-chain, unlike in other bridge designs. We consider the header sync to be -an essential part of the bridge and the incentivisation should be happening on the higher layers. +an essential part of the bridge and the incentivization should be happening on the higher layers. At the moment, signed transactions are the only way to submit headers to the header sync pallet. However, in the future we would like to use unsigned transactions for headers delivery. This will @@ -110,7 +98,7 @@ Users of the pallet add their messages to an "outbound lane" on the source chain finalized message relayers are responsible for reading the current queue of messages and submitting some (or all) of them to the "inbound lane" of the target chain. Each message has a `nonce` associated with it, which serves as the ordering of messages. The inbound lane stores the last -delivered nonce to prevent replaying messages. 
To succesfuly deliver the message to the inbound lane +delivered nonce to prevent replaying messages. To successfully deliver the message to the inbound lane on target chain the relayer has to present present a storage proof which shows that the message was part of the outbound lane on the source chain. diff --git a/polkadot/bridges/docs/poa-eth.md b/polkadot/bridges/docs/poa-eth.md deleted file mode 100644 index 43b30f8bb737e41b541fb5079aaa77044627b47c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/docs/poa-eth.md +++ /dev/null @@ -1,71 +0,0 @@ -# PoA Ethereum High Level Documentation - -NOTE: This is from the old README - -### Ethereum Bridge Runtime Module -The main job of this runtime module is to keep track of useful information an Ethereum PoA chain -which has been submitted by a bridge relayer. This includes: - - - Ethereum headers and their status (e.g are they the best header, are they finalized, etc.) - - Current validator set, and upcoming validator sets - -This runtime module has more responsibilties than simply storing headers and validator sets. It is -able to perform checks on the incoming headers to verify their general integrity, as well as whether -or not they've been finalized by the authorities on the PoA chain. - -This module is laid out as so: - -``` -├── ethereum -│ └── src -│ ├── error.rs // Runtime error handling -│ ├── finality.rs // Manage finality operations -│ ├── import.rs // Import new Ethereum headers -│ ├── lib.rs // Store headers and validator set info -│ ├── validators.rs // Track current and future PoA validator sets -│ └── verification.rs // Verify validity of incoming Ethereum headers -``` - -### Currency Exchange Runtime Module -The currency exchange module is used to faciliate cross-chain funds transfers. It works by accepting -a transaction which proves that funds were locked on one chain, and releases a corresponding amount -of funds on the recieving chain. 
- -For example: Alice would like to send funds from chain A to chain B. What she would do is send a -transaction to chain A indicating that she would like to send funds to an address on chain B. This -transaction would contain the amount of funds she would like to send, as well as the address of the -recipient on chain B. These funds would now be locked on chain A. Once the block containing this -"locked-funds" transaction is finalized it can be relayed to chain B. Chain B will verify that this -transaction was included in a finalized block on chain A, and if successful deposit funds into the -recipient account on chain B. - -Chain B would need a way to convert from a foreign currency to its local currency. How this is done -is left to the runtime developer for chain B. - -This module is one example of how an on-chain light client can be used to prove a particular action -was taken on a foreign chain. In particular it enables transfers of the foreign chain's native -currency, but more sophisticated modules such as ERC20 token transfers or arbitrary message transfers -are being worked on as well. - -## Ethereum Node -On the Ethereum side of things, we require two things. First, a Solidity smart contract to track the -Substrate headers which have been submitted to the bridge (by the relay), and a built-in contract to -be able to verify that headers have been finalized by the GRANDPA finality gadget. Together this -allows the Ethereum PoA chain to verify the integrity and finality of incoming Substrate headers. - -The Solidity smart contract is not part of this repo, but can be found -[here](https://github.com/svyatonik/substrate-bridge-sol/blob/master/substrate-bridge.sol) if you're -curious. We have the contract ABI in the `ethereum/relays/res` directory. - -## Rialto Runtime -The node runtime consists of several runtime modules, however not all of them are used at the same -time. 
When running an Ethereum PoA to Substrate bridge the modules required are the Ethereum module -and the currency exchange module. When running a Substrate to Substrate bridge the Substrate and -currency exchange modules are required. - -Below is a brief description of each of the runtime modules. - -## Bridge Relay -The bridge relay is responsible for syncing the chains which are being bridged, and passing messages -between them. The current implementation of the relay supportings syncing and interacting with -Ethereum PoA and Substrate chains. diff --git a/polkadot/bridges/docs/send-message.md b/polkadot/bridges/docs/send-message.md index 91d3bfd976b58f4a6cf055445181a4ba51d8bdfd..6984c56d67f2a87c7b5d3939750db4fccc8d85c1 100644 --- a/polkadot/bridges/docs/send-message.md +++ b/polkadot/bridges/docs/send-message.md @@ -46,22 +46,22 @@ FLAGS: SUBCOMMANDS: help Prints this message or the help of the given subcommand(s) - MillauToRialto Submit message to given Millau -> Rialto lane - RialtoToMillau Submit message to given Rialto -> Millau lane + millau-to-rialto Submit message to given Millau -> Rialto lane + rialto-to-millau Submit message to given Rialto -> Millau lane ``` Messages are send from a source chain to a target chain using a so called `message lane`. Message lanes handle both, message transport and message dispatch. There is one command for submitting a message to each of the two -available bridges, namely `MillauToRialto` and `RialtoToMillau`. +available bridges, namely `millau-to-rialto` and `rialto-to-millau`. Submitting a message requires a number of arguments to be provided. Those arguments are essentially the same -for both submit message commands, hence only the output for `MillauToRialto` is shown below. +for both submit message commands, hence only the output for `millau-to-rialto` is shown below. 
``` Submit message to given Millau -> Rialto lane USAGE: - substrate-relay send-message MillauToRialto [OPTIONS] --lane --source-host --source-port --source-signer --origin --target-signer + substrate-relay send-message millau-to-rialto [OPTIONS] --lane --source-host --source-port --source-signer --origin --target-signer FLAGS: -h, --help Prints help information @@ -104,7 +104,7 @@ Usage of the arguments is best explained with an example. Below you can see, how would look like: ``` -substrate-relay send-message MillauToRialto \ +substrate-relay send-message millau-to-rialto \ --source-host=127.0.0.1 \ --source-port=10946 \ --source-signer=//Dave \ diff --git a/polkadot/bridges/fuzz/storage-proof/Cargo.toml b/polkadot/bridges/fuzz/storage-proof/Cargo.toml index 43e58ddb73e98df6c2ddba1811e490790ea0c622..c4da57b255c83de4c8c14d77b2cd46e06785dcf7 100644 --- a/polkadot/bridges/fuzz/storage-proof/Cargo.toml +++ b/polkadot/bridges/fuzz/storage-proof/Cargo.toml @@ -8,27 +8,17 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0" } -finality-grandpa = "0.14.4" -hash-db = "0.15.2" honggfuzz = "0.5.54" log = "0.4.0" env_logger = "0.8.3" # Bridge Dependencies -bp-header-chain = { path = "../../primitives/header-chain" } bp-runtime = { path = "../../primitives/runtime" } -bp-test-utils = { path = "../../primitives/test-utils" } # Substrate Dependencies -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = 
"https://github.com/paritytech/substrate", branch = "master" } sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/fuzz/storage-proof/README.md b/polkadot/bridges/fuzz/storage-proof/README.md index da3c7b1565e037c7fd6b3a63b7d59289b357ce34..1eeec7562a9614bebe9738701e0929852c94ac5c 100644 --- a/polkadot/bridges/fuzz/storage-proof/README.md +++ b/polkadot/bridges/fuzz/storage-proof/README.md @@ -6,7 +6,10 @@ Install dependencies: ``` $ sudo apt install build-essential binutils-dev libunwind-dev ``` - +or on nix: +``` +$ nix-shell -p honggfuzz +``` Install `cargo hfuzz` plugin: ``` @@ -29,4 +32,3 @@ HFUZZ_RUN_ARGS="-t 1 -n 12 -v -N 1000000 --exit_upon_crash" cargo hfuzz run exam ``` More details in the [official documentation](https://docs.rs/honggfuzz/0.5.52/honggfuzz/#about-honggfuzz). 
- diff --git a/polkadot/bridges/fuzz/storage-proof/src/main.rs b/polkadot/bridges/fuzz/storage-proof/src/main.rs index 18be72e72f228b98ae6e81828de57be2d8b9ed0a..42636a65c3dca676be449378713194a247b4442a 100644 --- a/polkadot/bridges/fuzz/storage-proof/src/main.rs +++ b/polkadot/bridges/fuzz/storage-proof/src/main.rs @@ -28,10 +28,8 @@ use sp_trie::StorageProof; use std::collections::HashMap; fn craft_known_storage_proof(input_vec: Vec<(Vec, Vec)>) -> (H256, StorageProof) { - let storage_proof_vec = vec![( - None, - input_vec.iter().map(|x| (x.0.clone(), Some(x.1.clone()))).collect(), - )]; + let storage_proof_vec = + vec![(None, input_vec.iter().map(|x| (x.0.clone(), Some(x.1.clone()))).collect())]; log::info!("Storage proof vec {:?}", storage_proof_vec); let backend = >::from(storage_proof_vec); let root = backend.storage_root(std::iter::empty()).0; @@ -47,7 +45,7 @@ fn craft_known_storage_proof(input_vec: Vec<(Vec, Vec)>) -> (H256, Stora fn transform_into_unique(input_vec: Vec<(Vec, Vec)>) -> Vec<(Vec, Vec)> { let mut output_hashmap = HashMap::new(); let mut output_vec = Vec::new(); - for key_value_pair in input_vec.clone() { + for key_value_pair in input_vec { output_hashmap.insert(key_value_pair.0, key_value_pair.1); //Only 1 value per key } for (key, val) in output_hashmap.iter() { @@ -59,18 +57,16 @@ fn transform_into_unique(input_vec: Vec<(Vec, Vec)>) -> Vec<(Vec, Ve fn run_fuzzer() { fuzz!(|input_vec: Vec<(Vec, Vec)>| { if input_vec.is_empty() { - return; + return } let unique_input_vec = transform_into_unique(input_vec); let (root, craft_known_storage_proof) = craft_known_storage_proof(unique_input_vec.clone()); - let checker = >::new(root, craft_known_storage_proof) - .expect("Valid proof passed; qed"); + let checker = + >::new(root, craft_known_storage_proof) + .expect("Valid proof passed; qed"); for key_value_pair in unique_input_vec { log::info!("Reading value for pair {:?}", key_value_pair); - assert_eq!( - checker.read_value(&key_value_pair.0), - 
Ok(Some(key_value_pair.1.clone())) - ); + assert_eq!(checker.read_value(&key_value_pair.0), Ok(Some(key_value_pair.1.clone()))); } }) } diff --git a/polkadot/bridges/modules/currency-exchange/Cargo.toml b/polkadot/bridges/modules/currency-exchange/Cargo.toml deleted file mode 100644 index 160a652e7c6735b58857808ce431a8d8d4ca7866..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/currency-exchange/Cargo.toml +++ /dev/null @@ -1,48 +0,0 @@ -[package] -name = "pallet-bridge-currency-exchange" -description = "A Substrate Runtime module that accepts 'lock funds' transactions from a peer chain and grants an equivalent amount to a the appropriate Substrate account." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -log = { version = "0.4.14", default-features = false } -serde = { version = "1.0", optional = true } - -# Bridge dependencies - -bp-currency-exchange = { path = "../../primitives/currency-exchange", default-features = false } -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } - -[dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-io = { git = 
"https://github.com/paritytech/substrate", branch = "master" } - -[features] -default = ["std"] -std = [ - "bp-currency-exchange/std", - "bp-header-chain/std", - "codec/std", - "frame-benchmarking/std", - "frame-support/std", - "frame-system/std", - "log/std", - "serde", - "sp-runtime/std", - "sp-std/std", -] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "sp-std", -] diff --git a/polkadot/bridges/modules/currency-exchange/src/benchmarking.rs b/polkadot/bridges/modules/currency-exchange/src/benchmarking.rs deleted file mode 100644 index 74da4c1b7ec48a7c464e25c08876c35ea47bac8a..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/currency-exchange/src/benchmarking.rs +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Exchange module complexity is mostly determined by callbacks, defined by runtime. -//! So we are giving runtime opportunity to prepare environment and construct proof -//! before invoking module calls. 
- -use super::{ - Call, Config as CurrencyExchangeConfig, InclusionProofVerifier, Instance, Pallet as CurrencyExchangePallet, -}; -use sp_std::prelude::*; - -use frame_benchmarking::{account, benchmarks_instance}; -use frame_system::RawOrigin; - -const SEED: u32 = 0; -const WORST_TX_SIZE_FACTOR: u32 = 1000; -const WORST_PROOF_SIZE_FACTOR: u32 = 1000; - -/// Pallet we're benchmarking here. -pub struct Pallet, I: Instance>(CurrencyExchangePallet); - -/// Proof benchmarking parameters. -pub struct ProofParams { - /// Funds recipient. - pub recipient: Recipient, - /// When true, recipient must exists before import. - pub recipient_exists: bool, - /// When 0, transaction should have minimal possible size. When this value has non-zero value n, - /// transaction size should be (if possible) near to `MIN_SIZE + n * SIZE_FACTOR`. - pub transaction_size_factor: u32, - /// When 0, proof should have minimal possible size. When this value has non-zero value n, - /// proof size should be (if possible) near to `MIN_SIZE + n * SIZE_FACTOR`. - pub proof_size_factor: u32, -} - -/// Config that must be implemented by runtime. -pub trait Config: CurrencyExchangeConfig { - /// Prepare proof for importing exchange transaction. - fn make_proof( - proof_params: ProofParams, - ) -> <>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof; -} - -benchmarks_instance! { - // Benchmark `import_peer_transaction` extrinsic with the best possible conditions: - // * Proof is the transaction itself. - // * Transaction has minimal size. - // * Recipient account exists. 
- import_peer_transaction_best_case { - let i in 1..100; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: true, - transaction_size_factor: 0, - proof_size_factor: 0, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - - // Benchmark `import_peer_transaction` extrinsic when recipient account does not exists. - import_peer_transaction_when_recipient_does_not_exists { - let i in 1..100; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: false, - transaction_size_factor: 0, - proof_size_factor: 0, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - - // Benchmark `import_peer_transaction` when transaction size increases. - import_peer_transaction_when_transaction_size_increases { - let i in 1..100; - let n in 1..WORST_TX_SIZE_FACTOR; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: true, - transaction_size_factor: n, - proof_size_factor: 0, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - - // Benchmark `import_peer_transaction` when proof size increases. - import_peer_transaction_when_proof_size_increases { - let i in 1..100; - let n in 1..WORST_PROOF_SIZE_FACTOR; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: true, - transaction_size_factor: 0, - proof_size_factor: n, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - - // Benchmark `import_peer_transaction` extrinsic with the worst possible conditions: - // * Proof is large. - // * Transaction has large size. - // * Recipient account does not exists. 
- import_peer_transaction_worst_case { - let i in 1..100; - let m in WORST_TX_SIZE_FACTOR..WORST_TX_SIZE_FACTOR+1; - let n in WORST_PROOF_SIZE_FACTOR..WORST_PROOF_SIZE_FACTOR+1; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: false, - transaction_size_factor: m, - proof_size_factor: n, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - -} diff --git a/polkadot/bridges/modules/currency-exchange/src/lib.rs b/polkadot/bridges/modules/currency-exchange/src/lib.rs deleted file mode 100644 index 79dd659283df31e3457382bff5cdde053fec903c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/currency-exchange/src/lib.rs +++ /dev/null @@ -1,496 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Runtime module that allows tokens exchange between two bridged chains. 
- -#![cfg_attr(not(feature = "std"), no_std)] - -use bp_currency_exchange::{ - CurrencyConverter, DepositInto, Error as ExchangeError, MaybeLockFundsTransaction, RecipientsMap, -}; -use bp_header_chain::InclusionProofVerifier; -use frame_support::{decl_error, decl_module, decl_storage, ensure}; -use sp_runtime::DispatchResult; - -#[cfg(feature = "runtime-benchmarks")] -pub mod benchmarking; - -/// Called when transaction is submitted to the exchange module. -pub trait OnTransactionSubmitted { - /// Called when valid transaction is submitted and accepted by the module. - fn on_valid_transaction_submitted(submitter: AccountId); -} - -/// The module configuration trait -pub trait Config: frame_system::Config { - /// Handler for transaction submission result. - type OnTransactionSubmitted: OnTransactionSubmitted; - /// Represents the blockchain that we'll be exchanging currency with. - type PeerBlockchain: InclusionProofVerifier; - /// Peer blockchain transaction parser. - type PeerMaybeLockFundsTransaction: MaybeLockFundsTransaction< - Transaction = ::Transaction, - >; - /// Map between blockchains recipients. - type RecipientsMap: RecipientsMap< - PeerRecipient = ::Recipient, - Recipient = Self::AccountId, - >; - /// This blockchain currency amount type. - type Amount; - /// Converter from peer blockchain currency type into current blockchain currency type. - type CurrencyConverter: CurrencyConverter< - SourceAmount = ::Amount, - TargetAmount = Self::Amount, - >; - /// Something that could grant money. - type DepositInto: DepositInto; -} - -decl_error! { - pub enum Error for Pallet, I: Instance> { - /// Invalid peer blockchain transaction provided. - InvalidTransaction, - /// Peer transaction has invalid amount. - InvalidAmount, - /// Peer transaction has invalid recipient. - InvalidRecipient, - /// Cannot map from peer recipient to this blockchain recipient. - FailedToMapRecipients, - /// Failed to convert from peer blockchain currency to this blockchain currency. 
- FailedToConvertCurrency, - /// Deposit has failed. - DepositFailed, - /// Deposit has partially failed (changes to recipient account were made). - DepositPartiallyFailed, - /// Transaction is not finalized. - UnfinalizedTransaction, - /// Transaction funds are already claimed. - AlreadyClaimed, - } -} - -decl_module! { - pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { - /// Imports lock fund transaction of the peer blockchain. - #[weight = 0] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - pub fn import_peer_transaction( - origin, - proof: <>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof, - ) -> DispatchResult { - let submitter = frame_system::ensure_signed(origin)?; - - // verify and parse transaction proof - let deposit = prepare_deposit_details::(&proof)?; - - // make sure to update the mapping if we deposit successfully to avoid double spending, - // i.e. whenever `deposit_into` is successful we MUST update `Transfers`. - { - // if any changes were made to the storage, we can't just return error here, because - // otherwise the same proof may be imported again - let deposit_result = T::DepositInto::deposit_into(deposit.recipient, deposit.amount); - match deposit_result { - Ok(_) => (), - Err(ExchangeError::DepositPartiallyFailed) => (), - Err(error) => return Err(Error::::from(error).into()), - } - Transfers::::insert(&deposit.transfer_id, ()) - } - - // reward submitter for providing valid message - T::OnTransactionSubmitted::on_valid_transaction_submitted(submitter); - - log::trace!( - target: "runtime", - "Completed currency exchange: {:?}", - deposit.transfer_id, - ); - - Ok(()) - } - } -} - -decl_storage! { - trait Store for Pallet, I: Instance = DefaultInstance> as Bridge { - /// All transfers that have already been claimed. 
- Transfers: map hasher(blake2_128_concat) ::Id => (); - } -} - -impl, I: Instance> Pallet { - /// Returns true if currency exchange module is able to import given transaction proof in - /// its current state. - pub fn filter_transaction_proof( - proof: &::TransactionInclusionProof, - ) -> bool { - if let Err(err) = prepare_deposit_details::(proof) { - log::trace!( - target: "runtime", - "Can't accept exchange transaction: {:?}", - err, - ); - - return false; - } - - true - } -} - -impl, I: Instance> From for Error { - fn from(error: ExchangeError) -> Self { - match error { - ExchangeError::InvalidTransaction => Error::InvalidTransaction, - ExchangeError::InvalidAmount => Error::InvalidAmount, - ExchangeError::InvalidRecipient => Error::InvalidRecipient, - ExchangeError::FailedToMapRecipients => Error::FailedToMapRecipients, - ExchangeError::FailedToConvertCurrency => Error::FailedToConvertCurrency, - ExchangeError::DepositFailed => Error::DepositFailed, - ExchangeError::DepositPartiallyFailed => Error::DepositPartiallyFailed, - } - } -} - -impl OnTransactionSubmitted for () { - fn on_valid_transaction_submitted(_: AccountId) {} -} - -/// Exchange deposit details. -struct DepositDetails, I: Instance> { - /// Transfer id. - pub transfer_id: ::Id, - /// Transfer recipient. - pub recipient: ::Recipient, - /// Transfer amount. - pub amount: ::TargetAmount, -} - -/// Verify and parse transaction proof, preparing everything required for importing -/// this transaction proof. 
-fn prepare_deposit_details, I: Instance>( - proof: &<>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof, -) -> Result, Error> { - // ensure that transaction is included in finalized block that we know of - let transaction = >::PeerBlockchain::verify_transaction_inclusion_proof(proof) - .ok_or(Error::::UnfinalizedTransaction)?; - - // parse transaction - let transaction = - >::PeerMaybeLockFundsTransaction::parse(&transaction).map_err(Error::::from)?; - let transfer_id = transaction.id; - ensure!( - !Transfers::::contains_key(&transfer_id), - Error::::AlreadyClaimed - ); - - // grant recipient - let recipient = T::RecipientsMap::map(transaction.recipient).map_err(Error::::from)?; - let amount = T::CurrencyConverter::convert(transaction.amount).map_err(Error::::from)?; - - Ok(DepositDetails { - transfer_id, - recipient, - amount, - }) -} - -#[cfg(test)] -mod tests { - // From construct_runtime macro - #![allow(clippy::from_over_into)] - - use super::*; - use bp_currency_exchange::LockFundsTransaction; - use frame_support::{assert_noop, assert_ok, construct_runtime, parameter_types, weights::Weight}; - use sp_core::H256; - use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, - }; - - type AccountId = u64; - - const INVALID_TRANSACTION_ID: u64 = 100; - const ALREADY_CLAIMED_TRANSACTION_ID: u64 = 101; - const UNKNOWN_RECIPIENT_ID: u64 = 0; - const INVALID_AMOUNT: u64 = 0; - const MAX_DEPOSIT_AMOUNT: u64 = 1000; - const SUBMITTER: u64 = 2000; - - type RawTransaction = LockFundsTransaction; - - pub struct DummyTransactionSubmissionHandler; - - impl OnTransactionSubmitted for DummyTransactionSubmissionHandler { - fn on_valid_transaction_submitted(submitter: AccountId) { - Transfers::::insert(submitter, ()); - } - } - - pub struct DummyBlockchain; - - impl InclusionProofVerifier for DummyBlockchain { - type Transaction = RawTransaction; - type TransactionInclusionProof = (bool, RawTransaction); - - fn 
verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option { - if proof.0 { - Some(proof.1.clone()) - } else { - None - } - } - } - - pub struct DummyTransaction; - - impl MaybeLockFundsTransaction for DummyTransaction { - type Transaction = RawTransaction; - type Id = u64; - type Recipient = AccountId; - type Amount = u64; - - fn parse(tx: &Self::Transaction) -> bp_currency_exchange::Result { - match tx.id { - INVALID_TRANSACTION_ID => Err(ExchangeError::InvalidTransaction), - _ => Ok(tx.clone()), - } - } - } - - pub struct DummyRecipientsMap; - - impl RecipientsMap for DummyRecipientsMap { - type PeerRecipient = AccountId; - type Recipient = AccountId; - - fn map(peer_recipient: Self::PeerRecipient) -> bp_currency_exchange::Result { - match peer_recipient { - UNKNOWN_RECIPIENT_ID => Err(ExchangeError::FailedToMapRecipients), - _ => Ok(peer_recipient * 10), - } - } - } - - pub struct DummyCurrencyConverter; - - impl CurrencyConverter for DummyCurrencyConverter { - type SourceAmount = u64; - type TargetAmount = u64; - - fn convert(amount: Self::SourceAmount) -> bp_currency_exchange::Result { - match amount { - INVALID_AMOUNT => Err(ExchangeError::FailedToConvertCurrency), - _ => Ok(amount * 10), - } - } - } - - pub struct DummyDepositInto; - - impl DepositInto for DummyDepositInto { - type Recipient = AccountId; - type Amount = u64; - - fn deposit_into(_recipient: Self::Recipient, amount: Self::Amount) -> bp_currency_exchange::Result<()> { - match amount { - amount if amount < MAX_DEPOSIT_AMOUNT * 10 => Ok(()), - amount if amount == MAX_DEPOSIT_AMOUNT * 10 => Err(ExchangeError::DepositPartiallyFailed), - _ => Err(ExchangeError::DepositFailed), - } - } - } - - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - type Block = frame_system::mocking::MockBlock; - use crate as pallet_bridge_currency_exchange; - - construct_runtime! 
{ - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Exchange: pallet_bridge_currency_exchange::{Pallet}, - } - } - - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - - impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); - } - - impl Config for TestRuntime { - type OnTransactionSubmitted = DummyTransactionSubmissionHandler; - type PeerBlockchain = DummyBlockchain; - type PeerMaybeLockFundsTransaction = DummyTransaction; - type RecipientsMap = DummyRecipientsMap; - type Amount = u64; - type CurrencyConverter = DummyCurrencyConverter; - type DepositInto = DummyDepositInto; - } - - fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default() - .build_storage::() - .unwrap(); - sp_io::TestExternalities::new(t) - } - - fn transaction(id: u64) -> RawTransaction { - RawTransaction { - id, - recipient: 1, - amount: 2, - } - } - - #[test] - fn unfinalized_transaction_rejected() { - new_test_ext().execute_with(|| { - assert_noop!( - Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (false, transaction(0))), - 
Error::::UnfinalizedTransaction, - ); - }); - } - - #[test] - fn invalid_transaction_rejected() { - new_test_ext().execute_with(|| { - assert_noop!( - Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (true, transaction(INVALID_TRANSACTION_ID)), - ), - Error::::InvalidTransaction, - ); - }); - } - - #[test] - fn claimed_transaction_rejected() { - new_test_ext().execute_with(|| { - ::Transfers::insert(ALREADY_CLAIMED_TRANSACTION_ID, ()); - assert_noop!( - Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (true, transaction(ALREADY_CLAIMED_TRANSACTION_ID)), - ), - Error::::AlreadyClaimed, - ); - }); - } - - #[test] - fn transaction_with_unknown_recipient_rejected() { - new_test_ext().execute_with(|| { - let mut transaction = transaction(0); - transaction.recipient = UNKNOWN_RECIPIENT_ID; - assert_noop!( - Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)), - Error::::FailedToMapRecipients, - ); - }); - } - - #[test] - fn transaction_with_invalid_amount_rejected() { - new_test_ext().execute_with(|| { - let mut transaction = transaction(0); - transaction.amount = INVALID_AMOUNT; - assert_noop!( - Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)), - Error::::FailedToConvertCurrency, - ); - }); - } - - #[test] - fn transaction_with_invalid_deposit_rejected() { - new_test_ext().execute_with(|| { - let mut transaction = transaction(0); - transaction.amount = MAX_DEPOSIT_AMOUNT + 1; - assert_noop!( - Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)), - Error::::DepositFailed, - ); - }); - } - - #[test] - fn valid_transaction_accepted_even_if_deposit_partially_fails() { - new_test_ext().execute_with(|| { - let mut transaction = transaction(0); - transaction.amount = MAX_DEPOSIT_AMOUNT; - assert_ok!(Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (true, transaction), - )); - - // ensure that the transfer has been marked as completed 
- assert!(::Transfers::contains_key(0u64)); - // ensure that submitter has been rewarded - assert!(::Transfers::contains_key(SUBMITTER)); - }); - } - - #[test] - fn valid_transaction_accepted() { - new_test_ext().execute_with(|| { - assert_ok!(Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (true, transaction(0)), - )); - - // ensure that the transfer has been marked as completed - assert!(::Transfers::contains_key(0u64)); - // ensure that submitter has been rewarded - assert!(::Transfers::contains_key(SUBMITTER)); - }); - } -} diff --git a/polkadot/bridges/modules/dispatch/Cargo.toml b/polkadot/bridges/modules/dispatch/Cargo.toml index fb601a70a1e2e8737c2ab91b1b48546243901c2f..1d91d0a0509420ecb0fea19cbe4c0e84003101c5 100644 --- a/polkadot/bridges/modules/dispatch/Cargo.toml +++ b/polkadot/bridges/modules/dispatch/Cargo.toml @@ -7,9 +7,9 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -scale-info = { version = "1.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false } log = { version = "0.4.14", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } # Bridge dependencies @@ -18,15 +18,14 @@ bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Dependencies -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = 
"https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [dev-dependencies] sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } -serde = "1.0" [features] default = ["std"] diff --git a/polkadot/bridges/modules/dispatch/src/lib.rs b/polkadot/bridges/modules/dispatch/src/lib.rs index 00858272365a2d77314fab2e29f452409f3aabfa..f467bab0d9464829698fc54cbd7ff9db1044556f 100644 --- a/polkadot/bridges/modules/dispatch/src/lib.rs +++ b/polkadot/bridges/modules/dispatch/src/lib.rs @@ -22,123 +22,139 @@ //! a successful dispatch an event is emitted. 
#![cfg_attr(not(feature = "std"), no_std)] -#![warn(missing_docs)] // Generated by `decl_event!` #![allow(clippy::unused_unit)] -use bp_message_dispatch::{CallOrigin, MessageDispatch, MessagePayload, SpecVersion, Weight}; +use bp_message_dispatch::{CallOrigin, MessageDispatch, MessagePayload, SpecVersion}; use bp_runtime::{ derive_account_id, messages::{DispatchFeePayment, MessageDispatchResult}, ChainId, SourceAccount, }; -use codec::{Decode, Encode}; +use codec::Encode; use frame_support::{ - decl_event, decl_module, decl_storage, - dispatch::{Dispatchable, Parameter}, + dispatch::Dispatchable, ensure, traits::{Contains, Get}, weights::{extract_actual_weight, GetDispatchInfo}, }; use frame_system::RawOrigin; -use sp_runtime::{ - traits::{BadOrigin, Convert, IdentifyAccount, MaybeDisplay, MaybeSerializeDeserialize, Member, Verify}, - DispatchResult, -}; -use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; - -/// The module configuration trait. -pub trait Config: frame_system::Config { - /// The overarching event type. - type Event: From> + Into<::Event>; - /// Id of the message. Whenever message is passed to the dispatch module, it emits - /// event with this id + dispatch result. Could be e.g. (`LaneId`, `MessageNonce`) if - /// it comes from the messages module. - type MessageId: Parameter; - /// Type of account ID on source chain. - type SourceChainAccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord + Default; - /// Type of account public key on target chain. - type TargetChainAccountPublic: Parameter + IdentifyAccount; - /// Type of signature that may prove that the message has been signed by - /// owner of `TargetChainAccountPublic`. - type TargetChainSignature: Parameter + Verify; - /// The overarching dispatch call type. - type Call: Parameter - + GetDispatchInfo - + Dispatchable< - Origin = ::Origin, - PostInfo = frame_support::dispatch::PostDispatchInfo, - >; - /// Pre-dispatch filter for incoming calls. 
- /// - /// The pallet will filter all incoming calls right before they're dispatched. If this filter - /// rejects the call, special event (`Event::MessageCallRejected`) is emitted. - type CallFilter: Contains<>::Call>; - /// The type that is used to wrap the `Self::Call` when it is moved over bridge. - /// - /// The idea behind this is to avoid `Call` conversion/decoding until we'll be sure - /// that all other stuff (like `spec_version`) is OK. If we would try to decode - /// `Call` which has been encoded using previous `spec_version`, then we might end - /// up with decoding error, instead of `MessageVersionSpecMismatch`. - type EncodedCall: Decode + Encode + Into>::Call, ()>>; - /// A type which can be turned into an `AccountId` from a 256-bit hash. - /// - /// Used when deriving target chain `AccountId`s from source chain `AccountId`s. - type AccountIdConverter: sp_runtime::traits::Convert; -} +use sp_runtime::traits::{BadOrigin, Convert, IdentifyAccount, MaybeDisplay, Verify}; +use sp_std::{fmt::Debug, prelude::*}; -decl_storage! { - trait Store for Pallet, I: Instance = DefaultInstance> as Dispatch {} -} +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; + /// Id of the message. Whenever message is passed to the dispatch module, it emits + /// event with this id + dispatch result. Could be e.g. (LaneId, MessageNonce) if + /// it comes from the messages module. + type BridgeMessageId: Parameter; + /// Type of account ID on source chain. + type SourceChainAccountId: Parameter + + Member + + MaybeSerializeDeserialize + + Debug + + MaybeDisplay + + Ord + + Default; + /// Type of account public key on target chain. 
+ type TargetChainAccountPublic: Parameter + IdentifyAccount; + /// Type of signature that may prove that the message has been signed by + /// owner of `TargetChainAccountPublic`. + type TargetChainSignature: Parameter + Verify; + /// The overarching dispatch call type. + type Call: Parameter + + GetDispatchInfo + + Dispatchable< + Origin = ::Origin, + PostInfo = frame_support::dispatch::PostDispatchInfo, + >; + /// Pre-dispatch filter for incoming calls. + /// + /// The pallet will filter all incoming calls right before they're dispatched. If this + /// filter rejects the call, special event (`Event::MessageCallRejected`) is emitted. + type CallFilter: Contains<>::Call>; + /// The type that is used to wrap the `Self::Call` when it is moved over bridge. + /// + /// The idea behind this is to avoid `Call` conversion/decoding until we'll be sure + /// that all other stuff (like `spec_version`) is ok. If we would try to decode + /// `Call` which has been encoded using previous `spec_version`, then we might end + /// up with decoding error, instead of `MessageVersionSpecMismatch`. + type EncodedCall: Decode + Encode + Into>::Call, ()>>; + /// A type which can be turned into an AccountId from a 256-bit hash. + /// + /// Used when deriving target chain AccountIds from source chain AccountIds. + type AccountIdConverter: sp_runtime::traits::Convert; + } + + type BridgeMessageIdOf = >::BridgeMessageId; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData<(T, I)>); + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet {} -decl_event!( - pub enum Event where - >::MessageId, - AccountId = ::AccountId, - { + #[pallet::call] + impl, I: 'static> Pallet {} + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event, I: 'static = ()> { /// Message has been rejected before reaching dispatch. 
- MessageRejected(ChainId, MessageId), + MessageRejected(ChainId, BridgeMessageIdOf), /// Message has been rejected by dispatcher because of spec version mismatch. /// Last two arguments are: expected and passed spec version. - MessageVersionSpecMismatch(ChainId, MessageId, SpecVersion, SpecVersion), + MessageVersionSpecMismatch(ChainId, BridgeMessageIdOf, SpecVersion, SpecVersion), /// Message has been rejected by dispatcher because of weight mismatch. /// Last two arguments are: expected and passed call weight. - MessageWeightMismatch(ChainId, MessageId, Weight, Weight), + MessageWeightMismatch(ChainId, BridgeMessageIdOf, Weight, Weight), /// Message signature mismatch. - MessageSignatureMismatch(ChainId, MessageId), + MessageSignatureMismatch(ChainId, BridgeMessageIdOf), /// We have failed to decode Call from the message. - MessageCallDecodeFailed(ChainId, MessageId), + MessageCallDecodeFailed(ChainId, BridgeMessageIdOf), /// The call from the message has been rejected by the call filter. - MessageCallRejected(ChainId, MessageId), + MessageCallRejected(ChainId, BridgeMessageIdOf), /// The origin account has failed to pay fee for dispatching the message. - MessageDispatchPaymentFailed(ChainId, MessageId, AccountId, Weight), + MessageDispatchPaymentFailed( + ChainId, + BridgeMessageIdOf, + ::AccountId, + Weight, + ), /// Message has been dispatched with given result. - MessageDispatched(ChainId, MessageId, DispatchResult), + MessageDispatched(ChainId, BridgeMessageIdOf, DispatchResult), /// Phantom member, never used. Needed to handle multiple pallet instances. _Dummy(PhantomData), } -); - -decl_module! { - /// Call Dispatch FRAME Pallet. - pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { - /// Deposit one of this module's events by using the default implementation. 
- fn deposit_event() = default; - } } -impl, I: Instance> MessageDispatch for Pallet { - type Message = - MessagePayload; +impl, I: 'static> MessageDispatch for Pallet { + type Message = MessagePayload< + T::SourceChainAccountId, + T::TargetChainAccountPublic, + T::TargetChainSignature, + T::EncodedCall, + >; - fn dispatch_weight(message: &Self::Message) -> Weight { + fn dispatch_weight(message: &Self::Message) -> bp_message_dispatch::Weight { message.weight } - fn dispatch Result<(), ()>>( + fn dispatch Result<(), ()>>( source_chain: ChainId, target_chain: ChainId, - id: T::MessageId, + id: T::BridgeMessageId, message: Result, pay_dispatch_fee: P, ) -> MessageDispatchResult { @@ -152,13 +168,13 @@ impl, I: Instance> MessageDispatch for source_chain, id, ); - Self::deposit_event(RawEvent::MessageRejected(source_chain, id)); + Self::deposit_event(Event::MessageRejected(source_chain, id)); return MessageDispatchResult { dispatch_result: false, unspent_weight: 0, dispatch_fee_paid_during_dispatch: false, - }; - } + } + }, }; // verify spec version @@ -177,13 +193,13 @@ impl, I: Instance> MessageDispatch for expected_version, message.spec_version, ); - Self::deposit_event(RawEvent::MessageVersionSpecMismatch( + Self::deposit_event(Event::MessageVersionSpecMismatch( source_chain, id, expected_version, message.spec_version, )); - return dispatch_result; + return dispatch_result } // now that we have spec version checked, let's decode the call @@ -196,19 +212,20 @@ impl, I: Instance> MessageDispatch for source_chain, id, ); - Self::deposit_event(RawEvent::MessageCallDecodeFailed(source_chain, id)); - return dispatch_result; - } + Self::deposit_event(Event::MessageCallDecodeFailed(source_chain, id)); + return dispatch_result + }, }; // prepare dispatch origin let origin_account = match message.origin { CallOrigin::SourceRoot => { - let hex_id = derive_account_id::(source_chain, SourceAccount::Root); + let hex_id = + derive_account_id::(source_chain, SourceAccount::Root); 
let target_id = T::AccountIdConverter::convert(hex_id); log::trace!(target: "runtime::bridge-dispatch", "Root Account: {:?}", &target_id); target_id - } + }, CallOrigin::TargetAccount(source_account_id, target_public, target_signature) => { let digest = account_ownership_digest( &call, @@ -228,19 +245,20 @@ impl, I: Instance> MessageDispatch for target_account, target_signature, ); - Self::deposit_event(RawEvent::MessageSignatureMismatch(source_chain, id)); - return dispatch_result; + Self::deposit_event(Event::MessageSignatureMismatch(source_chain, id)); + return dispatch_result } log::trace!(target: "runtime::bridge-dispatch", "Target Account: {:?}", &target_account); target_account - } + }, CallOrigin::SourceAccount(source_account_id) => { - let hex_id = derive_account_id(source_chain, SourceAccount::Account(source_account_id)); + let hex_id = + derive_account_id(source_chain, SourceAccount::Account(source_account_id)); let target_id = T::AccountIdConverter::convert(hex_id); log::trace!(target: "runtime::bridge-dispatch", "Source Account: {:?}", &target_id); target_id - } + }, }; // filter the call @@ -252,8 +270,8 @@ impl, I: Instance> MessageDispatch for id, call, ); - Self::deposit_event(RawEvent::MessageCallRejected(source_chain, id)); - return dispatch_result; + Self::deposit_event(Event::MessageCallRejected(source_chain, id)); + return dispatch_result } // verify weight @@ -270,18 +288,21 @@ impl, I: Instance> MessageDispatch for expected_weight, message.weight, ); - Self::deposit_event(RawEvent::MessageWeightMismatch( + Self::deposit_event(Event::MessageWeightMismatch( source_chain, id, expected_weight, message.weight, )); - return dispatch_result; + return dispatch_result } // pay dispatch fee right before dispatch - let pay_dispatch_fee_at_target_chain = message.dispatch_fee_payment == DispatchFeePayment::AtTargetChain; - if pay_dispatch_fee_at_target_chain && pay_dispatch_fee(&origin_account, message.weight).is_err() { + let 
pay_dispatch_fee_at_target_chain = + message.dispatch_fee_payment == DispatchFeePayment::AtTargetChain; + if pay_dispatch_fee_at_target_chain && + pay_dispatch_fee(&origin_account, message.weight).is_err() + { log::trace!( target: "runtime::bridge-dispatch", "Failed to pay dispatch fee for dispatching message {:?}/{:?} with weight {}", @@ -289,13 +310,13 @@ impl, I: Instance> MessageDispatch for id, message.weight, ); - Self::deposit_event(RawEvent::MessageDispatchPaymentFailed( + Self::deposit_event(Event::MessageDispatchPaymentFailed( source_chain, id, origin_account, message.weight, )); - return dispatch_result; + return dispatch_result } dispatch_result.dispatch_fee_paid_during_dispatch = pay_dispatch_fee_at_target_chain; @@ -313,13 +334,13 @@ impl, I: Instance> MessageDispatch for "Message {:?}/{:?} has been dispatched. Weight: {} of {}. Result: {:?}. Call dispatch result: {:?}", source_chain, id, - dispatch_result.unspent_weight, + actual_call_weight, message.weight, dispatch_result, result, ); - Self::deposit_event(RawEvent::MessageDispatched( + Self::deposit_event(Event::MessageDispatched( source_chain, id, result.map(drop).map_err(|e| e.error), @@ -335,9 +356,19 @@ impl, I: Instance> MessageDispatch for /// For example, if a message is sent from a "regular" account on the source chain it will not be /// allowed to be dispatched as Root on the target chain. This is a useful check to do on the source /// chain _before_ sending a message whose dispatch will be rejected on the target chain. 
-pub fn verify_message_origin( +pub fn verify_message_origin< + SourceChainAccountId, + TargetChainAccountPublic, + TargetChainSignature, + Call, +>( sender_origin: &RawOrigin, - message: &MessagePayload, + message: &MessagePayload< + SourceChainAccountId, + TargetChainAccountPublic, + TargetChainSignature, + Call, + >, ) -> Result, BadOrigin> where SourceChainAccountId: PartialEq + Clone, @@ -346,21 +377,19 @@ where CallOrigin::SourceRoot => { ensure!(sender_origin == &RawOrigin::Root, BadOrigin); Ok(None) - } + }, CallOrigin::TargetAccount(ref source_account_id, _, _) => { - ensure!( - sender_origin == &RawOrigin::Signed(source_account_id.clone()), - BadOrigin - ); + ensure!(sender_origin == &RawOrigin::Signed(source_account_id.clone()), BadOrigin); Ok(Some(source_account_id.clone())) - } + }, CallOrigin::SourceAccount(ref source_account_id) => { ensure!( - sender_origin == &RawOrigin::Signed(source_account_id.clone()) || sender_origin == &RawOrigin::Root, + sender_origin == &RawOrigin::Signed(source_account_id.clone()) || + sender_origin == &RawOrigin::Root, BadOrigin ); Ok(Some(source_account_id.clone())) - } + }, } } @@ -397,7 +426,8 @@ mod tests { #![allow(clippy::from_over_into)] use super::*; - use frame_support::{dispatch::GetDispatchInfo, parameter_types, weights::Weight}; + use codec::Decode; + use frame_support::{parameter_types, weights::Weight}; use frame_system::{EventRecord, Phase}; use scale_info::TypeInfo; use sp_core::H256; @@ -408,7 +438,7 @@ mod tests { }; type AccountId = u64; - type MessageId = [u8; 4]; + type BridgeMessageId = [u8; 4]; const SOURCE_CHAIN_ID: ChainId = *b"srce"; const TARGET_CHAIN_ID: ChainId = *b"trgt"; @@ -494,7 +524,7 @@ mod tests { impl Config for TestRuntime { type Event = Event; - type MessageId = MessageId; + type BridgeMessageId = BridgeMessageId; type SourceChainAccountId = AccountId; type TargetChainAccountPublic = TestAccountPublic; type TargetChainSignature = TestSignature; @@ -525,16 +555,17 @@ mod tests { const 
TEST_WEIGHT: Weight = 1_000_000_000; fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default() - .build_storage::() - .unwrap(); + let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); sp_io::TestExternalities::new(t) } fn prepare_message( origin: CallOrigin, call: Call, - ) -> as MessageDispatch::MessageId>>::Message { + ) -> as MessageDispatch< + AccountId, + ::BridgeMessageId, + >>::Message { MessagePayload { spec_version: TEST_SPEC_VERSION, weight: TEST_WEIGHT, @@ -546,20 +577,29 @@ mod tests { fn prepare_root_message( call: Call, - ) -> as MessageDispatch::MessageId>>::Message { + ) -> as MessageDispatch< + AccountId, + ::BridgeMessageId, + >>::Message { prepare_message(CallOrigin::SourceRoot, call) } fn prepare_target_message( call: Call, - ) -> as MessageDispatch::MessageId>>::Message { + ) -> as MessageDispatch< + AccountId, + ::BridgeMessageId, + >>::Message { let origin = CallOrigin::TargetAccount(1, TestAccountPublic(1), TestSignature(1)); prepare_message(origin, call) } fn prepare_source_message( call: Call, - ) -> as MessageDispatch::MessageId>>::Message { + ) -> as MessageDispatch< + AccountId, + ::BridgeMessageId, + >>::Message { let origin = CallOrigin::SourceAccount(1); prepare_message(origin, call) } @@ -570,14 +610,20 @@ mod tests { let id = [0; 4]; const BAD_SPEC_VERSION: SpecVersion = 99; - let mut message = prepare_root_message(Call::System(frame_system::Call::::remark { + let mut message = prepare_root_message(Call::System(frame_system::Call::remark { remark: vec![1, 2, 3], })); let weight = message.weight; message.spec_version = BAD_SPEC_VERSION; System::set_block_number(1); - let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + let result = Dispatch::dispatch( + SOURCE_CHAIN_ID, + TARGET_CHAIN_ID, + id, + Ok(message), + |_, _| unreachable!(), + ); assert_eq!(result.unspent_weight, weight); assert!(!result.dispatch_result); @@ 
-585,12 +631,14 @@ mod tests { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: Event::Dispatch(call_dispatch::Event::::MessageVersionSpecMismatch( - SOURCE_CHAIN_ID, - id, - TEST_SPEC_VERSION, - BAD_SPEC_VERSION - )), + event: Event::Dispatch( + call_dispatch::Event::::MessageVersionSpecMismatch( + SOURCE_CHAIN_ID, + id, + TEST_SPEC_VERSION, + BAD_SPEC_VERSION + ) + ), topics: vec![], }], ); @@ -601,17 +649,20 @@ mod tests { fn should_fail_on_weight_mismatch() { new_test_ext().execute_with(|| { let id = [0; 4]; - let call = Call::System(frame_system::Call::::remark { remark: vec![1, 2, 3] }); + let call = Call::System(frame_system::Call::remark { remark: vec![1, 2, 3] }); let call_weight = call.get_dispatch_info().weight; let mut message = prepare_root_message(call); message.weight = 7; - assert!( - call_weight != 7, - "needed for test to actually trigger a weight mismatch" - ); + assert!(call_weight != 7, "needed for test to actually trigger a weight mismatch"); System::set_block_number(1); - let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + let result = Dispatch::dispatch( + SOURCE_CHAIN_ID, + TARGET_CHAIN_ID, + id, + Ok(message), + |_, _| unreachable!(), + ); assert_eq!(result.unspent_weight, 7); assert!(!result.dispatch_result); @@ -619,12 +670,14 @@ mod tests { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: Event::Dispatch(call_dispatch::Event::::MessageWeightMismatch( - SOURCE_CHAIN_ID, - id, - call_weight, - 7, - )), + event: Event::Dispatch( + call_dispatch::Event::::MessageWeightMismatch( + SOURCE_CHAIN_ID, + id, + call_weight, + 7, + ) + ), topics: vec![], }], ); @@ -639,12 +692,18 @@ mod tests { let call_origin = CallOrigin::TargetAccount(1, TestAccountPublic(1), TestSignature(99)); let message = prepare_message( call_origin, - Call::System(frame_system::Call::::remark { remark: vec![1, 2, 3] }), + Call::System(frame_system::Call::remark { 
remark: vec![1, 2, 3] }), ); let weight = message.weight; System::set_block_number(1); - let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + let result = Dispatch::dispatch( + SOURCE_CHAIN_ID, + TARGET_CHAIN_ID, + id, + Ok(message), + |_, _| unreachable!(), + ); assert_eq!(result.unspent_weight, weight); assert!(!result.dispatch_result); @@ -652,10 +711,12 @@ mod tests { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: Event::Dispatch(call_dispatch::Event::::MessageSignatureMismatch( - SOURCE_CHAIN_ID, - id - )), + event: Event::Dispatch( + call_dispatch::Event::::MessageSignatureMismatch( + SOURCE_CHAIN_ID, + id + ) + ), topics: vec![], }], ); @@ -668,7 +729,13 @@ mod tests { let id = [0; 4]; System::set_block_number(1); - Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Err(()), |_, _| unreachable!()); + Dispatch::dispatch( + SOURCE_CHAIN_ID, + TARGET_CHAIN_ID, + id, + Err(()), + |_, _| unreachable!(), + ); assert_eq!( System::events(), @@ -689,14 +756,20 @@ mod tests { new_test_ext().execute_with(|| { let id = [0; 4]; - let mut message = prepare_root_message(Call::System(frame_system::Call::::remark { + let mut message = prepare_root_message(Call::System(frame_system::Call::remark { remark: vec![1, 2, 3], })); let weight = message.weight; message.call.0 = vec![]; System::set_block_number(1); - let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + let result = Dispatch::dispatch( + SOURCE_CHAIN_ID, + TARGET_CHAIN_ID, + id, + Ok(message), + |_, _| unreachable!(), + ); assert_eq!(result.unspent_weight, weight); assert!(!result.dispatch_result); @@ -704,10 +777,12 @@ mod tests { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: Event::Dispatch(call_dispatch::Event::::MessageCallDecodeFailed( - SOURCE_CHAIN_ID, - id - )), + event: Event::Dispatch( + call_dispatch::Event::::MessageCallDecodeFailed( 
+ SOURCE_CHAIN_ID, + id + ) + ), topics: vec![], }], ); @@ -719,15 +794,20 @@ mod tests { new_test_ext().execute_with(|| { let id = [0; 4]; - let call = Call::System(frame_system::Call::::fill_block { - ratio: Perbill::from_percent(75), - }); + let call = + Call::System(frame_system::Call::fill_block { ratio: Perbill::from_percent(75) }); let weight = call.get_dispatch_info().weight; let mut message = prepare_root_message(call); message.weight = weight; System::set_block_number(1); - let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + let result = Dispatch::dispatch( + SOURCE_CHAIN_ID, + TARGET_CHAIN_ID, + id, + Ok(message), + |_, _| unreachable!(), + ); assert_eq!(result.unspent_weight, weight); assert!(!result.dispatch_result); @@ -735,10 +815,12 @@ mod tests { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: Event::Dispatch(call_dispatch::Event::::MessageCallRejected( - SOURCE_CHAIN_ID, - id - )), + event: Event::Dispatch( + call_dispatch::Event::::MessageCallRejected( + SOURCE_CHAIN_ID, + id + ) + ), topics: vec![], }], ); @@ -750,14 +832,17 @@ mod tests { new_test_ext().execute_with(|| { let id = [0; 4]; - let mut message = prepare_root_message(Call::System(frame_system::Call::::remark { + let mut message = prepare_root_message(Call::System(frame_system::Call::remark { remark: vec![1, 2, 3], })); let weight = message.weight; message.dispatch_fee_payment = DispatchFeePayment::AtTargetChain; System::set_block_number(1); - let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| Err(())); + let result = + Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| { + Err(()) + }); assert_eq!(result.unspent_weight, weight); assert!(!result.dispatch_result); @@ -765,15 +850,17 @@ mod tests { System::events(), vec![EventRecord { phase: Phase::Initialization, - event: 
Event::Dispatch(call_dispatch::Event::::MessageDispatchPaymentFailed( - SOURCE_CHAIN_ID, - id, - AccountIdConverter::convert(derive_account_id::( + event: Event::Dispatch( + call_dispatch::Event::::MessageDispatchPaymentFailed( SOURCE_CHAIN_ID, - SourceAccount::Root - )), - TEST_WEIGHT, - )), + id, + AccountIdConverter::convert(derive_account_id::( + SOURCE_CHAIN_ID, + SourceAccount::Root + )), + TEST_WEIGHT, + ) + ), topics: vec![], }], ); @@ -785,13 +872,19 @@ mod tests { new_test_ext().execute_with(|| { let id = [0; 4]; - let mut message = prepare_root_message(Call::System(frame_system::Call::::remark { + let mut message = prepare_root_message(Call::System(frame_system::Call::remark { remark: vec![1, 2, 3], })); message.dispatch_fee_payment = DispatchFeePayment::AtTargetChain; System::set_block_number(1); - let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| Ok(())); + let result = Dispatch::dispatch( + SOURCE_CHAIN_ID, + TARGET_CHAIN_ID, + id, + Ok(message), + |_, _| Ok(()), + ); assert!(result.dispatch_fee_paid_during_dispatch); assert!(result.dispatch_result); @@ -815,11 +908,17 @@ mod tests { new_test_ext().execute_with(|| { let id = [0; 4]; - let call = Call::System(frame_system::Call::::set_heap_pages { pages: 1 }); + let call = Call::System(frame_system::Call::set_heap_pages { pages: 1 }); let message = prepare_target_message(call); System::set_block_number(1); - let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + let result = Dispatch::dispatch( + SOURCE_CHAIN_ID, + TARGET_CHAIN_ID, + id, + Ok(message), + |_, _| unreachable!(), + ); assert!(!result.dispatch_fee_paid_during_dispatch); assert!(!result.dispatch_result); @@ -842,12 +941,18 @@ mod tests { fn should_dispatch_bridge_message_from_root_origin() { new_test_ext().execute_with(|| { let id = [0; 4]; - let message = prepare_root_message(Call::System(frame_system::Call::::remark { + let message = 
prepare_root_message(Call::System(frame_system::Call::remark { remark: vec![1, 2, 3], })); System::set_block_number(1); - let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + let result = Dispatch::dispatch( + SOURCE_CHAIN_ID, + TARGET_CHAIN_ID, + id, + Ok(message), + |_, _| unreachable!(), + ); assert!(!result.dispatch_fee_paid_during_dispatch); assert!(result.dispatch_result); @@ -871,11 +976,17 @@ mod tests { new_test_ext().execute_with(|| { let id = [0; 4]; - let call = Call::System(frame_system::Call::::remark { remark: vec![] }); + let call = Call::System(frame_system::Call::remark { remark: vec![] }); let message = prepare_target_message(call); System::set_block_number(1); - let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + let result = Dispatch::dispatch( + SOURCE_CHAIN_ID, + TARGET_CHAIN_ID, + id, + Ok(message), + |_, _| unreachable!(), + ); assert!(!result.dispatch_fee_paid_during_dispatch); assert!(result.dispatch_result); @@ -899,11 +1010,17 @@ mod tests { new_test_ext().execute_with(|| { let id = [0; 4]; - let call = Call::System(frame_system::Call::::remark { remark: vec![] }); + let call = Call::System(frame_system::Call::remark { remark: vec![] }); let message = prepare_source_message(call); System::set_block_number(1); - let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!()); + let result = Dispatch::dispatch( + SOURCE_CHAIN_ID, + TARGET_CHAIN_ID, + id, + Ok(message), + |_, _| unreachable!(), + ); assert!(!result.dispatch_fee_paid_during_dispatch); assert!(result.dispatch_result); @@ -924,60 +1041,42 @@ mod tests { #[test] fn origin_is_checked_when_verifying_sending_message_using_source_root_account() { - let call = Call::System(frame_system::Call::::remark { remark: vec![] }); + let call = Call::System(frame_system::Call::remark { remark: vec![] }); let message = 
prepare_root_message(call); // When message is sent by Root, CallOrigin::SourceRoot is allowed assert!(matches!(verify_message_origin(&RawOrigin::Root, &message), Ok(None))); // when message is sent by some real account, CallOrigin::SourceRoot is not allowed - assert!(matches!( - verify_message_origin(&RawOrigin::Signed(1), &message), - Err(BadOrigin) - )); + assert!(matches!(verify_message_origin(&RawOrigin::Signed(1), &message), Err(BadOrigin))); } #[test] fn origin_is_checked_when_verifying_sending_message_using_target_account() { - let call = Call::System(frame_system::Call::::remark { remark: vec![] }); + let call = Call::System(frame_system::Call::remark { remark: vec![] }); let message = prepare_target_message(call); // When message is sent by Root, CallOrigin::TargetAccount is not allowed - assert!(matches!( - verify_message_origin(&RawOrigin::Root, &message), - Err(BadOrigin) - )); + assert!(matches!(verify_message_origin(&RawOrigin::Root, &message), Err(BadOrigin))); // When message is sent by some other account, it is rejected - assert!(matches!( - verify_message_origin(&RawOrigin::Signed(2), &message), - Err(BadOrigin) - )); + assert!(matches!(verify_message_origin(&RawOrigin::Signed(2), &message), Err(BadOrigin))); // When message is sent by a real account, it is allowed to have origin // CallOrigin::TargetAccount - assert!(matches!( - verify_message_origin(&RawOrigin::Signed(1), &message), - Ok(Some(1)) - )); + assert!(matches!(verify_message_origin(&RawOrigin::Signed(1), &message), Ok(Some(1)))); } #[test] fn origin_is_checked_when_verifying_sending_message_using_source_account() { - let call = Call::System(frame_system::Call::::remark { remark: vec![] }); + let call = Call::System(frame_system::Call::remark { remark: vec![] }); let message = prepare_source_message(call); // Sending a message from the expected origin account works - assert!(matches!( - verify_message_origin(&RawOrigin::Signed(1), &message), - Ok(Some(1)) - )); + 
assert!(matches!(verify_message_origin(&RawOrigin::Signed(1), &message), Ok(Some(1)))); // If we send a message from a different account, it is rejected - assert!(matches!( - verify_message_origin(&RawOrigin::Signed(2), &message), - Err(BadOrigin) - )); + assert!(matches!(verify_message_origin(&RawOrigin::Signed(2), &message), Err(BadOrigin))); // The Root account is allowed to assume any expected origin account assert!(matches!(verify_message_origin(&RawOrigin::Root, &message), Ok(Some(1)))); diff --git a/polkadot/bridges/modules/ethereum-contract-builtin/Cargo.toml b/polkadot/bridges/modules/ethereum-contract-builtin/Cargo.toml deleted file mode 100644 index 6177ed3734ca823906ca5e6abf52038066eb74d1..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum-contract-builtin/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "ethereum-contract-builtin" -description = "Small crate that helps Solidity contract to verify finality proof." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0" } -ethereum-types = "0.11.0" -finality-grandpa = "0.14.4" -hex = "0.4" -log = "0.4.14" - -# Runtime/chain specific dependencies - -rialto-runtime = { path = "../../bin/rialto/runtime" } - -# Substrate Dependencies - -sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/modules/ethereum-contract-builtin/src/lib.rs 
b/polkadot/bridges/modules/ethereum-contract-builtin/src/lib.rs deleted file mode 100644 index a07f838cf8d64d7a0fabe051c5593d75a502ca57..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum-contract-builtin/src/lib.rs +++ /dev/null @@ -1,374 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use codec::{Decode, Encode}; -use ethereum_types::U256; -use finality_grandpa::voter_set::VoterSet; -use rialto_runtime::{Block, BlockNumber, Hash, Header as RuntimeHeader}; -use sp_blockchain::Error as ClientError; -use sp_finality_grandpa::{AuthorityList, ConsensusLog, GRANDPA_ENGINE_ID}; - -/// Builtin errors. -#[derive(Debug)] -pub enum Error { - /// Failed to decode block number. - BlockNumberDecode, - /// Failed to decode Substrate header. - HeaderDecode(codec::Error), - /// Failed to decode best voters set. - BestSetDecode(codec::Error), - /// Best voters set is invalid. - InvalidBestSet, - /// Failed to decode finality proof. - FinalityProofDecode(codec::Error), - /// Failed to verify justification. - JustificationVerify(Box), -} - -/// Substrate header. -#[derive(Debug, PartialEq)] -pub struct Header { - /// Header hash. - pub hash: Hash, - /// Parent header hash. - pub parent_hash: Hash, - /// Header number. 
- pub number: BlockNumber, - /// GRANDPA validators change signal. - pub signal: Option, -} - -/// GRANDPA validators set change signal. -#[derive(Debug, PartialEq)] -pub struct ValidatorsSetSignal { - /// Signal delay. - pub delay: BlockNumber, - /// New validators set. - pub validators: Vec, -} - -/// Convert from U256 to BlockNumber. Fails if `U256` value isn't fitting within `BlockNumber` -/// limits (the runtime referenced by this module uses u32 as `BlockNumber`). -pub fn to_substrate_block_number(number: U256) -> Result { - let substrate_block_number = match number == number.low_u32().into() { - true => Ok(number.low_u32()), - false => Err(Error::BlockNumberDecode), - }; - - log::trace!( - target: "bridge-builtin", - "Parsed Substrate block number from {}: {:?}", - number, - substrate_block_number, - ); - - substrate_block_number -} - -/// Convert from BlockNumber to U256. -pub fn from_substrate_block_number(number: BlockNumber) -> Result { - Ok(U256::from(number as u64)) -} - -/// Parse Substrate header. 
-pub fn parse_substrate_header(raw_header: &[u8]) -> Result { - let substrate_header = RuntimeHeader::decode(&mut &*raw_header) - .map(|header| Header { - hash: header.hash(), - parent_hash: header.parent_hash, - number: header.number, - signal: sp_runtime::traits::Header::digest(&header) - .log(|log| { - log.as_consensus().and_then(|(engine_id, log)| { - if engine_id == GRANDPA_ENGINE_ID { - Some(log) - } else { - None - } - }) - }) - .and_then(|log| ConsensusLog::decode(&mut &*log).ok()) - .and_then(|log| match log { - ConsensusLog::ScheduledChange(scheduled_change) => Some(ValidatorsSetSignal { - delay: scheduled_change.delay, - validators: scheduled_change.next_authorities.encode(), - }), - _ => None, - }), - }) - .map_err(Error::HeaderDecode); - - log::debug!( - target: "bridge-builtin", - "Parsed Substrate header {}: {:?}", - if substrate_header.is_ok() { - format!("<{}-bytes-blob>", raw_header.len()) - } else { - hex::encode(raw_header) - }, - substrate_header, - ); - - substrate_header -} - -/// Verify GRANDPA finality proof. 
-pub fn verify_substrate_finality_proof( - finality_target_number: BlockNumber, - finality_target_hash: Hash, - best_set_id: u64, - raw_best_set: &[u8], - raw_finality_proof: &[u8], -) -> Result<(), Error> { - let best_set = AuthorityList::decode(&mut &*raw_best_set) - .map_err(Error::BestSetDecode) - .and_then(|authorities| VoterSet::new(authorities.into_iter()).ok_or(Error::InvalidBestSet)); - - log::debug!( - target: "bridge-builtin", - "Parsed Substrate authorities set {}: {:?}", - if best_set.is_ok() { - format!("<{}-bytes-blob>", raw_best_set.len()) - } else { - hex::encode(raw_best_set) - }, - best_set, - ); - - let best_set = best_set?; - - let verify_result = sc_finality_grandpa::GrandpaJustification::::decode_and_verify_finalizes( - raw_finality_proof, - (finality_target_hash, finality_target_number), - best_set_id, - &best_set, - ) - .map_err(Box::new) - .map_err(Error::JustificationVerify) - .map(|_| ()); - - log::debug!( - target: "bridge-builtin", - "Verified Substrate finality proof {}: {:?}", - if verify_result.is_ok() { - format!("<{}-bytes-blob>", raw_finality_proof.len()) - } else { - hex::encode(raw_finality_proof) - }, - verify_result, - ); - - verify_result -} - -#[cfg(test)] -mod tests { - use super::*; - use rialto_runtime::DigestItem; - use sp_core::crypto::Public; - use sp_finality_grandpa::{AuthorityId, ScheduledChange}; - use sp_runtime::generic::Digest; - - #[test] - fn to_substrate_block_number_succeeds() { - assert_eq!(to_substrate_block_number(U256::zero()).unwrap(), 0); - assert_eq!( - to_substrate_block_number(U256::from(std::u32::MAX as u64)).unwrap(), - 0xFFFFFFFF - ); - } - - #[test] - fn to_substrate_block_number_fails() { - assert!(matches!( - to_substrate_block_number(U256::from(std::u32::MAX as u64 + 1)), - Err(Error::BlockNumberDecode) - )); - } - - #[test] - fn from_substrate_block_number_succeeds() { - assert_eq!(from_substrate_block_number(0).unwrap(), U256::zero()); - assert_eq!( - 
from_substrate_block_number(std::u32::MAX).unwrap(), - U256::from(std::u32::MAX) - ); - } - - #[test] - fn substrate_header_without_signal_parsed() { - let raw_header = RuntimeHeader { - parent_hash: [0u8; 32].into(), - number: 0, - state_root: "b2fc47904df5e355c6ab476d89fbc0733aeddbe302f0b94ba4eea9283f7e89e7" - .parse() - .unwrap(), - extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" - .parse() - .unwrap(), - digest: Default::default(), - } - .encode(); - assert_eq!( - raw_header, - hex::decode("000000000000000000000000000000000000000000000000000000000000000000b2fc47904df5e355c6ab476d89fbc0733aeddbe302f0b94ba4eea9283f7e89e703170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c11131400").unwrap(), - ); - - assert_eq!( - parse_substrate_header(&raw_header).unwrap(), - Header { - hash: "afbbeb92bf6ff14f60bdef0aa89f043dd403659ae82665238810ace0d761f6d0" - .parse() - .unwrap(), - parent_hash: Default::default(), - number: 0, - signal: None, - }, - ); - } - - #[test] - fn substrate_header_with_signal_parsed() { - let authorities = vec![ - (AuthorityId::from_slice(&[1; 32]), 101), - (AuthorityId::from_slice(&[3; 32]), 103), - ]; - let mut digest = Digest::default(); - digest.push(DigestItem::Consensus( - GRANDPA_ENGINE_ID, - ConsensusLog::ScheduledChange(ScheduledChange { - next_authorities: authorities.clone(), - delay: 8, - }) - .encode(), - )); - - let raw_header = RuntimeHeader { - parent_hash: "c0ac300d4005141ea690f3df593e049739c227316eb7f05052f3ee077388b68b" - .parse() - .unwrap(), - number: 8, - state_root: "822d6b412033aa9ac8e1722918eec5f25633529225754b3d4149982f5cacd4aa" - .parse() - .unwrap(), - extrinsics_root: "e7b07c0ce2799416ce7877b9cefc7f596bea5e8813bb2a0abf760414073ca928" - .parse() - .unwrap(), - digest, - } - .encode(); - assert_eq!( - raw_header, - 
hex::decode("c0ac300d4005141ea690f3df593e049739c227316eb7f05052f3ee077388b68b20822d6b412033aa9ac8e1722918eec5f25633529225754b3d4149982f5cacd4aae7b07c0ce2799416ce7877b9cefc7f596bea5e8813bb2a0abf760414073ca928040446524e4b59010108010101010101010101010101010101010101010101010101010101010101010165000000000000000303030303030303030303030303030303030303030303030303030303030303670000000000000008000000").unwrap(), - ); - - assert_eq!( - parse_substrate_header(&raw_header).unwrap(), - Header { - hash: "3dfebb280bd87a4640f89d7f2adecd62b88148747bff5b63af6e1634ee37a56e" - .parse() - .unwrap(), - parent_hash: "c0ac300d4005141ea690f3df593e049739c227316eb7f05052f3ee077388b68b" - .parse() - .unwrap(), - number: 8, - signal: Some(ValidatorsSetSignal { - delay: 8, - validators: authorities.encode(), - }), - }, - ); - } - - /// Number of the example block with justification. - const EXAMPLE_JUSTIFIED_BLOCK_NUMBER: u32 = 8; - /// Hash of the example block with justification. - const EXAMPLE_JUSTIFIED_BLOCK_HASH: &str = "a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f343775"; - /// Id of authorities set that have generated example justification. Could be computed by tracking - /// every set change in canonized headers. - const EXAMPLE_AUTHORITIES_SET_ID: u64 = 0; - /// Encoded authorities set that has generated example justification. Could be fetched from `ScheduledChange` - /// digest of the block that has scheduled this set OR by calling `GrandpaApi::grandpa_authorities()` at - /// appropriate block. - const EXAMPLE_AUTHORITIES_SET: &str = "1488dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0ee0100000000000000d17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae690100000000000000439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234f01000000000000005e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d901000000000000001dfe3e22cc0d45c70779c1095f7489a8ef3cf52d62fbd8c2fa38c9f1723502b50100000000000000"; - /// Example justification. 
Could be fetched by calling 'chain_getBlock' RPC. - const EXAMPLE_JUSTIFICATION: &str = "2600000000000000a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000010a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000d66b4ceb57ef8bcbc955071b597c8c5d2adcfdbb009c73f8438d342670fdeca9ac60686cbd58105b10f51d0a64a8e73b2e5829b2eab3248a008c472852130b00439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234fa2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000f5730c14d3cd22b7661e2f5fcb3139dd5fef37f946314a441d01b40ce1200ef70d810525f23fd278b588cd67473c200bda83c338c407b479386aa83798e5970b5e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d9a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000c78d6ec463f476461a695b4791d30e7626d16fdf72d7c252c2cad387495a97e8c2827ed4d5af853d6e05d31cb6fb7438c9481a7e9c6990d60a9bfaf6a6e1930988dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0eea2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000052b4fc52d430286b3e2d650aa6e01b6ff4fae8b968893a62be789209eb97ee6e23780d3f5af7042d85bb48f1b202890b22724dfebce138826f66a5e00324320fd17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae6900"; - - #[test] - fn substrate_header_parse_fails() { - assert!(matches!(parse_substrate_header(&[]), Err(_))); - } - - #[test] - fn verify_substrate_finality_proof_succeeds() { - verify_substrate_finality_proof( - EXAMPLE_JUSTIFIED_BLOCK_NUMBER, - EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), - EXAMPLE_AUTHORITIES_SET_ID, - &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), - &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), - ) - .unwrap(); - } - - #[test] - fn verify_substrate_finality_proof_fails_when_wrong_block_is_finalized() { - verify_substrate_finality_proof( - 4, - Default::default(), - EXAMPLE_AUTHORITIES_SET_ID, - &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), - &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), - ) - .unwrap_err(); - } - - #[test] - 
fn verify_substrate_finality_proof_fails_when_wrong_set_is_provided() { - verify_substrate_finality_proof( - EXAMPLE_JUSTIFIED_BLOCK_NUMBER, - EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), - EXAMPLE_AUTHORITIES_SET_ID, - &hex::decode("deadbeef").unwrap(), - &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), - ) - .unwrap_err(); - } - - #[test] - fn verify_substrate_finality_proof_fails_when_wrong_set_id_is_provided() { - verify_substrate_finality_proof( - EXAMPLE_JUSTIFIED_BLOCK_NUMBER, - EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), - 42, - &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), - &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), - ) - .unwrap_err(); - } - - #[test] - fn verify_substrate_finality_proof_fails_when_wrong_proof_is_provided() { - verify_substrate_finality_proof( - EXAMPLE_JUSTIFIED_BLOCK_NUMBER, - EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), - 0, - &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), - &hex::decode("deadbeef").unwrap(), - ) - .unwrap_err(); - } -} diff --git a/polkadot/bridges/modules/ethereum/Cargo.toml b/polkadot/bridges/modules/ethereum/Cargo.toml deleted file mode 100644 index 94e4087010d6e8fb1939b447ffc732f85edcc79d..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/Cargo.toml +++ /dev/null @@ -1,49 +0,0 @@ -[package] -name = "pallet-bridge-eth-poa" -description = "A Substrate Runtime module that is able to verify PoA headers and their finality." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"], optional = true } -log = { version = "0.4.14", default-features = false } -serde = { version = "1.0", optional = true } - -# Bridge dependencies - -bp-eth-poa = { path = "../../primitives/ethereum-poa", default-features = false } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[dev-dependencies] -libsecp256k1 = { version = "0.3.4", features = ["hmac"] } -hex-literal = "0.3" - -[features] -default = ["std"] -std = [ - "bp-eth-poa/std", - "codec/std", - "frame-benchmarking/std", - "frame-support/std", - "frame-system/std", - "log/std", - "serde", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", -] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "libsecp256k1", -] diff --git a/polkadot/bridges/modules/ethereum/src/benchmarking.rs b/polkadot/bridges/modules/ethereum/src/benchmarking.rs deleted file mode 100644 index 960dbe9afec24ccafe56702f53ca3c8a0e6e97a6..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/benchmarking.rs +++ 
/dev/null @@ -1,270 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use super::*; - -use crate::test_utils::{ - build_custom_header, build_genesis_header, insert_header, validator_utils::*, validators_change_receipt, - HeaderBuilder, -}; - -use bp_eth_poa::{compute_merkle_root, U256}; -use frame_benchmarking::benchmarks_instance; -use frame_system::RawOrigin; - -benchmarks_instance! { - // Benchmark `import_unsigned_header` extrinsic with the best possible conditions: - // * Parent header is finalized. - // * New header doesn't require receipts. - // * Nothing is finalized by new header. - // * Nothing is pruned by new header. 
- import_unsigned_header_best_case { - let n in 1..1000; - - let num_validators = 2; - let initial_header = initialize_bench::(num_validators); - - // prepare header to be inserted - let header = build_custom_header( - &validator(1), - &initial_header, - |mut header| { - header.gas_limit = header.gas_limit + U256::from(n); - header - }, - ); - }: import_unsigned_header(RawOrigin::None, header, None) - verify { - let storage = BridgeStorage::::new(); - assert_eq!(storage.best_block().0.number, 1); - assert_eq!(storage.finalized_block().number, 0); - } - - // Our goal with this bench is to try and see the effect that finalizing difference ranges of - // blocks has on our import time. As such we need to make sure that we keep the number of - // validators fixed while changing the number blocks finalized (the complexity parameter) by - // importing the last header. - // - // One important thing to keep in mind is that the runtime provides a finality cache in order to - // reduce the overhead of header finalization. However, this is only triggered every 16 blocks. - import_unsigned_finality { - // Our complexity parameter, n, will represent the number of blocks imported before - // finalization. 
- let n in 1..7; - - let mut storage = BridgeStorage::::new(); - let num_validators: u32 = 2; - let initial_header = initialize_bench::(num_validators as usize); - - // Since we only have two validators we need to make sure the number of blocks is even to - // make sure the right validator signs the final block - let num_blocks = 2 * n; - let mut headers = Vec::new(); - let mut parent = initial_header.clone(); - - // Import a bunch of headers without any verification, will ensure that they're not - // finalized prematurely - for i in 1..=num_blocks { - let header = HeaderBuilder::with_parent(&parent).sign_by(&validator(0)); - let id = header.compute_id(); - insert_header(&mut storage, header.clone()); - headers.push(header.clone()); - parent = header; - } - - let last_header = headers.last().unwrap().clone(); - let last_authority = validator(1); - - // Need to make sure that the header we're going to import hasn't been inserted - // into storage already - let header = HeaderBuilder::with_parent(&last_header).sign_by(&last_authority); - }: import_unsigned_header(RawOrigin::None, header, None) - verify { - let storage = BridgeStorage::::new(); - assert_eq!(storage.best_block().0.number, (num_blocks + 1) as u64); - assert_eq!(storage.finalized_block().number, num_blocks as u64); - } - - // Basically the exact same as `import_unsigned_finality` but with a different range for the - // complexity parameter. In this bench we use a larger range of blocks to see how performance - // changes when the finality cache kicks in (>16 blocks). - import_unsigned_finality_with_cache { - // Our complexity parameter, n, will represent the number of blocks imported before - // finalization. 
- let n in 7..100; - - let mut storage = BridgeStorage::::new(); - let num_validators: u32 = 2; - let initial_header = initialize_bench::(num_validators as usize); - - // Since we only have two validators we need to make sure the number of blocks is even to - // make sure the right validator signs the final block - let num_blocks = 2 * n; - let mut headers = Vec::new(); - let mut parent = initial_header.clone(); - - // Import a bunch of headers without any verification, will ensure that they're not - // finalized prematurely - for i in 1..=num_blocks { - let header = HeaderBuilder::with_parent(&parent).sign_by(&validator(0)); - let id = header.compute_id(); - insert_header(&mut storage, header.clone()); - headers.push(header.clone()); - parent = header; - } - - let last_header = headers.last().unwrap().clone(); - let last_authority = validator(1); - - // Need to make sure that the header we're going to import hasn't been inserted - // into storage already - let header = HeaderBuilder::with_parent(&last_header).sign_by(&last_authority); - }: import_unsigned_header(RawOrigin::None, header, None) - verify { - let storage = BridgeStorage::::new(); - assert_eq!(storage.best_block().0.number, (num_blocks + 1) as u64); - assert_eq!(storage.finalized_block().number, num_blocks as u64); - } - - // A block import may trigger a pruning event, which adds extra work to the import progress. - // In this bench we trigger a pruning event in order to see how much extra time is spent by the - // runtime dealing with it. In the Ethereum Pallet, we're limited pruning to eight blocks in a - // single import, as dictated by MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT. 
- import_unsigned_pruning { - let n in 1..MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT as u32; - - let mut storage = BridgeStorage::::new(); - - let num_validators = 3; - let initial_header = initialize_bench::(num_validators as usize); - let validators = validators(num_validators); - - // Want to prune eligible blocks between [0, n) - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: 0, - oldest_block_to_keep: n as u64, - }); - - let mut parent = initial_header; - for i in 1..=n { - let header = HeaderBuilder::with_parent(&parent).sign_by_set(&validators); - let id = header.compute_id(); - insert_header(&mut storage, header.clone()); - parent = header; - } - - let header = HeaderBuilder::with_parent(&parent).sign_by_set(&validators); - }: import_unsigned_header(RawOrigin::None, header, None) - verify { - let storage = BridgeStorage::::new(); - let max_pruned: u64 = (n - 1) as _; - assert_eq!(storage.best_block().0.number, (n + 1) as u64); - assert!(HeadersByNumber::::get(&0).is_none()); - assert!(HeadersByNumber::::get(&max_pruned).is_none()); - } - - // The goal of this bench is to import a block which contains a transaction receipt. The receipt - // will contain a validator set change. Verifying the receipt root is an expensive operation to - // do, which is why we're interested in benchmarking it. - import_unsigned_with_receipts { - let n in 1..100; - - let mut storage = BridgeStorage::::new(); - - let num_validators = 1; - let initial_header = initialize_bench::(num_validators as usize); - - let mut receipts = vec![]; - for i in 1..=n { - let receipt = validators_change_receipt(Default::default()); - receipts.push(receipt) - } - let encoded_receipts = receipts.iter().map(|r| r.rlp()); - - // We need this extra header since this is what signals a validator set transition. 
This - // will ensure that the next header is within the "Contract" window - let header1 = HeaderBuilder::with_parent(&initial_header).sign_by(&validator(0)); - insert_header(&mut storage, header1.clone()); - - let header = build_custom_header( - &validator(0), - &header1, - |mut header| { - // Logs Bloom signals a change in validator set - header.log_bloom = (&[0xff; 256]).into(); - header.receipts_root = compute_merkle_root(encoded_receipts); - header - }, - ); - }: import_unsigned_header(RawOrigin::None, header, Some(receipts)) - verify { - let storage = BridgeStorage::::new(); - assert_eq!(storage.best_block().0.number, 2); - } -} - -fn initialize_bench, I: Instance>(num_validators: usize) -> AuraHeader { - // Initialize storage with some initial header - let initial_header = build_genesis_header(&validator(0)); - let initial_difficulty = initial_header.difficulty; - let initial_validators = validators_addresses(num_validators as usize); - - initialize_storage::(&initial_header, initial_difficulty, &initial_validators); - - initial_header -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{run_test, TestRuntime}; - use frame_support::assert_ok; - - #[test] - fn insert_unsigned_header_best_case() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_header_best_case::()); - }); - } - - #[test] - fn insert_unsigned_header_finality() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_finality::()); - }); - } - - #[test] - fn insert_unsigned_header_finality_with_cache() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_finality_with_cache::()); - }); - } - - #[test] - fn insert_unsigned_header_pruning() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_pruning::()); - }); - } - - #[test] - fn insert_unsigned_header_receipts() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_with_receipts::()); - }); - } -} diff --git a/polkadot/bridges/modules/ethereum/src/error.rs 
b/polkadot/bridges/modules/ethereum/src/error.rs deleted file mode 100644 index ad798379da7dcc827fdb31bb8e6e4dae57ee7307..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/error.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use sp_runtime::RuntimeDebug; - -/// Header import error. -#[derive(Clone, Copy, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(PartialEq))] -pub enum Error { - /// The header is beyond last finalized and can not be imported. - AncientHeader = 0, - /// The header is already imported. - KnownHeader = 1, - /// Seal has an incorrect format. - InvalidSealArity = 2, - /// Block number isn't sensible. - RidiculousNumber = 3, - /// Block has too much gas used. - TooMuchGasUsed = 4, - /// Gas limit header field is invalid. - InvalidGasLimit = 5, - /// Extra data is of an invalid length. - ExtraDataOutOfBounds = 6, - /// Timestamp header overflowed. - TimestampOverflow = 7, - /// The parent header is missing from the blockchain. - MissingParentBlock = 8, - /// The header step is missing from the header. - MissingStep = 9, - /// The header signature is missing from the header. - MissingSignature = 10, - /// Empty steps are missing from the header. 
- MissingEmptySteps = 11, - /// The same author issued different votes at the same step. - DoubleVote = 12, - /// Validation proof insufficient. - InsufficientProof = 13, - /// Difficulty header field is invalid. - InvalidDifficulty = 14, - /// The received block is from an incorrect proposer. - NotValidator = 15, - /// Missing transaction receipts for the operation. - MissingTransactionsReceipts = 16, - /// Redundant transaction receipts are provided. - RedundantTransactionsReceipts = 17, - /// Provided transactions receipts are not matching the header. - TransactionsReceiptsMismatch = 18, - /// Can't accept unsigned header from the far future. - UnsignedTooFarInTheFuture = 19, - /// Trying to finalize sibling of finalized block. - TryingToFinalizeSibling = 20, - /// Header timestamp is ahead of on-chain timestamp - HeaderTimestampIsAhead = 21, -} - -impl Error { - pub fn msg(&self) -> &'static str { - match *self { - Error::AncientHeader => "Header is beyound last finalized and can not be imported", - Error::KnownHeader => "Header is already imported", - Error::InvalidSealArity => "Header has an incorrect seal", - Error::RidiculousNumber => "Header has too large number", - Error::TooMuchGasUsed => "Header has too much gas used", - Error::InvalidGasLimit => "Header has invalid gas limit", - Error::ExtraDataOutOfBounds => "Header has too large extra data", - Error::TimestampOverflow => "Header has too large timestamp", - Error::MissingParentBlock => "Header has unknown parent hash", - Error::MissingStep => "Header is missing step seal", - Error::MissingSignature => "Header is missing signature seal", - Error::MissingEmptySteps => "Header is missing empty steps seal", - Error::DoubleVote => "Header has invalid step in seal", - Error::InsufficientProof => "Header has insufficient proof", - Error::InvalidDifficulty => "Header has invalid difficulty", - Error::NotValidator => "Header is sealed by unexpected validator", - Error::MissingTransactionsReceipts => "The 
import operation requires transactions receipts", - Error::RedundantTransactionsReceipts => "Redundant transactions receipts are provided", - Error::TransactionsReceiptsMismatch => "Invalid transactions receipts provided", - Error::UnsignedTooFarInTheFuture => "The unsigned header is too far in future", - Error::TryingToFinalizeSibling => "Trying to finalize sibling of finalized block", - Error::HeaderTimestampIsAhead => "Header timestamp is ahead of on-chain timestamp", - } - } - - /// Return unique error code. - pub fn code(&self) -> u8 { - *self as u8 - } -} diff --git a/polkadot/bridges/modules/ethereum/src/finality.rs b/polkadot/bridges/modules/ethereum/src/finality.rs deleted file mode 100644 index 4ab276db777e12040f03f4cc8f5ba5edb1220945..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/finality.rs +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::error::Error; -use crate::Storage; -use bp_eth_poa::{public_to_address, Address, AuraHeader, HeaderId, SealedEmptyStep, H256}; -use codec::{Decode, Encode}; -use sp_io::crypto::secp256k1_ecdsa_recover; -use sp_runtime::RuntimeDebug; -use sp_std::collections::{ - btree_map::{BTreeMap, Entry}, - btree_set::BTreeSet, - vec_deque::VecDeque, -}; -use sp_std::prelude::*; - -/// Cached finality votes for given block. -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct CachedFinalityVotes { - /// True if we have stopped at best finalized block' sibling. This means - /// that we are trying to finalize block from fork that has forked before - /// best finalized. - pub stopped_at_finalized_sibling: bool, - /// Header ancestors that were read while we have been searching for - /// cached votes entry. Newest header has index 0. - pub unaccounted_ancestry: VecDeque<(HeaderId, Option, AuraHeader)>, - /// Cached finality votes, if they have been found. The associated - /// header is not included into `unaccounted_ancestry`. - pub votes: Option>, -} - -/// Finality effects. -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct FinalityEffects { - /// Finalized headers. - pub finalized_headers: Vec<(HeaderId, Option)>, - /// Finality votes used in computation. - pub votes: FinalityVotes, -} - -/// Finality votes for given block. -#[derive(RuntimeDebug, Decode, Encode)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct FinalityVotes { - /// Number of votes per each validator. - pub votes: BTreeMap, - /// Ancestry blocks with oldest ancestors at the beginning and newest at the - /// end of the queue. - pub ancestry: VecDeque>, -} - -/// Information about block ancestor that is used in computations. -#[derive(RuntimeDebug, Decode, Encode)] -#[cfg_attr(test, derive(Clone, Default, PartialEq))] -pub struct FinalityAncestor { - /// Bock id. - pub id: HeaderId, - /// Block submitter. 
- pub submitter: Option, - /// Validators that have signed this block and empty steps on top - /// of this block. - pub signers: BTreeSet
, -} - -/// Tries to finalize blocks when given block is imported. -/// -/// Returns numbers and hashes of finalized blocks in ascending order. -pub fn finalize_blocks( - storage: &S, - best_finalized: HeaderId, - header_validators: (HeaderId, &[Address]), - id: HeaderId, - submitter: Option<&S::Submitter>, - header: &AuraHeader, - two_thirds_majority_transition: u64, -) -> Result, Error> { - // compute count of voters for every unfinalized block in ancestry - let validators = header_validators.1.iter().collect(); - let votes = prepare_votes( - header - .parent_id() - .map(|parent_id| { - storage.cached_finality_votes(&parent_id, &best_finalized, |hash| { - *hash == header_validators.0.hash || *hash == best_finalized.hash - }) - }) - .unwrap_or_default(), - best_finalized, - &validators, - id, - header, - submitter.cloned(), - )?; - - // now let's iterate in reverse order && find just finalized blocks - let mut finalized_headers = Vec::new(); - let mut current_votes = votes.votes.clone(); - for ancestor in &votes.ancestry { - if !is_finalized( - &validators, - ¤t_votes, - ancestor.id.number >= two_thirds_majority_transition, - ) { - break; - } - - remove_signers_votes(&ancestor.signers, &mut current_votes); - finalized_headers.push((ancestor.id, ancestor.submitter.clone())); - } - - Ok(FinalityEffects { - finalized_headers, - votes, - }) -} - -/// Returns true if there are enough votes to treat this header as finalized. -fn is_finalized( - validators: &BTreeSet<&Address>, - votes: &BTreeMap, - requires_two_thirds_majority: bool, -) -> bool { - (!requires_two_thirds_majority && votes.len() * 2 > validators.len()) - || (requires_two_thirds_majority && votes.len() * 3 > validators.len() * 2) -} - -/// Prepare 'votes' of header and its ancestors' signers. 
-pub(crate) fn prepare_votes( - mut cached_votes: CachedFinalityVotes, - best_finalized: HeaderId, - validators: &BTreeSet<&Address>, - id: HeaderId, - header: &AuraHeader, - submitter: Option, -) -> Result, Error> { - // if we have reached finalized block sibling, then we're trying - // to switch finalized blocks - if cached_votes.stopped_at_finalized_sibling { - return Err(Error::TryingToFinalizeSibling); - } - - // this fn can only work with single validators set - if !validators.contains(&header.author) { - return Err(Error::NotValidator); - } - - // now we have votes that were valid when some block B has been inserted - // things may have changed a bit, but we do not need to read anything else - // from the db, because we have ancestry - // so the only thing we need to do is: - // 1) remove votes from blocks that have been finalized after B has been inserted; - // 2) add votes from B descendants - let mut votes = cached_votes.votes.unwrap_or_default(); - - // remove votes from finalized blocks - while let Some(old_ancestor) = votes.ancestry.pop_front() { - if old_ancestor.id.number > best_finalized.number { - votes.ancestry.push_front(old_ancestor); - break; - } - - remove_signers_votes(&old_ancestor.signers, &mut votes.votes); - } - - // add votes from new blocks - let mut parent_empty_step_signers = empty_steps_signers(header); - let mut unaccounted_ancestry = VecDeque::new(); - while let Some((ancestor_id, ancestor_submitter, ancestor)) = cached_votes.unaccounted_ancestry.pop_front() { - let mut signers = empty_steps_signers(&ancestor); - sp_std::mem::swap(&mut signers, &mut parent_empty_step_signers); - signers.insert(ancestor.author); - - add_signers_votes(validators, &signers, &mut votes.votes)?; - - unaccounted_ancestry.push_front(FinalityAncestor { - id: ancestor_id, - submitter: ancestor_submitter, - signers, - }); - } - votes.ancestry.extend(unaccounted_ancestry); - - // add votes from block itself - let mut header_signers = BTreeSet::new(); - 
header_signers.insert(header.author); - *votes.votes.entry(header.author).or_insert(0) += 1; - votes.ancestry.push_back(FinalityAncestor { - id, - submitter, - signers: header_signers, - }); - - Ok(votes) -} - -/// Increase count of 'votes' for every passed signer. -/// Fails if at least one of signers is not in the `validators` set. -fn add_signers_votes( - validators: &BTreeSet<&Address>, - signers_to_add: &BTreeSet
, - votes: &mut BTreeMap, -) -> Result<(), Error> { - for signer in signers_to_add { - if !validators.contains(signer) { - return Err(Error::NotValidator); - } - - *votes.entry(*signer).or_insert(0) += 1; - } - - Ok(()) -} - -/// Decrease 'votes' count for every passed signer. -fn remove_signers_votes(signers_to_remove: &BTreeSet
, votes: &mut BTreeMap) { - for signer in signers_to_remove { - match votes.entry(*signer) { - Entry::Occupied(mut entry) => { - if *entry.get() <= 1 { - entry.remove(); - } else { - *entry.get_mut() -= 1; - } - } - Entry::Vacant(_) => unreachable!("we only remove signers that have been added; qed"), - } - } -} - -/// Returns unique set of empty steps signers. -fn empty_steps_signers(header: &AuraHeader) -> BTreeSet
{ - header - .empty_steps() - .into_iter() - .flatten() - .filter_map(|step| empty_step_signer(&step, &header.parent_hash)) - .collect::>() -} - -/// Returns author of empty step signature. -fn empty_step_signer(empty_step: &SealedEmptyStep, parent_hash: &H256) -> Option
{ - let message = empty_step.message(parent_hash); - secp256k1_ecdsa_recover(empty_step.signature.as_fixed_bytes(), message.as_fixed_bytes()) - .ok() - .map(|public| public_to_address(&public)) -} - -impl Default for CachedFinalityVotes { - fn default() -> Self { - CachedFinalityVotes { - stopped_at_finalized_sibling: false, - unaccounted_ancestry: VecDeque::new(), - votes: None, - } - } -} - -impl Default for FinalityVotes { - fn default() -> Self { - FinalityVotes { - votes: BTreeMap::new(), - ancestry: VecDeque::new(), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{insert_header, run_test, validator, validators_addresses, HeaderBuilder, TestRuntime}; - use crate::{BridgeStorage, FinalityCache, HeaderToImport}; - use frame_support::StorageMap; - - const TOTAL_VALIDATORS: usize = 5; - - #[test] - fn verifies_header_author() { - run_test(TOTAL_VALIDATORS, |_| { - assert_eq!( - finalize_blocks( - &BridgeStorage::::new(), - Default::default(), - (Default::default(), &[]), - Default::default(), - None, - &AuraHeader::default(), - 0, - ), - Err(Error::NotValidator), - ); - }); - } - - #[test] - fn finalize_blocks_works() { - run_test(TOTAL_VALIDATORS, |ctx| { - // let's say we have 5 validators (we need 'votes' from 3 validators to achieve - // finality) - let mut storage = BridgeStorage::::new(); - - // when header#1 is inserted, nothing is finalized (1 vote) - let header1 = HeaderBuilder::with_parent(&ctx.genesis).sign_by(&validator(0)); - let id1 = header1.compute_id(); - let mut header_to_import = HeaderToImport { - context: storage.import_context(None, &header1.parent_hash).unwrap(), - is_best: true, - id: id1, - header: header1, - total_difficulty: 0.into(), - enacted_change: None, - scheduled_change: None, - finality_votes: Default::default(), - }; - assert_eq!( - finalize_blocks( - &storage, - ctx.genesis.compute_id(), - (Default::default(), &ctx.addresses), - id1, - None, - &header_to_import.header, - u64::MAX, - ) - .map(|eff| 
eff.finalized_headers), - Ok(Vec::new()), - ); - storage.insert_header(header_to_import.clone()); - - // when header#2 is inserted, nothing is finalized (2 votes) - header_to_import.header = HeaderBuilder::with_parent_hash(id1.hash).sign_by(&validator(1)); - header_to_import.id = header_to_import.header.compute_id(); - let id2 = header_to_import.header.compute_id(); - assert_eq!( - finalize_blocks( - &storage, - ctx.genesis.compute_id(), - (Default::default(), &ctx.addresses), - id2, - None, - &header_to_import.header, - u64::MAX, - ) - .map(|eff| eff.finalized_headers), - Ok(Vec::new()), - ); - storage.insert_header(header_to_import.clone()); - - // when header#3 is inserted, header#1 is finalized (3 votes) - header_to_import.header = HeaderBuilder::with_parent_hash(id2.hash).sign_by(&validator(2)); - header_to_import.id = header_to_import.header.compute_id(); - let id3 = header_to_import.header.compute_id(); - assert_eq!( - finalize_blocks( - &storage, - ctx.genesis.compute_id(), - (Default::default(), &ctx.addresses), - id3, - None, - &header_to_import.header, - u64::MAX, - ) - .map(|eff| eff.finalized_headers), - Ok(vec![(id1, None)]), - ); - storage.insert_header(header_to_import); - }); - } - - #[test] - fn cached_votes_are_updated_with_ancestry() { - // we're inserting header#5 - // cached votes are from header#3 - // header#4 has finalized header#1 and header#2 - // => when inserting header#5, we need to: - // 1) remove votes from header#1 and header#2 - // 2) add votes from header#4 and header#5 - let validators = validators_addresses(5); - let headers = (1..6) - .map(|number| HeaderBuilder::with_number(number).sign_by(&validator(number as usize - 1))) - .collect::>(); - let ancestry = headers - .iter() - .map(|header| FinalityAncestor { - id: header.compute_id(), - signers: vec![header.author].into_iter().collect(), - ..Default::default() - }) - .collect::>(); - let header5 = headers[4].clone(); - assert_eq!( - prepare_votes::<()>( - CachedFinalityVotes { 
- stopped_at_finalized_sibling: false, - unaccounted_ancestry: vec![(headers[3].compute_id(), None, headers[3].clone()),] - .into_iter() - .collect(), - votes: Some(FinalityVotes { - votes: vec![(validators[0], 1), (validators[1], 1), (validators[2], 1),] - .into_iter() - .collect(), - ancestry: ancestry[..3].iter().cloned().collect(), - }), - }, - headers[1].compute_id(), - &validators.iter().collect(), - header5.compute_id(), - &header5, - None, - ) - .unwrap(), - FinalityVotes { - votes: vec![(validators[2], 1), (validators[3], 1), (validators[4], 1),] - .into_iter() - .collect(), - ancestry: ancestry[2..].iter().cloned().collect(), - }, - ); - } - - #[test] - fn prepare_votes_respects_finality_cache() { - run_test(TOTAL_VALIDATORS, |ctx| { - // we need signatures of 3 validators to finalize block - let mut storage = BridgeStorage::::new(); - - // headers 1..3 are signed by validator#0 - // headers 4..6 are signed by validator#1 - // headers 7..9 are signed by validator#2 - let mut hashes = Vec::new(); - let mut headers = Vec::new(); - let mut ancestry = Vec::new(); - let mut parent_hash = ctx.genesis.compute_hash(); - for i in 1..10 { - let header = HeaderBuilder::with_parent_hash(parent_hash).sign_by(&validator((i - 1) / 3)); - let id = header.compute_id(); - insert_header(&mut storage, header.clone()); - hashes.push(id.hash); - ancestry.push(FinalityAncestor { - id: header.compute_id(), - submitter: None, - signers: vec![header.author].into_iter().collect(), - }); - headers.push(header); - parent_hash = id.hash; - } - - // when we're inserting header#7 and last finalized header is 0: - // check that votes at #7 are computed correctly without cache - let expected_votes_at_7 = FinalityVotes { - votes: vec![(ctx.addresses[0], 3), (ctx.addresses[1], 3), (ctx.addresses[2], 1)] - .into_iter() - .collect(), - ancestry: ancestry[..7].iter().cloned().collect(), - }; - let id7 = headers[6].compute_id(); - assert_eq!( - prepare_votes( - storage.cached_finality_votes( - 
&headers.get(5).unwrap().compute_id(), - &ctx.genesis.compute_id(), - |_| false, - ), - Default::default(), - &ctx.addresses.iter().collect(), - id7, - headers.get(6).unwrap(), - None, - ) - .unwrap(), - expected_votes_at_7, - ); - - // cached votes at #5 - let expected_votes_at_5 = FinalityVotes { - votes: vec![(ctx.addresses[0], 3), (ctx.addresses[1], 2)].into_iter().collect(), - ancestry: ancestry[..5].iter().cloned().collect(), - }; - FinalityCache::::insert(hashes[4], expected_votes_at_5); - - // when we're inserting header#7 and last finalized header is 0: - // check that votes at #7 are computed correctly with cache - assert_eq!( - prepare_votes( - storage.cached_finality_votes( - &headers.get(5).unwrap().compute_id(), - &ctx.genesis.compute_id(), - |_| false, - ), - Default::default(), - &ctx.addresses.iter().collect(), - id7, - headers.get(6).unwrap(), - None, - ) - .unwrap(), - expected_votes_at_7, - ); - - // when we're inserting header#7 and last finalized header is 3: - // check that votes at #7 are computed correctly with cache - let expected_votes_at_7 = FinalityVotes { - votes: vec![(ctx.addresses[1], 3), (ctx.addresses[2], 1)].into_iter().collect(), - ancestry: ancestry[3..7].iter().cloned().collect(), - }; - assert_eq!( - prepare_votes( - storage.cached_finality_votes( - &headers.get(5).unwrap().compute_id(), - &headers.get(2).unwrap().compute_id(), - |hash| *hash == hashes[2], - ), - headers[2].compute_id(), - &ctx.addresses.iter().collect(), - id7, - headers.get(6).unwrap(), - None, - ) - .unwrap(), - expected_votes_at_7, - ); - }); - } - - #[test] - fn prepare_votes_fails_when_finalized_sibling_is_in_ancestry() { - assert_eq!( - prepare_votes::<()>( - CachedFinalityVotes { - stopped_at_finalized_sibling: true, - ..Default::default() - }, - Default::default(), - &validators_addresses(3).iter().collect(), - Default::default(), - &Default::default(), - None, - ), - Err(Error::TryingToFinalizeSibling), - ); - } -} diff --git 
a/polkadot/bridges/modules/ethereum/src/import.rs b/polkadot/bridges/modules/ethereum/src/import.rs deleted file mode 100644 index a6945240cde4da2698de11a5d897b3a0d59a800d..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/import.rs +++ /dev/null @@ -1,609 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::error::Error; -use crate::finality::finalize_blocks; -use crate::validators::{Validators, ValidatorsConfiguration}; -use crate::verification::{is_importable_header, verify_aura_header}; -use crate::{AuraConfiguration, ChainTime, ChangeToEnact, PruningStrategy, Storage}; -use bp_eth_poa::{AuraHeader, HeaderId, Receipt}; -use sp_std::{collections::btree_map::BTreeMap, prelude::*}; - -/// Imports bunch of headers and updates blocks finality. -/// -/// Transactions receipts must be provided if `header_import_requires_receipts()` -/// has returned true. -/// If successful, returns tuple where first element is the number of useful headers -/// we have imported and the second element is the number of useless headers (duplicate) -/// we have NOT imported. -/// Returns error if fatal error has occured during import. Some valid headers may be -/// imported in this case. 
-/// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/415) -#[allow(clippy::too_many_arguments)] -pub fn import_headers( - storage: &mut S, - pruning_strategy: &mut PS, - aura_config: &AuraConfiguration, - validators_config: &ValidatorsConfiguration, - submitter: Option, - headers: Vec<(AuraHeader, Option>)>, - chain_time: &CT, - finalized_headers: &mut BTreeMap, -) -> Result<(u64, u64), Error> { - let mut useful = 0; - let mut useless = 0; - for (header, receipts) in headers { - let import_result = import_header( - storage, - pruning_strategy, - aura_config, - validators_config, - submitter.clone(), - header, - chain_time, - receipts, - ); - - match import_result { - Ok((_, finalized)) => { - for (_, submitter) in finalized { - if let Some(submitter) = submitter { - *finalized_headers.entry(submitter).or_default() += 1; - } - } - useful += 1; - } - Err(Error::AncientHeader) | Err(Error::KnownHeader) => useless += 1, - Err(error) => return Err(error), - } - } - - Ok((useful, useless)) -} - -/// A vector of finalized headers and their submitters. -pub type FinalizedHeaders = Vec<(HeaderId, Option<::Submitter>)>; - -/// Imports given header and updates blocks finality (if required). -/// -/// Transactions receipts must be provided if `header_import_requires_receipts()` -/// has returned true. -/// -/// Returns imported block id and list of all finalized headers. 
-/// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/415) -#[allow(clippy::too_many_arguments)] -pub fn import_header( - storage: &mut S, - pruning_strategy: &mut PS, - aura_config: &AuraConfiguration, - validators_config: &ValidatorsConfiguration, - submitter: Option, - header: AuraHeader, - chain_time: &CT, - receipts: Option>, -) -> Result<(HeaderId, FinalizedHeaders), Error> { - // first check that we are able to import this header at all - let (header_id, finalized_id) = is_importable_header(storage, &header)?; - - // verify header - let import_context = verify_aura_header(storage, aura_config, submitter, &header, chain_time)?; - - // check if block schedules new validators - let validators = Validators::new(validators_config); - let (scheduled_change, enacted_change) = validators.extract_validators_change(&header, receipts)?; - - // check if block finalizes some other blocks and corresponding scheduled validators - let validators_set = import_context.validators_set(); - let finalized_blocks = finalize_blocks( - storage, - finalized_id, - (validators_set.enact_block, &validators_set.validators), - header_id, - import_context.submitter(), - &header, - aura_config.two_thirds_majority_transition, - )?; - let enacted_change = enacted_change - .map(|validators| ChangeToEnact { - signal_block: None, - validators, - }) - .or_else(|| validators.finalize_validators_change(storage, &finalized_blocks.finalized_headers)); - - // NOTE: we can't return Err() from anywhere below this line - // (because otherwise we'll have inconsistent storage if transaction will fail) - - // and finally insert the block - let (best_id, best_total_difficulty) = storage.best_block(); - let total_difficulty = import_context.total_difficulty() + header.difficulty; - let is_best = total_difficulty > best_total_difficulty; - storage.insert_header(import_context.into_import_header( - is_best, - header_id, - header, - total_difficulty, - enacted_change, - 
scheduled_change, - finalized_blocks.votes, - )); - - // compute upper border of updated pruning range - let new_best_block_id = if is_best { header_id } else { best_id }; - let new_best_finalized_block_id = finalized_blocks.finalized_headers.last().map(|(id, _)| *id); - let pruning_upper_bound = pruning_strategy.pruning_upper_bound( - new_best_block_id.number, - new_best_finalized_block_id - .map(|id| id.number) - .unwrap_or(finalized_id.number), - ); - - // now mark finalized headers && prune old headers - storage.finalize_and_prune_headers(new_best_finalized_block_id, pruning_upper_bound); - - Ok((header_id, finalized_blocks.finalized_headers)) -} - -/// Returns true if transactions receipts are required to import given header. -pub fn header_import_requires_receipts( - storage: &S, - validators_config: &ValidatorsConfiguration, - header: &AuraHeader, -) -> bool { - is_importable_header(storage, header) - .map(|_| Validators::new(validators_config)) - .map(|validators| validators.maybe_signals_validators_change(header)) - .unwrap_or(false) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{ - run_test, secret_to_address, test_aura_config, test_validators_config, validator, validators_addresses, - validators_change_receipt, HeaderBuilder, KeepSomeHeadersBehindBest, TestRuntime, GAS_LIMIT, - }; - use crate::validators::ValidatorsSource; - use crate::DefaultInstance; - use crate::{BlocksToPrune, BridgeStorage, Headers, PruningRange}; - use frame_support::{StorageMap, StorageValue}; - use secp256k1::SecretKey; - - const TOTAL_VALIDATORS: usize = 3; - - #[test] - fn rejects_finalized_block_competitors() { - run_test(TOTAL_VALIDATORS, |_| { - let mut storage = BridgeStorage::::new(); - storage.finalize_and_prune_headers( - Some(HeaderId { - number: 100, - ..Default::default() - }), - 0, - ); - assert_eq!( - import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &test_validators_config(), - None, - 
Default::default(), - &(), - None, - ), - Err(Error::AncientHeader), - ); - }); - } - - #[test] - fn rejects_known_header() { - run_test(TOTAL_VALIDATORS, |ctx| { - let mut storage = BridgeStorage::::new(); - let header = HeaderBuilder::with_parent(&ctx.genesis).sign_by(&validator(1)); - assert_eq!( - import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &test_validators_config(), - None, - header.clone(), - &(), - None, - ) - .map(|_| ()), - Ok(()), - ); - assert_eq!( - import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &test_validators_config(), - None, - header, - &(), - None, - ) - .map(|_| ()), - Err(Error::KnownHeader), - ); - }); - } - - #[test] - fn import_header_works() { - run_test(TOTAL_VALIDATORS, |ctx| { - let validators_config = ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(ctx.addresses.clone())), - (1, ValidatorsSource::List(validators_addresses(2))), - ]); - let mut storage = BridgeStorage::::new(); - let header = HeaderBuilder::with_parent(&ctx.genesis).sign_by(&validator(1)); - let hash = header.compute_hash(); - assert_eq!( - import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - None, - header, - &(), - None - ) - .map(|_| ()), - Ok(()), - ); - - // check that new validators will be used for next header - let imported_header = Headers::::get(&hash).unwrap(); - assert_eq!( - imported_header.next_validators_set_id, - 1, // new set is enacted from config - ); - }); - } - - #[test] - fn headers_are_pruned_during_import() { - run_test(TOTAL_VALIDATORS, |ctx| { - let validators_config = - ValidatorsConfiguration::Single(ValidatorsSource::Contract([3; 20].into(), ctx.addresses.clone())); - let validators = vec![validator(0), validator(1), validator(2)]; - let mut storage = BridgeStorage::::new(); - - // header [0..11] are finalizing blocks [0; 9] - // => since we want 
to keep 10 finalized blocks, we aren't pruning anything - let mut latest_block_id = Default::default(); - for i in 1..11 { - let header = HeaderBuilder::with_parent_number(i - 1).sign_by_set(&validators); - let parent_id = header.parent_id().unwrap(); - - let (rolling_last_block_id, finalized_blocks) = import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - Some(100), - header, - &(), - None, - ) - .unwrap(); - match i { - 2..=10 => assert_eq!(finalized_blocks, vec![(parent_id, Some(100))], "At {}", i,), - _ => assert_eq!(finalized_blocks, vec![], "At {}", i), - } - latest_block_id = rolling_last_block_id; - } - assert!(storage.header(&ctx.genesis.compute_hash()).is_some()); - - // header 11 finalizes headers [10] AND schedules change - // => we prune header#0 - let header11 = HeaderBuilder::with_parent_number(10) - .log_bloom((&[0xff; 256]).into()) - .receipts_root( - "ead6c772ba0083bbff497ba0f4efe47c199a2655401096c21ab7450b6c466d97" - .parse() - .unwrap(), - ) - .sign_by_set(&validators); - let parent_id = header11.parent_id().unwrap(); - let (rolling_last_block_id, finalized_blocks) = import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - Some(101), - header11.clone(), - &(), - Some(vec![validators_change_receipt(latest_block_id.hash)]), - ) - .unwrap(); - assert_eq!(finalized_blocks, vec![(parent_id, Some(100))]); - assert!(storage.header(&ctx.genesis.compute_hash()).is_none()); - latest_block_id = rolling_last_block_id; - - // and now let's say validators 1 && 2 went offline - // => in the range 12-25 no blocks are finalized, but we still continue to prune old headers - // until header#11 is met. 
we can't prune #11, because it schedules change - let mut step = 56u64; - let mut expected_blocks = vec![(header11.compute_id(), Some(101))]; - for i in 12..25 { - let header = HeaderBuilder::with_parent_hash(latest_block_id.hash) - .difficulty(i.into()) - .step(step) - .sign_by_set(&validators); - expected_blocks.push((header.compute_id(), Some(102))); - let (rolling_last_block_id, finalized_blocks) = import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - Some(102), - header, - &(), - None, - ) - .unwrap(); - assert_eq!(finalized_blocks, vec![]); - latest_block_id = rolling_last_block_id; - step += 3; - } - assert_eq!( - BlocksToPrune::::get(), - PruningRange { - oldest_unpruned_block: 11, - oldest_block_to_keep: 14, - }, - ); - - // now let's insert block signed by validator 1 - // => blocks 11..24 are finalized and blocks 11..14 are pruned - step -= 2; - let header = HeaderBuilder::with_parent_hash(latest_block_id.hash) - .difficulty(25.into()) - .step(step) - .sign_by_set(&validators); - let (_, finalized_blocks) = import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - Some(103), - header, - &(), - None, - ) - .unwrap(); - assert_eq!(finalized_blocks, expected_blocks); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { - oldest_unpruned_block: 15, - oldest_block_to_keep: 15, - }, - ); - }); - } - - fn import_custom_block( - storage: &mut S, - validators: &[SecretKey], - header: AuraHeader, - ) -> Result { - let id = header.compute_id(); - import_header( - storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &ValidatorsConfiguration::Single(ValidatorsSource::Contract( - [0; 20].into(), - validators.iter().map(secret_to_address).collect(), - )), - None, - header, - &(), - None, - ) - .map(|_| id) - } - - #[test] - fn import_of_non_best_block_may_finalize_blocks() { - run_test(TOTAL_VALIDATORS, 
|ctx| { - let mut storage = BridgeStorage::::new(); - - // insert headers (H1, validator1), (H2, validator1), (H3, validator1) - // making H3 the best header, without finalizing anything (we need 2 signatures) - let mut expected_best_block = Default::default(); - for i in 1..4 { - let step = 1 + i * TOTAL_VALIDATORS as u64; - expected_best_block = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(i - 1) - .step(step) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - } - let (best_block, best_difficulty) = storage.best_block(); - assert_eq!(best_block, expected_best_block); - assert_eq!(storage.finalized_block(), ctx.genesis.compute_id()); - - // insert headers (H1', validator1), (H2', validator2), finalizing H2, even though H3 - // has better difficulty than H2' (because there are more steps involved) - let mut expected_finalized_block = Default::default(); - let mut parent_hash = ctx.genesis.compute_hash(); - for i in 1..3 { - let step = i; - let id = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(parent_hash) - .step(step) - .gas_limit((GAS_LIMIT + 1).into()) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - parent_hash = id.hash; - if i == 1 { - expected_finalized_block = id; - } - } - let (new_best_block, new_best_difficulty) = storage.best_block(); - assert_eq!(new_best_block, expected_best_block); - assert_eq!(new_best_difficulty, best_difficulty); - assert_eq!(storage.finalized_block(), expected_finalized_block); - }); - } - - #[test] - fn append_to_unfinalized_fork_fails() { - const VALIDATORS: u64 = 5; - run_test(VALIDATORS as usize, |ctx| { - let mut storage = BridgeStorage::::new(); - - // header1, authored by validator[2] is best common block between two competing forks - let header1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(0) - .step(2) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - 
assert_eq!(storage.best_block().0, header1); - assert_eq!(storage.finalized_block().number, 0); - - // validator[3] has authored header2 (nothing is finalized yet) - let header2 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(1) - .step(3) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header2); - assert_eq!(storage.finalized_block().number, 0); - - // validator[4] has authored header3 (header1 is finalized) - let header3 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(2) - .step(4) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header3); - assert_eq!(storage.finalized_block(), header1); - - // validator[4] has authored 4 blocks: header2'...header5' (header1 is still finalized) - let header2_1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(1) - .gas_limit((GAS_LIMIT + 1).into()) - .step(4) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - let header3_1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(header2_1.hash) - .step(4 + VALIDATORS) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - let header4_1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(header3_1.hash) - .step(4 + VALIDATORS * 2) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - let header5_1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(header4_1.hash) - .step(4 + VALIDATORS * 3) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header5_1); - assert_eq!(storage.finalized_block(), header1); - - // when we import header4 { parent = header3 }, authored by validator[0], header2 is finalized - let header4 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(3) - .step(5) - 
.sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header5_1); - assert_eq!(storage.finalized_block(), header2); - - // when we import header5 { parent = header4 }, authored by validator[1], header3 is finalized - let header5 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(header4.hash) - .step(6) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header5); - assert_eq!(storage.finalized_block(), header3); - - // import of header2'' { parent = header1 } fails, because it has number < best_finalized - assert_eq!( - import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(1) - .gas_limit((GAS_LIMIT + 1).into()) - .step(3) - .sign_by_set(&ctx.validators) - ), - Err(Error::AncientHeader), - ); - - // import of header6' should also fail because we're trying to append to fork thas - // has forked before finalized block - assert_eq!( - import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(5) - .gas_limit((GAS_LIMIT + 1).into()) - .step(5 + VALIDATORS * 4) - .sign_by_set(&ctx.validators), - ), - Err(Error::TryingToFinalizeSibling), - ); - }); - } -} diff --git a/polkadot/bridges/modules/ethereum/src/lib.rs b/polkadot/bridges/modules/ethereum/src/lib.rs deleted file mode 100644 index b25b2b8e635e23f0c60dfecd57f5e1aae04f8762..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/lib.rs +++ /dev/null @@ -1,1533 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// Runtime-generated enums -#![allow(clippy::large_enum_variant)] - -use crate::finality::{CachedFinalityVotes, FinalityVotes}; -use bp_eth_poa::{Address, AuraHeader, HeaderId, RawTransaction, RawTransactionReceipt, Receipt, H256, U256}; -use codec::{Decode, Encode}; -use frame_support::{decl_module, decl_storage, traits::Get}; -use sp_runtime::{ - transaction_validity::{ - InvalidTransaction, TransactionLongevity, TransactionPriority, TransactionSource, TransactionValidity, - UnknownTransaction, ValidTransaction, - }, - RuntimeDebug, -}; -use sp_std::{cmp::Ord, collections::btree_map::BTreeMap, prelude::*}; - -pub use validators::{ValidatorsConfiguration, ValidatorsSource}; - -mod error; -mod finality; -mod import; -mod validators; -mod verification; - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; - -#[cfg(test)] -mod mock; - -#[cfg(any(feature = "runtime-benchmarks", test))] -pub mod test_utils; - -/// Maximal number of blocks we're pruning in single import call. -const MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT: u64 = 8; - -/// Authority round engine configuration parameters. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] -pub struct AuraConfiguration { - /// Empty step messages transition block. - pub empty_steps_transition: u64, - /// Transition block to strict empty steps validation. - pub strict_empty_steps_transition: u64, - /// Monotonic step validation transition block. - pub validate_step_transition: u64, - /// Chain score validation transition block. 
- pub validate_score_transition: u64, - /// First block for which a 2/3 quorum (instead of 1/2) is required. - pub two_thirds_majority_transition: u64, - /// Minimum gas limit. - pub min_gas_limit: U256, - /// Maximum gas limit. - pub max_gas_limit: U256, - /// Maximum size of extra data. - pub maximum_extra_data_size: u64, -} - -/// Transaction pool configuration. -/// -/// This is used to limit number of unsigned headers transactions in -/// the pool. We never use it to verify signed transactions. -pub struct PoolConfiguration { - /// Maximal difference between number of header from unsigned transaction - /// and current best block. This must be selected with caution - the more - /// is the difference, the more (potentially invalid) transactions could be - /// accepted to the pool and mined later (filling blocks with spam). - pub max_future_number_difference: u64, -} - -/// Block header as it is stored in the runtime storage. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] -pub struct StoredHeader { - /// Submitter of this header. May be `None` if header has been submitted - /// using unsigned transaction. - pub submitter: Option, - /// The block header itself. - pub header: AuraHeader, - /// Total difficulty of the chain. - pub total_difficulty: U256, - /// The ID of set of validators that is expected to produce direct descendants of - /// this block. If header enacts new set, this would be the new set. Otherwise - /// this is the set that has produced the block itself. - /// The hash is the hash of block where validators set has been enacted. - pub next_validators_set_id: u64, - /// Hash of the last block which has **SCHEDULED** validators set change. - /// Note that signal doesn't mean that the set has been (or ever will be) enacted. - /// Note that the header may already be pruned. - pub last_signal_block: Option, -} - -/// Validators set as it is stored in the runtime storage. 
-#[derive(Encode, Decode, PartialEq, RuntimeDebug)] -#[cfg_attr(test, derive(Clone))] -pub struct ValidatorsSet { - /// Validators of this set. - pub validators: Vec
, - /// Hash of the block where this set has been signalled. None if this is the first set. - pub signal_block: Option, - /// Hash of the block where this set has been enacted. - pub enact_block: HeaderId, -} - -/// Validators set change as it is stored in the runtime storage. -#[derive(Encode, Decode, PartialEq, RuntimeDebug)] -#[cfg_attr(test, derive(Clone))] -pub struct AuraScheduledChange { - /// Validators of this set. - pub validators: Vec
, - /// Hash of the block which has emitted previous validators change signal. - pub prev_signal_block: Option, -} - -/// Header that we're importing. -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct HeaderToImport { - /// Header import context, - pub context: ImportContext, - /// Should we consider this header as best? - pub is_best: bool, - /// The id of the header. - pub id: HeaderId, - /// The header itself. - pub header: AuraHeader, - /// Total chain difficulty at the header. - pub total_difficulty: U256, - /// New validators set and the hash of block where it has been scheduled (if applicable). - /// Some if set is is enacted by this header. - pub enacted_change: Option, - /// Validators set scheduled change, if happened at the header. - pub scheduled_change: Option>, - /// Finality votes at this header. - pub finality_votes: FinalityVotes, -} - -/// Header that we're importing. -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct ChangeToEnact { - /// The id of the header where change has been scheduled. - /// None if it is a first set within current `ValidatorsSource`. - pub signal_block: Option, - /// Validators set that is enacted. - pub validators: Vec
, -} - -/// Blocks range that we want to prune. -#[derive(Encode, Decode, Default, RuntimeDebug, Clone, PartialEq)] -struct PruningRange { - /// Number of the oldest unpruned block(s). This might be the block that we do not - /// want to prune now (then it is equal to `oldest_block_to_keep`), or block that we - /// were unable to prune for whatever reason (i.e. if it isn't finalized yet and has - /// scheduled validators set change). - pub oldest_unpruned_block: u64, - /// Number of oldest block(s) that we want to keep. We want to prune blocks in range - /// [`oldest_unpruned_block`; `oldest_block_to_keep`). - pub oldest_block_to_keep: u64, -} - -/// Header import context. -/// -/// The import context contains information needed by the header verification -/// pipeline which is not directly part of the header being imported. This includes -/// information relating to its parent, and the current validator set (which -/// provide _context_ for the current header). -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct ImportContext { - submitter: Option, - parent_hash: H256, - parent_header: AuraHeader, - parent_total_difficulty: U256, - parent_scheduled_change: Option, - validators_set_id: u64, - validators_set: ValidatorsSet, - last_signal_block: Option, -} - -impl ImportContext { - /// Returns reference to header submitter (if known). - pub fn submitter(&self) -> Option<&Submitter> { - self.submitter.as_ref() - } - - /// Returns reference to parent header. - pub fn parent_header(&self) -> &AuraHeader { - &self.parent_header - } - - /// Returns total chain difficulty at parent block. - pub fn total_difficulty(&self) -> &U256 { - &self.parent_total_difficulty - } - - /// Returns the validator set change if the parent header has signaled a change. - pub fn parent_scheduled_change(&self) -> Option<&AuraScheduledChange> { - self.parent_scheduled_change.as_ref() - } - - /// Returns id of the set of validators. 
- pub fn validators_set_id(&self) -> u64 { - self.validators_set_id - } - - /// Returns reference to validators set for the block we're going to import. - pub fn validators_set(&self) -> &ValidatorsSet { - &self.validators_set - } - - /// Returns reference to the latest block which has signalled change of validators set. - /// This may point to parent if parent has signalled change. - pub fn last_signal_block(&self) -> Option { - match self.parent_scheduled_change { - Some(_) => Some(HeaderId { - number: self.parent_header.number, - hash: self.parent_hash, - }), - None => self.last_signal_block, - } - } - - /// Converts import context into header we're going to import. - #[allow(clippy::too_many_arguments)] - pub fn into_import_header( - self, - is_best: bool, - id: HeaderId, - header: AuraHeader, - total_difficulty: U256, - enacted_change: Option, - scheduled_change: Option>, - finality_votes: FinalityVotes, - ) -> HeaderToImport { - HeaderToImport { - context: self, - is_best, - id, - header, - total_difficulty, - enacted_change, - scheduled_change, - finality_votes, - } - } -} - -/// The storage that is used by the client. -/// -/// Storage modification must be discarded if block import has failed. -pub trait Storage { - /// Header submitter identifier. - type Submitter: Clone + Ord; - - /// Get best known block and total chain difficulty. - fn best_block(&self) -> (HeaderId, U256); - /// Get last finalized block. - fn finalized_block(&self) -> HeaderId; - /// Get imported header by its hash. - /// - /// Returns header and its submitter (if known). - fn header(&self, hash: &H256) -> Option<(AuraHeader, Option)>; - /// Returns latest cached finality votes (if any) for block ancestors, starting - /// from `parent_hash` block and stopping at genesis block, best finalized block - /// or block where `stop_at` returns true. 
- fn cached_finality_votes( - &self, - parent: &HeaderId, - best_finalized: &HeaderId, - stop_at: impl Fn(&H256) -> bool, - ) -> CachedFinalityVotes; - /// Get header import context by parent header hash. - fn import_context( - &self, - submitter: Option, - parent_hash: &H256, - ) -> Option>; - /// Get new validators that are scheduled by given header and hash of the previous - /// block that has scheduled change. - fn scheduled_change(&self, hash: &H256) -> Option; - /// Insert imported header. - fn insert_header(&mut self, header: HeaderToImport); - /// Finalize given block and schedules pruning of all headers - /// with number < prune_end. - /// - /// The headers in the pruning range could be either finalized, or not. - /// It is the storage duty to ensure that unfinalized headers that have - /// scheduled changes won't be pruned until they or their competitors - /// are finalized. - fn finalize_and_prune_headers(&mut self, finalized: Option, prune_end: u64); -} - -/// Headers pruning strategy. -pub trait PruningStrategy: Default { - /// Return upper bound (exclusive) of headers pruning range. - /// - /// Every value that is returned from this function, must be greater or equal to the - /// previous value. Otherwise it will be ignored (we can't revert pruning). - /// - /// Pallet may prune both finalized and unfinalized blocks. But it can't give any - /// guarantees on when it will happen. Example: if some unfinalized block at height N - /// has scheduled validators set change, then the module won't prune any blocks with - /// number >= N even if strategy allows that. - /// - /// If your strategy allows pruning unfinalized blocks, this could lead to switch - /// between finalized forks (only if authorities are misbehaving). But since 50%+1 (or 2/3) - /// authorities are able to do whatever they want with the chain, this isn't considered - /// fatal. 
If your strategy only prunes finalized blocks, we'll never be able to finalize - /// header that isn't descendant of current best finalized block. - fn pruning_upper_bound(&mut self, best_number: u64, best_finalized_number: u64) -> u64; -} - -/// ChainTime represents the runtime on-chain time -pub trait ChainTime: Default { - /// Is a header timestamp ahead of the current on-chain time. - /// - /// Check whether `timestamp` is ahead (i.e greater than) the current on-chain - /// time. If so, return `true`, `false` otherwise. - fn is_timestamp_ahead(&self, timestamp: u64) -> bool; -} - -/// ChainTime implementation for the empty type. -/// -/// This implementation will allow a runtime without the timestamp pallet to use -/// the empty type as its ChainTime associated type. -impl ChainTime for () { - fn is_timestamp_ahead(&self, _: u64) -> bool { - false - } -} - -/// Callbacks for header submission rewards/penalties. -pub trait OnHeadersSubmitted { - /// Called when valid headers have been submitted. - /// - /// The submitter **must not** be rewarded for submitting valid headers, because greedy authority - /// could produce and submit multiple valid headers (without relaying them to other peers) and - /// get rewarded. Instead, the provider could track submitters and stop rewarding if too many - /// headers have been submitted without finalization. - fn on_valid_headers_submitted(submitter: AccountId, useful: u64, useless: u64); - /// Called when invalid headers have been submitted. - fn on_invalid_headers_submitted(submitter: AccountId); - /// Called when earlier submitted headers have been finalized. - /// - /// finalized is the number of headers that submitter has submitted and which - /// have been finalized. 
- fn on_valid_headers_finalized(submitter: AccountId, finalized: u64); -} - -impl OnHeadersSubmitted for () { - fn on_valid_headers_submitted(_submitter: AccountId, _useful: u64, _useless: u64) {} - fn on_invalid_headers_submitted(_submitter: AccountId) {} - fn on_valid_headers_finalized(_submitter: AccountId, _finalized: u64) {} -} - -/// The module configuration trait. -pub trait Config: frame_system::Config { - /// Aura configuration. - type AuraConfiguration: Get; - /// Validators configuration. - type ValidatorsConfiguration: Get; - - /// Interval (in blocks) for for finality votes caching. - /// If None, cache is disabled. - /// - /// Ideally, this should either be None (when we are sure that there won't - /// be any significant finalization delays), or something that is bit larger - /// than average finalization delay. - type FinalityVotesCachingInterval: Get>; - /// Headers pruning strategy. - type PruningStrategy: PruningStrategy; - /// Header timestamp verification against current on-chain time. - type ChainTime: ChainTime; - - /// Handler for headers submission result. - type OnHeadersSubmitted: OnHeadersSubmitted; -} - -decl_module! { - pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { - /// Import single Aura header. Requires transaction to be **UNSIGNED**. - #[weight = 0] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - pub fn import_unsigned_header(origin, header: AuraHeader, receipts: Option>) { - frame_system::ensure_none(origin)?; - - import::import_header( - &mut BridgeStorage::::new(), - &mut T::PruningStrategy::default(), - &T::AuraConfiguration::get(), - &T::ValidatorsConfiguration::get(), - None, - header, - &T::ChainTime::default(), - receipts, - ).map_err(|e| e.msg())?; - } - - /// Import Aura chain headers in a single **SIGNED** transaction. 
- /// Ignores non-fatal errors (like when known header is provided), rewards - /// for successful headers import and penalizes for fatal errors. - /// - /// This should be used with caution - passing too many headers could lead to - /// enormous block production/import time. - #[weight = 0] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - pub fn import_signed_headers(origin, headers_with_receipts: Vec<(AuraHeader, Option>)>) { - let submitter = frame_system::ensure_signed(origin)?; - let mut finalized_headers = BTreeMap::new(); - let import_result = import::import_headers( - &mut BridgeStorage::::new(), - &mut T::PruningStrategy::default(), - &T::AuraConfiguration::get(), - &T::ValidatorsConfiguration::get(), - Some(submitter.clone()), - headers_with_receipts, - &T::ChainTime::default(), - &mut finalized_headers, - ); - - // if we have finalized some headers, we will reward their submitters even - // if current submitter has provided some invalid headers - for (f_submitter, f_count) in finalized_headers { - T::OnHeadersSubmitted::on_valid_headers_finalized( - f_submitter, - f_count, - ); - } - - // now track/penalize current submitter for providing new headers - match import_result { - Ok((useful, useless)) => - T::OnHeadersSubmitted::on_valid_headers_submitted(submitter, useful, useless), - Err(error) => { - // even though we may have accept some headers, we do not want to reward someone - // who provides invalid headers - T::OnHeadersSubmitted::on_invalid_headers_submitted(submitter); - return Err(error.msg().into()); - }, - } - } - } -} - -decl_storage! { - trait Store for Pallet, I: Instance = DefaultInstance> as Bridge { - /// Best known block. - BestBlock: (HeaderId, U256); - /// Best finalized block. - FinalizedBlock: HeaderId; - /// Range of blocks that we want to prune. - BlocksToPrune: PruningRange; - /// Map of imported headers by hash. 
- Headers: map hasher(identity) H256 => Option>; - /// Map of imported header hashes by number. - HeadersByNumber: map hasher(blake2_128_concat) u64 => Option>; - /// Map of cached finality data by header hash. - FinalityCache: map hasher(identity) H256 => Option>; - /// The ID of next validator set. - NextValidatorsSetId: u64; - /// Map of validators sets by their id. - ValidatorsSets: map hasher(twox_64_concat) u64 => Option; - /// Validators sets reference count. Each header that is authored by this set increases - /// the reference count. When we prune this header, we decrease the reference count. - /// When it reaches zero, we are free to prune validator set as well. - ValidatorsSetsRc: map hasher(twox_64_concat) u64 => Option; - /// Map of validators set changes scheduled by given header. - ScheduledChanges: map hasher(identity) H256 => Option; - } - add_extra_genesis { - config(initial_header): AuraHeader; - config(initial_difficulty): U256; - config(initial_validators): Vec
; - build(|config| { - // the initial blocks should be selected so that: - // 1) it doesn't signal validators changes; - // 2) there are no scheduled validators changes from previous blocks; - // 3) (implied) all direct children of initial block are authored by the same validators set. - - assert!( - !config.initial_validators.is_empty(), - "Initial validators set can't be empty", - ); - - initialize_storage::( - &config.initial_header, - config.initial_difficulty, - &config.initial_validators, - ); - }) - } -} - -impl, I: Instance> Pallet { - /// Returns number and hash of the best block known to the bridge module. - /// The caller should only submit `import_header` transaction that makes - /// (or leads to making) other header the best one. - pub fn best_block() -> HeaderId { - BridgeStorage::::new().best_block().0 - } - - /// Returns number and hash of the best finalized block known to the bridge module. - pub fn finalized_block() -> HeaderId { - BridgeStorage::::new().finalized_block() - } - - /// Returns true if the import of given block requires transactions receipts. - pub fn is_import_requires_receipts(header: AuraHeader) -> bool { - import::header_import_requires_receipts( - &BridgeStorage::::new(), - &T::ValidatorsConfiguration::get(), - &header, - ) - } - - /// Returns true if header is known to the runtime. - pub fn is_known_block(hash: H256) -> bool { - BridgeStorage::::new().header(&hash).is_some() - } - - /// Verify that transaction is included into given finalized block. 
- pub fn verify_transaction_finalized( - block: H256, - tx_index: u64, - proof: &[(RawTransaction, RawTransactionReceipt)], - ) -> bool { - crate::verify_transaction_finalized(&BridgeStorage::::new(), block, tx_index, proof) - } -} - -impl, I: Instance> frame_support::unsigned::ValidateUnsigned for Pallet { - type Call = Call; - - fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { - match *call { - Self::Call::import_unsigned_header(ref header, ref receipts) => { - let accept_result = verification::accept_aura_header_into_pool( - &BridgeStorage::::new(), - &T::AuraConfiguration::get(), - &T::ValidatorsConfiguration::get(), - &pool_configuration(), - header, - &T::ChainTime::default(), - receipts.as_ref(), - ); - - match accept_result { - Ok((requires, provides)) => Ok(ValidTransaction { - priority: TransactionPriority::max_value(), - requires, - provides, - longevity: TransactionLongevity::max_value(), - propagate: true, - }), - // UnsignedTooFarInTheFuture is the special error code used to limit - // number of transactions in the pool - we do not want to ban transaction - // in this case (see verification.rs for details) - Err(error::Error::UnsignedTooFarInTheFuture) => { - UnknownTransaction::Custom(error::Error::UnsignedTooFarInTheFuture.code()).into() - } - Err(error) => InvalidTransaction::Custom(error.code()).into(), - } - } - _ => InvalidTransaction::Call.into(), - } - } -} - -/// Runtime bridge storage. -#[derive(Default)] -pub struct BridgeStorage(sp_std::marker::PhantomData<(T, I)>); - -impl, I: Instance> BridgeStorage { - /// Create new BridgeStorage. - pub fn new() -> Self { - BridgeStorage(sp_std::marker::PhantomData::<(T, I)>::default()) - } - - /// Prune old blocks. 
- fn prune_blocks(&self, mut max_blocks_to_prune: u64, finalized_number: u64, prune_end: u64) { - let pruning_range = BlocksToPrune::::get(); - let mut new_pruning_range = pruning_range.clone(); - - // update oldest block we want to keep - if prune_end > new_pruning_range.oldest_block_to_keep { - new_pruning_range.oldest_block_to_keep = prune_end; - } - - // start pruning blocks - let begin = new_pruning_range.oldest_unpruned_block; - let end = new_pruning_range.oldest_block_to_keep; - log::trace!(target: "runtime", "Pruning blocks in range [{}..{})", begin, end); - for number in begin..end { - // if we can't prune anything => break - if max_blocks_to_prune == 0 { - break; - } - - // read hashes of blocks with given number and try to prune these blocks - let blocks_at_number = HeadersByNumber::::take(number); - if let Some(mut blocks_at_number) = blocks_at_number { - self.prune_blocks_by_hashes( - &mut max_blocks_to_prune, - finalized_number, - number, - &mut blocks_at_number, - ); - - // if we haven't pruned all blocks, remember unpruned - if !blocks_at_number.is_empty() { - HeadersByNumber::::insert(number, blocks_at_number); - break; - } - } - - // we have pruned all headers at number - new_pruning_range.oldest_unpruned_block = number + 1; - log::trace!( - target: "runtime", - "Oldest unpruned PoA header is now: {}", - new_pruning_range.oldest_unpruned_block, - ); - } - - // update pruning range in storage - if pruning_range != new_pruning_range { - BlocksToPrune::::put(new_pruning_range); - } - } - - /// Prune old blocks with given hashes. 
- fn prune_blocks_by_hashes( - &self, - max_blocks_to_prune: &mut u64, - finalized_number: u64, - number: u64, - blocks_at_number: &mut Vec, - ) { - // ensure that unfinalized headers we want to prune do not have scheduled changes - if number > finalized_number && blocks_at_number.iter().any(ScheduledChanges::::contains_key) { - return; - } - - // physically remove headers and (probably) obsolete validators sets - while let Some(hash) = blocks_at_number.pop() { - let header = Headers::::take(&hash); - log::trace!( - target: "runtime", - "Pruning PoA header: ({}, {})", - number, - hash, - ); - - ScheduledChanges::::remove(hash); - FinalityCache::::remove(hash); - if let Some(header) = header { - ValidatorsSetsRc::::mutate(header.next_validators_set_id, |rc| match *rc { - Some(rc) if rc > 1 => Some(rc - 1), - _ => None, - }); - } - - // check if we have already pruned too much headers in this call - *max_blocks_to_prune -= 1; - if *max_blocks_to_prune == 0 { - return; - } - } - } -} - -impl, I: Instance> Storage for BridgeStorage { - type Submitter = T::AccountId; - - fn best_block(&self) -> (HeaderId, U256) { - BestBlock::::get() - } - - fn finalized_block(&self) -> HeaderId { - FinalizedBlock::::get() - } - - fn header(&self, hash: &H256) -> Option<(AuraHeader, Option)> { - Headers::::get(hash).map(|header| (header.header, header.submitter)) - } - - fn cached_finality_votes( - &self, - parent: &HeaderId, - best_finalized: &HeaderId, - stop_at: impl Fn(&H256) -> bool, - ) -> CachedFinalityVotes { - let mut votes = CachedFinalityVotes::default(); - let mut current_id = *parent; - loop { - // if we have reached finalized block's sibling => stop with special signal - if current_id.number == best_finalized.number && current_id.hash != best_finalized.hash { - votes.stopped_at_finalized_sibling = true; - return votes; - } - - // if we have reached target header => stop - if stop_at(¤t_id.hash) { - return votes; - } - - // if we have found cached votes => stop - let 
cached_votes = FinalityCache::::get(¤t_id.hash); - if let Some(cached_votes) = cached_votes { - votes.votes = Some(cached_votes); - return votes; - } - - // read next parent header id - let header = match Headers::::get(¤t_id.hash) { - Some(header) if header.header.number != 0 => header, - _ => return votes, - }; - let parent_id = header.header.parent_id().expect( - "only returns None at genesis header;\ - the header is proved to have number > 0;\ - qed", - ); - - votes - .unaccounted_ancestry - .push_back((current_id, header.submitter, header.header)); - - current_id = parent_id; - } - } - - fn import_context( - &self, - submitter: Option, - parent_hash: &H256, - ) -> Option> { - Headers::::get(parent_hash).map(|parent_header| { - let validators_set = ValidatorsSets::::get(parent_header.next_validators_set_id) - .expect("validators set is only pruned when last ref is pruned; there is a ref; qed"); - let parent_scheduled_change = ScheduledChanges::::get(parent_hash); - ImportContext { - submitter, - parent_hash: *parent_hash, - parent_header: parent_header.header, - parent_total_difficulty: parent_header.total_difficulty, - parent_scheduled_change, - validators_set_id: parent_header.next_validators_set_id, - validators_set, - last_signal_block: parent_header.last_signal_block, - } - }) - } - - fn scheduled_change(&self, hash: &H256) -> Option { - ScheduledChanges::::get(hash) - } - - fn insert_header(&mut self, header: HeaderToImport) { - if header.is_best { - BestBlock::::put((header.id, header.total_difficulty)); - } - if let Some(scheduled_change) = header.scheduled_change { - ScheduledChanges::::insert( - &header.id.hash, - AuraScheduledChange { - validators: scheduled_change, - prev_signal_block: header.context.last_signal_block, - }, - ); - } - let next_validators_set_id = match header.enacted_change { - Some(enacted_change) => { - let next_validators_set_id = NextValidatorsSetId::::mutate(|set_id| { - let next_set_id = *set_id; - *set_id += 1; - next_set_id 
- }); - ValidatorsSets::::insert( - next_validators_set_id, - ValidatorsSet { - validators: enacted_change.validators, - enact_block: header.id, - signal_block: enacted_change.signal_block, - }, - ); - ValidatorsSetsRc::::insert(next_validators_set_id, 1); - next_validators_set_id - } - None => { - ValidatorsSetsRc::::mutate(header.context.validators_set_id, |rc| { - *rc = Some(rc.map(|rc| rc + 1).unwrap_or(1)); - *rc - }); - header.context.validators_set_id - } - }; - - let finality_votes_caching_interval = T::FinalityVotesCachingInterval::get(); - if let Some(finality_votes_caching_interval) = finality_votes_caching_interval { - let cache_entry_required = header.id.number != 0 && header.id.number % finality_votes_caching_interval == 0; - if cache_entry_required { - FinalityCache::::insert(header.id.hash, header.finality_votes); - } - } - - log::trace!( - target: "runtime", - "Inserting PoA header: ({}, {})", - header.header.number, - header.id.hash, - ); - - let last_signal_block = header.context.last_signal_block(); - HeadersByNumber::::append(header.id.number, header.id.hash); - Headers::::insert( - &header.id.hash, - StoredHeader { - submitter: header.context.submitter, - header: header.header, - total_difficulty: header.total_difficulty, - next_validators_set_id, - last_signal_block, - }, - ); - } - - fn finalize_and_prune_headers(&mut self, finalized: Option, prune_end: u64) { - // remember just finalized block - let finalized_number = finalized - .as_ref() - .map(|f| f.number) - .unwrap_or_else(|| FinalizedBlock::::get().number); - if let Some(finalized) = finalized { - log::trace!( - target: "runtime", - "Finalizing PoA header: ({}, {})", - finalized.number, - finalized.hash, - ); - - FinalizedBlock::::put(finalized); - } - - // and now prune headers if we need to - self.prune_blocks(MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT, finalized_number, prune_end); - } -} - -/// Initialize storage. 
-#[cfg(any(feature = "std", feature = "runtime-benchmarks"))] -pub(crate) fn initialize_storage, I: Instance>( - initial_header: &AuraHeader, - initial_difficulty: U256, - initial_validators: &[Address], -) { - let initial_hash = initial_header.compute_hash(); - log::trace!( - target: "runtime", - "Initializing bridge with PoA header: ({}, {})", - initial_header.number, - initial_hash, - ); - - let initial_id = HeaderId { - number: initial_header.number, - hash: initial_hash, - }; - BestBlock::::put((initial_id, initial_difficulty)); - FinalizedBlock::::put(initial_id); - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: initial_header.number, - oldest_block_to_keep: initial_header.number, - }); - HeadersByNumber::::insert(initial_header.number, vec![initial_hash]); - Headers::::insert( - initial_hash, - StoredHeader { - submitter: None, - header: initial_header.clone(), - total_difficulty: initial_difficulty, - next_validators_set_id: 0, - last_signal_block: None, - }, - ); - NextValidatorsSetId::::put(1); - ValidatorsSets::::insert( - 0, - ValidatorsSet { - validators: initial_validators.to_vec(), - signal_block: None, - enact_block: initial_id, - }, - ); - ValidatorsSetsRc::::insert(0, 1); -} - -/// Verify that transaction is included into given finalized block. 
-pub fn verify_transaction_finalized( - storage: &S, - block: H256, - tx_index: u64, - proof: &[(RawTransaction, RawTransactionReceipt)], -) -> bool { - if tx_index >= proof.len() as _ { - log::trace!( - target: "runtime", - "Tx finality check failed: transaction index ({}) is larger than number of transactions ({})", - tx_index, - proof.len(), - ); - - return false; - } - - let header = match storage.header(&block) { - Some((header, _)) => header, - None => { - log::trace!( - target: "runtime", - "Tx finality check failed: can't find header in the storage: {}", - block, - ); - - return false; - } - }; - let finalized = storage.finalized_block(); - - // if header is not yet finalized => return - if header.number > finalized.number { - log::trace!( - target: "runtime", - "Tx finality check failed: header {}/{} is not finalized. Best finalized: {}", - header.number, - block, - finalized.number, - ); - - return false; - } - - // check if header is actually finalized - let is_finalized = match header.number < finalized.number { - true => ancestry(storage, finalized.hash) - .skip_while(|(_, ancestor)| ancestor.number > header.number) - .any(|(ancestor_hash, _)| ancestor_hash == block), - false => block == finalized.hash, - }; - if !is_finalized { - log::trace!( - target: "runtime", - "Tx finality check failed: header {} is not finalized: no canonical path to best finalized block {}", - block, - finalized.hash, - ); - - return false; - } - - // verify that transaction is included in the block - if let Err(computed_root) = header.check_transactions_root(proof.iter().map(|(tx, _)| tx)) { - log::trace!( - target: "runtime", - "Tx finality check failed: transactions root mismatch. 
Expected: {}, computed: {}", - header.transactions_root, - computed_root, - ); - - return false; - } - - // verify that transaction receipt is included in the block - if let Err(computed_root) = header.check_raw_receipts_root(proof.iter().map(|(_, r)| r)) { - log::trace!( - target: "runtime", - "Tx finality check failed: receipts root mismatch. Expected: {}, computed: {}", - header.receipts_root, - computed_root, - ); - - return false; - } - - // check that transaction has completed successfully - let is_successful_raw_receipt = Receipt::is_successful_raw_receipt(&proof[tx_index as usize].1); - match is_successful_raw_receipt { - Ok(true) => true, - Ok(false) => { - log::trace!( - target: "runtime", - "Tx finality check failed: receipt shows that transaction has failed", - ); - - false - } - Err(err) => { - log::trace!( - target: "runtime", - "Tx finality check failed: receipt check has failed: {}", - err, - ); - - false - } - } -} - -/// Transaction pool configuration. -fn pool_configuration() -> PoolConfiguration { - PoolConfiguration { - max_future_number_difference: 10, - } -} - -/// Return iterator of given header ancestors. 
-fn ancestry(storage: &'_ S, mut parent_hash: H256) -> impl Iterator + '_ { - sp_std::iter::from_fn(move || { - let (header, _) = storage.header(&parent_hash)?; - if header.number == 0 { - return None; - } - - let hash = parent_hash; - parent_hash = header.parent_hash; - Some((hash, header)) - }) -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use crate::finality::FinalityAncestor; - use crate::mock::{ - genesis, insert_header, run_test, run_test_with_genesis, validators_addresses, HeaderBuilder, TestRuntime, - GAS_LIMIT, - }; - use crate::test_utils::validator_utils::*; - use bp_eth_poa::compute_merkle_root; - - const TOTAL_VALIDATORS: usize = 3; - - fn example_tx() -> Vec { - vec![42] - } - - fn example_tx_receipt(success: bool) -> Vec { - Receipt { - // the only thing that we care of: - outcome: bp_eth_poa::TransactionOutcome::StatusCode(if success { 1 } else { 0 }), - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp() - } - - fn example_header_with_failed_receipt() -> AuraHeader { - HeaderBuilder::with_parent(&example_header()) - .transactions_root(compute_merkle_root(vec![example_tx()].into_iter())) - .receipts_root(compute_merkle_root(vec![example_tx_receipt(false)].into_iter())) - .sign_by(&validator(0)) - } - - fn example_header() -> AuraHeader { - HeaderBuilder::with_parent(&example_header_parent()) - .transactions_root(compute_merkle_root(vec![example_tx()].into_iter())) - .receipts_root(compute_merkle_root(vec![example_tx_receipt(true)].into_iter())) - .sign_by(&validator(0)) - } - - fn example_header_parent() -> AuraHeader { - HeaderBuilder::with_parent(&genesis()) - .transactions_root(compute_merkle_root(vec![example_tx()].into_iter())) - .receipts_root(compute_merkle_root(vec![example_tx_receipt(true)].into_iter())) - .sign_by(&validator(0)) - } - - fn with_headers_to_prune(f: impl Fn(BridgeStorage) -> T) -> T { - run_test(TOTAL_VALIDATORS, |ctx| { - for i in 1..10 { - let mut headers_by_number 
= Vec::with_capacity(5); - for j in 0..5 { - let header = HeaderBuilder::with_parent_number(i - 1) - .gas_limit((GAS_LIMIT + j).into()) - .sign_by_set(&ctx.validators); - let hash = header.compute_hash(); - headers_by_number.push(hash); - Headers::::insert( - hash, - StoredHeader { - submitter: None, - header, - total_difficulty: 0.into(), - next_validators_set_id: 0, - last_signal_block: None, - }, - ); - - if i == 7 && j == 1 { - ScheduledChanges::::insert( - hash, - AuraScheduledChange { - validators: validators_addresses(5), - prev_signal_block: None, - }, - ); - } - } - HeadersByNumber::::insert(i, headers_by_number); - } - - f(BridgeStorage::new()) - }) - } - - #[test] - fn blocks_are_not_pruned_if_range_is_empty() { - with_headers_to_prune(|storage| { - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: 5, - oldest_block_to_keep: 5, - }); - - // try to prune blocks [5; 10) - storage.prune_blocks(0xFFFF, 10, 5); - assert_eq!(HeadersByNumber::::get(&5).unwrap().len(), 5); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { - oldest_unpruned_block: 5, - oldest_block_to_keep: 5, - }, - ); - }); - } - - #[test] - fn blocks_to_prune_never_shrinks_from_the_end() { - with_headers_to_prune(|storage| { - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: 0, - oldest_block_to_keep: 5, - }); - - // try to prune blocks [5; 10) - storage.prune_blocks(0xFFFF, 10, 3); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { - oldest_unpruned_block: 5, - oldest_block_to_keep: 5, - }, - ); - }); - } - - #[test] - fn blocks_are_not_pruned_if_limit_is_zero() { - with_headers_to_prune(|storage| { - // try to prune blocks [0; 10) - storage.prune_blocks(0, 10, 10); - assert!(HeadersByNumber::::get(&0).is_some()); - assert!(HeadersByNumber::::get(&1).is_some()); - assert!(HeadersByNumber::::get(&2).is_some()); - assert!(HeadersByNumber::::get(&3).is_some()); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { - oldest_unpruned_block: 0, - 
oldest_block_to_keep: 10, - }, - ); - }); - } - - #[test] - fn blocks_are_pruned_if_limit_is_non_zero() { - with_headers_to_prune(|storage| { - // try to prune blocks [0; 10) - storage.prune_blocks(7, 10, 10); - // 1 headers with number = 0 is pruned (1 total) - assert!(HeadersByNumber::::get(&0).is_none()); - // 5 headers with number = 1 are pruned (6 total) - assert!(HeadersByNumber::::get(&1).is_none()); - // 1 header with number = 2 are pruned (7 total) - assert_eq!(HeadersByNumber::::get(&2).unwrap().len(), 4); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { - oldest_unpruned_block: 2, - oldest_block_to_keep: 10, - }, - ); - - // try to prune blocks [2; 10) - storage.prune_blocks(11, 10, 10); - // 4 headers with number = 2 are pruned (4 total) - assert!(HeadersByNumber::::get(&2).is_none()); - // 5 headers with number = 3 are pruned (9 total) - assert!(HeadersByNumber::::get(&3).is_none()); - // 2 headers with number = 4 are pruned (11 total) - assert_eq!(HeadersByNumber::::get(&4).unwrap().len(), 3); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { - oldest_unpruned_block: 4, - oldest_block_to_keep: 10, - }, - ); - }); - } - - #[test] - fn pruning_stops_on_unfainalized_block_with_scheduled_change() { - with_headers_to_prune(|storage| { - // try to prune blocks [0; 10) - // last finalized block is 5 - // and one of blocks#7 has scheduled change - // => we won't prune any block#7 at all - storage.prune_blocks(0xFFFF, 5, 10); - assert!(HeadersByNumber::::get(&0).is_none()); - assert!(HeadersByNumber::::get(&1).is_none()); - assert!(HeadersByNumber::::get(&2).is_none()); - assert!(HeadersByNumber::::get(&3).is_none()); - assert!(HeadersByNumber::::get(&4).is_none()); - assert!(HeadersByNumber::::get(&5).is_none()); - assert!(HeadersByNumber::::get(&6).is_none()); - assert_eq!(HeadersByNumber::::get(&7).unwrap().len(), 5); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { - oldest_unpruned_block: 7, - oldest_block_to_keep: 10, - }, - ); - 
}); - } - - #[test] - fn finality_votes_are_cached() { - run_test(TOTAL_VALIDATORS, |ctx| { - let mut storage = BridgeStorage::::new(); - let interval = ::FinalityVotesCachingInterval::get().unwrap(); - - // for all headers with number < interval, cache entry is not created - for i in 1..interval { - let header = HeaderBuilder::with_parent_number(i - 1).sign_by_set(&ctx.validators); - let id = header.compute_id(); - insert_header(&mut storage, header); - assert_eq!(FinalityCache::::get(&id.hash), None); - } - - // for header with number = interval, cache entry is created - let header_with_entry = HeaderBuilder::with_parent_number(interval - 1).sign_by_set(&ctx.validators); - let header_with_entry_hash = header_with_entry.compute_hash(); - insert_header(&mut storage, header_with_entry); - assert!(FinalityCache::::get(&header_with_entry_hash).is_some()); - - // when we later prune this header, cache entry is removed - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: interval - 1, - oldest_block_to_keep: interval - 1, - }); - storage.finalize_and_prune_headers(None, interval + 1); - assert_eq!(FinalityCache::::get(&header_with_entry_hash), None); - }); - } - - #[test] - fn cached_finality_votes_finds_entry() { - run_test(TOTAL_VALIDATORS, |ctx| { - // insert 5 headers - let mut storage = BridgeStorage::::new(); - let mut headers = Vec::new(); - for i in 1..5 { - let header = HeaderBuilder::with_parent_number(i - 1).sign_by_set(&ctx.validators); - headers.push(header.clone()); - insert_header(&mut storage, header); - } - - // when inserting header#6, entry isn't found - let id5 = headers.last().unwrap().compute_id(); - assert_eq!( - storage.cached_finality_votes(&id5, &genesis().compute_id(), |_| false), - CachedFinalityVotes { - stopped_at_finalized_sibling: false, - unaccounted_ancestry: headers - .iter() - .map(|header| (header.compute_id(), None, header.clone(),)) - .rev() - .collect(), - votes: None, - }, - ); - - // let's now create entry at #3 - let 
hash3 = headers[2].compute_hash(); - let votes_at_3 = FinalityVotes { - votes: vec![([42; 20].into(), 21)].into_iter().collect(), - ancestry: vec![FinalityAncestor { - id: HeaderId { - number: 100, - hash: Default::default(), - }, - ..Default::default() - }] - .into_iter() - .collect(), - }; - FinalityCache::::insert(hash3, votes_at_3.clone()); - - // searching at #6 again => entry is found - assert_eq!( - storage.cached_finality_votes(&id5, &genesis().compute_id(), |_| false), - CachedFinalityVotes { - stopped_at_finalized_sibling: false, - unaccounted_ancestry: headers - .iter() - .skip(3) - .map(|header| (header.compute_id(), None, header.clone(),)) - .rev() - .collect(), - votes: Some(votes_at_3), - }, - ); - }); - } - - #[test] - fn cached_finality_votes_stops_at_finalized_sibling() { - run_test(TOTAL_VALIDATORS, |ctx| { - let mut storage = BridgeStorage::::new(); - - // insert header1 - let header1 = HeaderBuilder::with_parent_number(0).sign_by_set(&ctx.validators); - let header1_id = header1.compute_id(); - insert_header(&mut storage, header1); - - // insert header1' - sibling of header1 - let header1s = HeaderBuilder::with_parent_number(0) - .gas_limit((GAS_LIMIT + 1).into()) - .sign_by_set(&ctx.validators); - let header1s_id = header1s.compute_id(); - insert_header(&mut storage, header1s); - - // header1 is finalized - FinalizedBlock::::put(header1_id); - - // trying to get finality votes when importing header2 -> header1 succeeds - assert!( - !storage - .cached_finality_votes(&header1_id, &genesis().compute_id(), |_| false) - .stopped_at_finalized_sibling - ); - - // trying to get finality votes when importing header2s -> header1s fails - assert!( - storage - .cached_finality_votes(&header1s_id, &header1_id, |_| false) - .stopped_at_finalized_sibling - ); - }); - } - - #[test] - fn verify_transaction_finalized_works_for_best_finalized_header() { - run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - 
assert!(verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(true))], - )); - }); - } - - #[test] - fn verify_transaction_finalized_works_for_best_finalized_header_ancestor() { - run_test(TOTAL_VALIDATORS, |_| { - let mut storage = BridgeStorage::::new(); - insert_header(&mut storage, example_header_parent()); - insert_header(&mut storage, example_header()); - storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); - assert!(verify_transaction_finalized( - &storage, - example_header_parent().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(true))], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_proof_with_missing_tx() { - run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(!verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 1, - &[], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_unknown_header() { - run_test(TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(!verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 1, - &[], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_unfinalized_header() { - run_test(TOTAL_VALIDATORS, |_| { - let mut storage = BridgeStorage::::new(); - insert_header(&mut storage, example_header_parent()); - insert_header(&mut storage, example_header()); - assert!(!verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(true))], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_finalized_header_sibling() { - run_test(TOTAL_VALIDATORS, |_| { - let mut finalized_header_sibling = example_header(); - finalized_header_sibling.timestamp = 1; - let finalized_header_sibling_hash = finalized_header_sibling.compute_hash(); - - let mut storage = BridgeStorage::::new(); - 
insert_header(&mut storage, example_header_parent()); - insert_header(&mut storage, example_header()); - insert_header(&mut storage, finalized_header_sibling); - storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); - assert!(!verify_transaction_finalized( - &storage, - finalized_header_sibling_hash, - 0, - &[(example_tx(), example_tx_receipt(true))], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_finalized_header_uncle() { - run_test(TOTAL_VALIDATORS, |_| { - let mut finalized_header_uncle = example_header_parent(); - finalized_header_uncle.timestamp = 1; - let finalized_header_uncle_hash = finalized_header_uncle.compute_hash(); - - let mut storage = BridgeStorage::::new(); - insert_header(&mut storage, example_header_parent()); - insert_header(&mut storage, finalized_header_uncle); - insert_header(&mut storage, example_header()); - storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); - assert!(!verify_transaction_finalized( - &storage, - finalized_header_uncle_hash, - 0, - &[(example_tx(), example_tx_receipt(true))], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_invalid_transactions_in_proof() { - run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(!verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[ - (example_tx(), example_tx_receipt(true)), - (example_tx(), example_tx_receipt(true)) - ], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_invalid_receipts_in_proof() { - run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(!verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[(example_tx(), vec![42])], - )); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_failed_transaction() { - run_test_with_genesis(example_header_with_failed_receipt(), 
TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert!(!verify_transaction_finalized( - &storage, - example_header_with_failed_receipt().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(false))], - )); - }); - } -} diff --git a/polkadot/bridges/modules/ethereum/src/mock.rs b/polkadot/bridges/modules/ethereum/src/mock.rs deleted file mode 100644 index c8102cdb0b05d82f905b078d4262990411fa58e0..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/mock.rs +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -// From construct_runtime macro -#![allow(clippy::from_over_into)] - -pub use crate::test_utils::{insert_header, validator_utils::*, validators_change_receipt, HeaderBuilder, GAS_LIMIT}; -pub use bp_eth_poa::signatures::secret_to_address; - -use crate::validators::{ValidatorsConfiguration, ValidatorsSource}; -use crate::{AuraConfiguration, ChainTime, Config, GenesisConfig as CrateGenesisConfig, PruningStrategy}; -use bp_eth_poa::{Address, AuraHeader, H256, U256}; -use frame_support::{parameter_types, weights::Weight}; -use secp256k1::SecretKey; -use sp_runtime::{ - testing::Header as SubstrateHeader, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, -}; - -pub type AccountId = u64; - -type Block = frame_system::mocking::MockBlock; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - -use crate as pallet_ethereum; - -frame_support::construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Ethereum: pallet_ethereum::{Pallet, Call}, - } -} - -parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - -impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = SubstrateHeader; - type Event = Event; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); -} - -parameter_types! { - pub const TestFinalityVotesCachingInterval: Option = Some(16); - pub TestAuraConfiguration: AuraConfiguration = test_aura_config(); - pub TestValidatorsConfiguration: ValidatorsConfiguration = test_validators_config(); -} - -impl Config for TestRuntime { - type AuraConfiguration = TestAuraConfiguration; - type ValidatorsConfiguration = TestValidatorsConfiguration; - type FinalityVotesCachingInterval = TestFinalityVotesCachingInterval; - type PruningStrategy = KeepSomeHeadersBehindBest; - type ChainTime = ConstChainTime; - type OnHeadersSubmitted = (); -} - -/// Test context. -pub struct TestContext { - /// Initial (genesis) header. - pub genesis: AuraHeader, - /// Number of initial validators. - pub total_validators: usize, - /// Secret keys of validators, ordered by validator index. - pub validators: Vec, - /// Addresses of validators, ordered by validator index. - pub addresses: Vec
, -} - -/// Aura configuration that is used in tests by default. -pub fn test_aura_config() -> AuraConfiguration { - AuraConfiguration { - empty_steps_transition: u64::MAX, - strict_empty_steps_transition: 0, - validate_step_transition: 0x16e360, - validate_score_transition: 0x41a3c4, - two_thirds_majority_transition: u64::MAX, - min_gas_limit: 0x1388.into(), - max_gas_limit: U256::MAX, - maximum_extra_data_size: 0x20, - } -} - -/// Validators configuration that is used in tests by default. -pub fn test_validators_config() -> ValidatorsConfiguration { - ValidatorsConfiguration::Single(ValidatorsSource::List(validators_addresses(3))) -} - -/// Genesis header that is used in tests by default. -pub fn genesis() -> AuraHeader { - HeaderBuilder::genesis().sign_by(&validator(0)) -} - -/// Run test with default genesis header. -pub fn run_test(total_validators: usize, test: impl FnOnce(TestContext) -> T) -> T { - run_test_with_genesis(genesis(), total_validators, test) -} - -/// Run test with default genesis header. -pub fn run_test_with_genesis( - genesis: AuraHeader, - total_validators: usize, - test: impl FnOnce(TestContext) -> T, -) -> T { - let validators = validators(total_validators); - let addresses = validators_addresses(total_validators); - sp_io::TestExternalities::new( - CrateGenesisConfig { - initial_header: genesis.clone(), - initial_difficulty: 0.into(), - initial_validators: addresses.clone(), - } - .build_storage::() - .unwrap(), - ) - .execute_with(|| { - test(TestContext { - genesis, - total_validators, - validators, - addresses, - }) - }) -} - -/// Pruning strategy that keeps 10 headers behind best block. 
-pub struct KeepSomeHeadersBehindBest(pub u64); - -impl Default for KeepSomeHeadersBehindBest { - fn default() -> KeepSomeHeadersBehindBest { - KeepSomeHeadersBehindBest(10) - } -} - -impl PruningStrategy for KeepSomeHeadersBehindBest { - fn pruning_upper_bound(&mut self, best_number: u64, _: u64) -> u64 { - best_number.saturating_sub(self.0) - } -} - -/// Constant chain time -#[derive(Default)] -pub struct ConstChainTime; - -impl ChainTime for ConstChainTime { - fn is_timestamp_ahead(&self, timestamp: u64) -> bool { - let now = i32::MAX as u64 / 2; - timestamp > now - } -} diff --git a/polkadot/bridges/modules/ethereum/src/test_utils.rs b/polkadot/bridges/modules/ethereum/src/test_utils.rs deleted file mode 100644 index 41161089ba6d07e0eb056a66890558df80fe8553..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/test_utils.rs +++ /dev/null @@ -1,321 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Utilities for testing and benchmarking the Ethereum Bridge Pallet. -//! -//! Although the name implies that it is used by tests, it shouldn't be be used _directly_ by tests. -//! Instead these utilities should be used by the Mock runtime, which in turn is used by tests. -//! -//! 
On the other hand, they may be used directly by the bechmarking module. - -// Since this is test code it's fine that not everything is used -#![allow(dead_code)] - -use crate::finality::FinalityVotes; -use crate::validators::CHANGE_EVENT_HASH; -use crate::verification::calculate_score; -use crate::{Config, HeaderToImport, Storage}; - -use bp_eth_poa::{ - rlp_encode, - signatures::{secret_to_address, sign, SignHeader}, - Address, AuraHeader, Bloom, Receipt, SealedEmptyStep, H256, U256, -}; -use secp256k1::SecretKey; -use sp_std::prelude::*; - -/// Gas limit valid in test environment. -pub const GAS_LIMIT: u64 = 0x2000; - -/// Test header builder. -pub struct HeaderBuilder { - header: AuraHeader, - parent_header: AuraHeader, -} - -impl HeaderBuilder { - /// Creates default genesis header. - pub fn genesis() -> Self { - let current_step = 0u64; - Self { - header: AuraHeader { - gas_limit: GAS_LIMIT.into(), - seal: vec![bp_eth_poa::rlp_encode(¤t_step).to_vec(), vec![]], - ..Default::default() - }, - parent_header: Default::default(), - } - } - - /// Creates default header on top of test parent with given hash. - #[cfg(test)] - pub fn with_parent_hash(parent_hash: H256) -> Self { - Self::with_parent_hash_on_runtime::(parent_hash) - } - - /// Creates default header on top of test parent with given number. First parent is selected. - #[cfg(test)] - pub fn with_parent_number(parent_number: u64) -> Self { - Self::with_parent_number_on_runtime::(parent_number) - } - - /// Creates default header on top of parent with given hash. - pub fn with_parent_hash_on_runtime, I: crate::Instance>(parent_hash: H256) -> Self { - use crate::Headers; - use frame_support::StorageMap; - - let parent_header = Headers::::get(&parent_hash).unwrap().header; - Self::with_parent(&parent_header) - } - - /// Creates default header on top of parent with given number. First parent is selected. 
- pub fn with_parent_number_on_runtime, I: crate::Instance>(parent_number: u64) -> Self { - use crate::HeadersByNumber; - use frame_support::StorageMap; - - let parent_hash = HeadersByNumber::::get(parent_number).unwrap()[0]; - Self::with_parent_hash_on_runtime::(parent_hash) - } - - /// Creates default header on top of non-existent parent. - #[cfg(test)] - pub fn with_number(number: u64) -> Self { - Self::with_parent(&AuraHeader { - number: number - 1, - seal: vec![bp_eth_poa::rlp_encode(&(number - 1)).to_vec(), vec![]], - ..Default::default() - }) - } - - /// Creates default header on top of given parent. - pub fn with_parent(parent_header: &AuraHeader) -> Self { - let parent_step = parent_header.step().unwrap(); - let current_step = parent_step + 1; - Self { - header: AuraHeader { - parent_hash: parent_header.compute_hash(), - number: parent_header.number + 1, - gas_limit: GAS_LIMIT.into(), - seal: vec![bp_eth_poa::rlp_encode(¤t_step).to_vec(), vec![]], - difficulty: calculate_score(parent_step, current_step, 0), - ..Default::default() - }, - parent_header: parent_header.clone(), - } - } - - /// Update step of this header. - pub fn step(mut self, step: u64) -> Self { - let parent_step = self.parent_header.step(); - self.header.seal[0] = rlp_encode(&step).to_vec(); - self.header.difficulty = parent_step - .map(|parent_step| calculate_score(parent_step, step, 0)) - .unwrap_or_default(); - self - } - - /// Adds empty steps to this header. 
- pub fn empty_steps(mut self, empty_steps: &[(&SecretKey, u64)]) -> Self { - let sealed_empty_steps = empty_steps - .iter() - .map(|(author, step)| { - let mut empty_step = SealedEmptyStep { - step: *step, - signature: Default::default(), - }; - let message = empty_step.message(&self.header.parent_hash); - let signature: [u8; 65] = sign(author, message).into(); - empty_step.signature = signature.into(); - empty_step - }) - .collect::>(); - - // by default in test configuration headers are generated without empty steps seal - if self.header.seal.len() < 3 { - self.header.seal.push(Vec::new()); - } - - self.header.seal[2] = SealedEmptyStep::rlp_of(&sealed_empty_steps); - self - } - - /// Update difficulty field of this header. - pub fn difficulty(mut self, difficulty: U256) -> Self { - self.header.difficulty = difficulty; - self - } - - /// Update extra data field of this header. - pub fn extra_data(mut self, extra_data: Vec) -> Self { - self.header.extra_data = extra_data; - self - } - - /// Update gas limit field of this header. - pub fn gas_limit(mut self, gas_limit: U256) -> Self { - self.header.gas_limit = gas_limit; - self - } - - /// Update gas used field of this header. - pub fn gas_used(mut self, gas_used: U256) -> Self { - self.header.gas_used = gas_used; - self - } - - /// Update log bloom field of this header. - pub fn log_bloom(mut self, log_bloom: Bloom) -> Self { - self.header.log_bloom = log_bloom; - self - } - - /// Update receipts root field of this header. - pub fn receipts_root(mut self, receipts_root: H256) -> Self { - self.header.receipts_root = receipts_root; - self - } - - /// Update timestamp field of this header. - pub fn timestamp(mut self, timestamp: u64) -> Self { - self.header.timestamp = timestamp; - self - } - - /// Update transactions root field of this header. - pub fn transactions_root(mut self, transactions_root: H256) -> Self { - self.header.transactions_root = transactions_root; - self - } - - /// Signs header by given author. 
- pub fn sign_by(self, author: &SecretKey) -> AuraHeader { - self.header.sign_by(author) - } - - /// Signs header by given authors set. - pub fn sign_by_set(self, authors: &[SecretKey]) -> AuraHeader { - self.header.sign_by_set(authors) - } -} - -/// Helper function for getting a genesis header which has been signed by an authority. -pub fn build_genesis_header(author: &SecretKey) -> AuraHeader { - let genesis = HeaderBuilder::genesis(); - genesis.header.sign_by(author) -} - -/// Helper function for building a custom child header which has been signed by an authority. -pub fn build_custom_header(author: &SecretKey, previous: &AuraHeader, customize_header: F) -> AuraHeader -where - F: FnOnce(AuraHeader) -> AuraHeader, -{ - let new_header = HeaderBuilder::with_parent(previous); - let custom_header = customize_header(new_header.header); - custom_header.sign_by(author) -} - -/// Insert unverified header into storage. -/// -/// This function assumes that the header is signed by validator from the current set. -pub fn insert_header(storage: &mut S, header: AuraHeader) { - let id = header.compute_id(); - let best_finalized = storage.finalized_block(); - let import_context = storage.import_context(None, &header.parent_hash).unwrap(); - let parent_finality_votes = storage.cached_finality_votes(&header.parent_id().unwrap(), &best_finalized, |_| false); - let finality_votes = crate::finality::prepare_votes( - parent_finality_votes, - best_finalized, - &import_context.validators_set().validators.iter().collect(), - id, - &header, - None, - ) - .unwrap(); - - storage.insert_header(HeaderToImport { - context: storage.import_context(None, &header.parent_hash).unwrap(), - is_best: true, - id, - header, - total_difficulty: 0.into(), - enacted_change: None, - scheduled_change: None, - finality_votes, - }); -} - -/// Insert unverified header into storage. -/// -/// No assumptions about header author are made. 
The cost is that finality votes cache -/// is filled incorrectly, so this function shall not be used if you're going to insert -/// (or import) header descendants. -pub fn insert_dummy_header(storage: &mut S, header: AuraHeader) { - storage.insert_header(HeaderToImport { - context: storage.import_context(None, &header.parent_hash).unwrap(), - is_best: true, - id: header.compute_id(), - header, - total_difficulty: 0.into(), - enacted_change: None, - scheduled_change: None, - finality_votes: FinalityVotes::default(), - }); -} - -pub fn validators_change_receipt(parent_hash: H256) -> Receipt { - use bp_eth_poa::{LogEntry, TransactionOutcome}; - - Receipt { - gas_used: 0.into(), - log_bloom: (&[0xff; 256]).into(), - outcome: TransactionOutcome::Unknown, - logs: vec![LogEntry { - address: [3; 20].into(), - topics: vec![CHANGE_EVENT_HASH.into(), parent_hash], - data: vec![ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - ], - }], - } -} - -pub mod validator_utils { - use super::*; - - /// Return key pair of given test validator. - pub fn validator(index: usize) -> SecretKey { - let mut raw_secret = [0u8; 32]; - raw_secret[..8].copy_from_slice(&(index + 1).to_le_bytes()); - SecretKey::parse(&raw_secret).unwrap() - } - - /// Return key pairs of all test validators. - pub fn validators(count: usize) -> Vec { - (0..count).map(validator).collect() - } - - /// Return address of test validator. - pub fn validator_address(index: usize) -> Address { - secret_to_address(&validator(index)) - } - - /// Return addresses of all test validators. - pub fn validators_addresses(count: usize) -> Vec
{ - (0..count).map(validator_address).collect() - } -} diff --git a/polkadot/bridges/modules/ethereum/src/validators.rs b/polkadot/bridges/modules/ethereum/src/validators.rs deleted file mode 100644 index f9add9f2d80cf6b5cfc3f7a21ab95e79e2be5609..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/validators.rs +++ /dev/null @@ -1,473 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::error::Error; -use crate::{ChangeToEnact, Storage}; -use bp_eth_poa::{Address, AuraHeader, HeaderId, LogEntry, Receipt, U256}; -use sp_std::prelude::*; - -/// The hash of InitiateChange event of the validators set contract. -pub(crate) const CHANGE_EVENT_HASH: &[u8; 32] = &[ - 0x55, 0x25, 0x2f, 0xa6, 0xee, 0xe4, 0x74, 0x1b, 0x4e, 0x24, 0xa7, 0x4a, 0x70, 0xe9, 0xc1, 0x1f, 0xd2, 0xc2, 0x28, - 0x1d, 0xf8, 0xd6, 0xea, 0x13, 0x12, 0x6f, 0xf8, 0x45, 0xf7, 0x82, 0x5c, 0x89, -]; - -/// Where source of validators addresses come from. This covers the chain lifetime. -pub enum ValidatorsConfiguration { - /// There's a single source for the whole chain lifetime. - Single(ValidatorsSource), - /// Validators source changes at given blocks. The blocks are ordered - /// by the block number. 
- Multi(Vec<(u64, ValidatorsSource)>), -} - -/// Where validators addresses come from. -/// -/// This source is valid within some blocks range. The blocks range could -/// cover multiple epochs - i.e. the validators that are authoring blocks -/// within this range could change, but the source itself can not. -#[cfg_attr(any(test, feature = "runtime-benchmarks"), derive(Debug, PartialEq))] -pub enum ValidatorsSource { - /// The validators addresses are hardcoded and never change. - List(Vec
), - /// The validators addresses are determined by the validators set contract - /// deployed at given address. The contract must implement the `ValidatorSet` - /// interface. Additionally, the initial validators set must be provided. - Contract(Address, Vec
), -} - -/// A short hand for optional validators change. -pub type ValidatorsChange = Option>; - -/// Validators manager. -pub struct Validators<'a> { - config: &'a ValidatorsConfiguration, -} - -impl<'a> Validators<'a> { - /// Creates new validators manager using given configuration. - pub fn new(config: &'a ValidatorsConfiguration) -> Self { - Self { config } - } - - /// Returns true if header (probabilistically) signals validators change and - /// the caller needs to provide transactions receipts to import the header. - pub fn maybe_signals_validators_change(&self, header: &AuraHeader) -> bool { - let (_, _, source) = self.source_at(header.number); - - // if we are taking validators set from the fixed list, there's always - // single epoch - // => we never require transactions receipts - let contract_address = match source { - ValidatorsSource::List(_) => return false, - ValidatorsSource::Contract(contract_address, _) => contract_address, - }; - - // else we need to check logs bloom and if it has required bits set, it means - // that the contract has (probably) emitted epoch change event - let expected_bloom = LogEntry { - address: *contract_address, - topics: vec![CHANGE_EVENT_HASH.into(), header.parent_hash], - data: Vec::new(), // irrelevant for bloom. - } - .bloom(); - - header.log_bloom.contains(&expected_bloom) - } - - /// Extracts validators change signal from the header. - /// - /// Returns tuple where first element is the change scheduled by this header - /// (i.e. this change is only applied starting from the block that has finalized - /// current block). The second element is the immediately applied change. 
- pub fn extract_validators_change( - &self, - header: &AuraHeader, - receipts: Option>, - ) -> Result<(ValidatorsChange, ValidatorsChange), Error> { - // let's first check if new source is starting from this header - let (source_index, _, source) = self.source_at(header.number); - let (next_starts_at, next_source) = self.source_at_next_header(source_index, header.number); - if next_starts_at == header.number { - match *next_source { - ValidatorsSource::List(ref new_list) => return Ok((None, Some(new_list.clone()))), - ValidatorsSource::Contract(_, ref new_list) => return Ok((Some(new_list.clone()), None)), - } - } - - // else deal with previous source - // - // if we are taking validators set from the fixed list, there's always - // single epoch - // => we never require transactions receipts - let contract_address = match source { - ValidatorsSource::List(_) => return Ok((None, None)), - ValidatorsSource::Contract(contract_address, _) => contract_address, - }; - - // else we need to check logs bloom and if it has required bits set, it means - // that the contract has (probably) emitted epoch change event - let expected_bloom = LogEntry { - address: *contract_address, - topics: vec![CHANGE_EVENT_HASH.into(), header.parent_hash], - data: Vec::new(), // irrelevant for bloom. 
- } - .bloom(); - - if !header.log_bloom.contains(&expected_bloom) { - return Ok((None, None)); - } - - let receipts = receipts.ok_or(Error::MissingTransactionsReceipts)?; - if header.check_receipts_root(&receipts).is_err() { - return Err(Error::TransactionsReceiptsMismatch); - } - - // iterate in reverse because only the _last_ change in a given - // block actually has any effect - Ok(( - receipts - .iter() - .rev() - .filter(|r| r.log_bloom.contains(&expected_bloom)) - .flat_map(|r| r.logs.iter()) - .filter(|l| { - l.address == *contract_address - && l.topics.len() == 2 && l.topics[0].as_fixed_bytes() == CHANGE_EVENT_HASH - && l.topics[1] == header.parent_hash - }) - .filter_map(|l| { - let data_len = l.data.len(); - if data_len < 64 { - return None; - } - - let new_validators_len_u256 = U256::from_big_endian(&l.data[32..64]); - let new_validators_len = new_validators_len_u256.low_u64(); - if new_validators_len_u256 != new_validators_len.into() { - return None; - } - - if (data_len - 64) as u64 != new_validators_len.saturating_mul(32) { - return None; - } - - Some( - l.data[64..] - .chunks(32) - .map(|chunk| { - let mut new_validator = Address::default(); - new_validator.as_mut().copy_from_slice(&chunk[12..32]); - new_validator - }) - .collect(), - ) - }) - .next(), - None, - )) - } - - /// Finalize changes when blocks are finalized. 
- pub fn finalize_validators_change( - &self, - storage: &S, - finalized_blocks: &[(HeaderId, Option)], - ) -> Option { - // if we haven't finalized any blocks, no changes may be finalized - let newest_finalized_id = finalized_blocks.last().map(|(id, _)| id)?; - let oldest_finalized_id = finalized_blocks - .first() - .map(|(id, _)| id) - .expect("finalized_blocks is not empty; qed"); - - // try to directly go to the header that has scheduled last change - // - // if we're unable to create import context for some block, it means - // that the header has already been pruned => it and its ancestors had - // no scheduled changes - // - // if we're unable to find scheduled changes for some block, it means - // that these changes have been finalized already - storage - .import_context(None, &newest_finalized_id.hash) - .and_then(|context| context.last_signal_block()) - .and_then(|signal_block| { - if signal_block.number >= oldest_finalized_id.number { - Some(signal_block) - } else { - None - } - }) - .and_then(|signal_block| { - storage - .scheduled_change(&signal_block.hash) - .map(|change| ChangeToEnact { - signal_block: Some(signal_block), - validators: change.validators, - }) - }) - } - - /// Returns source of validators that should author the header. - fn source_at(&self, header_number: u64) -> (usize, u64, &ValidatorsSource) { - match self.config { - ValidatorsConfiguration::Single(ref source) => (0, 0, source), - ValidatorsConfiguration::Multi(ref sources) => sources - .iter() - .rev() - .enumerate() - .find(|(_, &(begin, _))| begin < header_number) - .map(|(i, (begin, source))| (sources.len() - 1 - i, *begin, source)) - .expect( - "there's always entry for the initial block;\ - we do not touch any headers with number < initial block number; qed", - ), - } - } - - /// Returns source of validators that should author the next header. 
- fn source_at_next_header(&self, header_source_index: usize, header_number: u64) -> (u64, &ValidatorsSource) { - match self.config { - ValidatorsConfiguration::Single(ref source) => (0, source), - ValidatorsConfiguration::Multi(ref sources) => { - let next_source_index = header_source_index + 1; - if next_source_index < sources.len() { - let next_source = &sources[next_source_index]; - if next_source.0 < header_number + 1 { - return (next_source.0, &next_source.1); - } - } - - let source = &sources[header_source_index]; - (source.0, &source.1) - } - } - } -} - -impl ValidatorsSource { - /// Returns initial validators set. - pub fn initial_epoch_validators(&self) -> Vec
{ - match self { - ValidatorsSource::List(ref list) => list.clone(), - ValidatorsSource::Contract(_, ref list) => list.clone(), - } - } -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use crate::mock::{run_test, validators_addresses, validators_change_receipt, TestRuntime}; - use crate::DefaultInstance; - use crate::{AuraScheduledChange, BridgeStorage, Headers, ScheduledChanges, StoredHeader}; - use bp_eth_poa::compute_merkle_root; - use frame_support::StorageMap; - - const TOTAL_VALIDATORS: usize = 3; - - #[test] - fn source_at_works() { - let config = ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(vec![[1; 20].into()])), - (100, ValidatorsSource::List(vec![[2; 20].into()])), - (200, ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])), - ]); - let validators = Validators::new(&config); - - assert_eq!( - validators.source_at(99), - (0, 0, &ValidatorsSource::List(vec![[1; 20].into()])), - ); - assert_eq!( - validators.source_at_next_header(0, 99), - (0, &ValidatorsSource::List(vec![[1; 20].into()])), - ); - - assert_eq!( - validators.source_at(100), - (0, 0, &ValidatorsSource::List(vec![[1; 20].into()])), - ); - assert_eq!( - validators.source_at_next_header(0, 100), - (100, &ValidatorsSource::List(vec![[2; 20].into()])), - ); - - assert_eq!( - validators.source_at(200), - (1, 100, &ValidatorsSource::List(vec![[2; 20].into()])), - ); - assert_eq!( - validators.source_at_next_header(1, 200), - (200, &ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])), - ); - } - - #[test] - fn maybe_signals_validators_change_works() { - // when contract is active, but bloom has no required bits set - let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(Default::default(), Vec::new())); - let validators = Validators::new(&config); - let mut header = AuraHeader { - number: u64::MAX, - ..Default::default() - }; - assert!(!validators.maybe_signals_validators_change(&header)); - - // when contract is active 
and bloom has required bits set - header.log_bloom = (&[0xff; 256]).into(); - assert!(validators.maybe_signals_validators_change(&header)); - - // when list is active and bloom has required bits set - let config = ValidatorsConfiguration::Single(ValidatorsSource::List(vec![[42; 20].into()])); - let validators = Validators::new(&config); - assert!(!validators.maybe_signals_validators_change(&header)); - } - - #[test] - fn extract_validators_change_works() { - let config = ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(vec![[1; 20].into()])), - (100, ValidatorsSource::List(vec![[2; 20].into()])), - (200, ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])), - ]); - let validators = Validators::new(&config); - let mut header = AuraHeader { - number: 100, - ..Default::default() - }; - - // when we're at the block that switches to list source - assert_eq!( - validators.extract_validators_change(&header, None), - Ok((None, Some(vec![[2; 20].into()]))), - ); - - // when we're inside list range - header.number = 150; - assert_eq!(validators.extract_validators_change(&header, None), Ok((None, None))); - - // when we're at the block that switches to contract source - header.number = 200; - assert_eq!( - validators.extract_validators_change(&header, None), - Ok((Some(vec![[3; 20].into()]), None)), - ); - - // when we're inside contract range and logs bloom signals change - // but we have no receipts - header.number = 250; - header.log_bloom = (&[0xff; 256]).into(); - assert_eq!( - validators.extract_validators_change(&header, None), - Err(Error::MissingTransactionsReceipts), - ); - - // when we're inside contract range and logs bloom signals change - // but there's no change in receipts - header.receipts_root = "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - .parse() - .unwrap(); - assert_eq!( - validators.extract_validators_change(&header, Some(Vec::new())), - Ok((None, None)), - ); - - // when we're inside contract range 
and logs bloom signals change - // and there's change in receipts - let receipts = vec![validators_change_receipt(Default::default())]; - header.receipts_root = compute_merkle_root(receipts.iter().map(|r| r.rlp())); - assert_eq!( - validators.extract_validators_change(&header, Some(receipts)), - Ok((Some(vec![[7; 20].into()]), None)), - ); - - // when incorrect receipts root passed - assert_eq!( - validators.extract_validators_change(&header, Some(Vec::new())), - Err(Error::TransactionsReceiptsMismatch), - ); - } - - fn try_finalize_with_scheduled_change(scheduled_at: Option) -> Option { - run_test(TOTAL_VALIDATORS, |_| { - let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(Default::default(), Vec::new())); - let validators = Validators::new(&config); - let storage = BridgeStorage::::new(); - - // when we're finailizing blocks 10...100 - let id10 = HeaderId { - number: 10, - hash: [10; 32].into(), - }; - let id100 = HeaderId { - number: 100, - hash: [100; 32].into(), - }; - let finalized_blocks = vec![(id10, None), (id100, None)]; - let header100 = StoredHeader:: { - submitter: None, - header: AuraHeader { - number: 100, - ..Default::default() - }, - total_difficulty: 0.into(), - next_validators_set_id: 0, - last_signal_block: scheduled_at, - }; - let scheduled_change = AuraScheduledChange { - validators: validators_addresses(1), - prev_signal_block: None, - }; - Headers::::insert(id100.hash, header100); - if let Some(scheduled_at) = scheduled_at { - ScheduledChanges::::insert(scheduled_at.hash, scheduled_change); - } - - validators.finalize_validators_change(&storage, &finalized_blocks) - }) - } - - #[test] - fn finalize_validators_change_finalizes_scheduled_change() { - let id50 = HeaderId { - number: 50, - ..Default::default() - }; - assert_eq!( - try_finalize_with_scheduled_change(Some(id50)), - Some(ChangeToEnact { - signal_block: Some(id50), - validators: validators_addresses(1), - }), - ); - } - - #[test] - fn 
finalize_validators_change_does_not_finalize_when_changes_are_not_scheduled() { - assert_eq!(try_finalize_with_scheduled_change(None), None); - } - - #[test] - fn finalize_validators_change_does_not_finalize_changes_when_they_are_outside_of_range() { - let id5 = HeaderId { - number: 5, - ..Default::default() - }; - assert_eq!(try_finalize_with_scheduled_change(Some(id5)), None); - } -} diff --git a/polkadot/bridges/modules/ethereum/src/verification.rs b/polkadot/bridges/modules/ethereum/src/verification.rs deleted file mode 100644 index 68a17ff391de5500494ff29ac48ad50a140939f9..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/verification.rs +++ /dev/null @@ -1,945 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::error::Error; -use crate::validators::{Validators, ValidatorsConfiguration}; -use crate::{AuraConfiguration, AuraScheduledChange, ChainTime, ImportContext, PoolConfiguration, Storage}; -use bp_eth_poa::{ - public_to_address, step_validator, Address, AuraHeader, HeaderId, Receipt, SealedEmptyStep, H256, H520, U128, U256, -}; -use codec::Encode; -use sp_io::crypto::secp256k1_ecdsa_recover; -use sp_runtime::transaction_validity::TransactionTag; -use sp_std::{vec, vec::Vec}; - -/// Pre-check to see if should try and import this header. -/// Returns error if we should not try to import this block. -/// Returns ID of passed header and best finalized header. -pub fn is_importable_header(storage: &S, header: &AuraHeader) -> Result<(HeaderId, HeaderId), Error> { - // we never import any header that competes with finalized header - let finalized_id = storage.finalized_block(); - if header.number <= finalized_id.number { - return Err(Error::AncientHeader); - } - // we never import any header with known hash - let id = header.compute_id(); - if storage.header(&id.hash).is_some() { - return Err(Error::KnownHeader); - } - - Ok((id, finalized_id)) -} - -/// Try accept unsigned aura header into transaction pool. -/// -/// Returns required and provided tags. 
-pub fn accept_aura_header_into_pool( - storage: &S, - config: &AuraConfiguration, - validators_config: &ValidatorsConfiguration, - pool_config: &PoolConfiguration, - header: &AuraHeader, - chain_time: &CT, - receipts: Option<&Vec>, -) -> Result<(Vec, Vec), Error> { - // check if we can verify further - let (header_id, _) = is_importable_header(storage, header)?; - - // we can always do contextless checks - contextless_checks(config, header, chain_time)?; - - // we want to avoid having same headers twice in the pool - // => we're strict about receipts here - if we need them, we require receipts to be Some, - // otherwise we require receipts to be None - let receipts_required = Validators::new(validators_config).maybe_signals_validators_change(header); - match (receipts_required, receipts.is_some()) { - (true, false) => return Err(Error::MissingTransactionsReceipts), - (false, true) => return Err(Error::RedundantTransactionsReceipts), - _ => (), - } - - // we do not want to have all future headers in the pool at once - // => if we see header with number > maximal ever seen header number + LIMIT, - // => we consider this transaction invalid, but only at this moment (we do not want to ban it) - // => let's mark it as Unknown transaction - let (best_id, _) = storage.best_block(); - let difference = header.number.saturating_sub(best_id.number); - if difference > pool_config.max_future_number_difference { - return Err(Error::UnsignedTooFarInTheFuture); - } - - // TODO: only accept new headers when we're at the tip of PoA chain - // https://github.com/paritytech/parity-bridges-common/issues/38 - - // we want to see at most one header with given number from single authority - // => every header is providing tag (block_number + authority) - // => since only one tx in the pool can provide the same tag, they're auto-deduplicated - let provides_number_and_authority_tag = (header.number, header.author).encode(); - - // we want to see several 'future' headers in the pool at 
once, but we may not have access to - // previous headers here - // => we can at least 'verify' that headers comprise a chain by providing and requiring - // tag (header.number, header.hash) - let provides_header_number_and_hash_tag = header_id.encode(); - - // depending on whether parent header is available, we either perform full or 'shortened' check - let context = storage.import_context(None, &header.parent_hash); - let tags = match context { - Some(context) => { - let header_step = contextual_checks(config, &context, None, header)?; - validator_checks(config, &context.validators_set().validators, header, header_step)?; - - // since our parent is already in the storage, we do not require it - // to be in the transaction pool - ( - vec![], - vec![provides_number_and_authority_tag, provides_header_number_and_hash_tag], - ) - } - None => { - // we know nothing about parent header - // => the best thing we can do is to believe that there are no forks in - // PoA chain AND that the header is produced either by previous, or next - // scheduled validators set change - let header_step = header.step().ok_or(Error::MissingStep)?; - let best_context = storage.import_context(None, &best_id.hash).expect( - "import context is None only when header is missing from the storage;\ - best header is always in the storage; qed", - ); - let validators_check_result = - validator_checks(config, &best_context.validators_set().validators, header, header_step); - if let Err(error) = validators_check_result { - find_next_validators_signal(storage, &best_context) - .ok_or(error) - .and_then(|next_validators| validator_checks(config, &next_validators, header, header_step))?; - } - - // since our parent is missing from the storage, we **DO** require it - // to be in the transaction pool - // (- 1 can't underflow because there's always best block in the header) - let requires_header_number_and_hash_tag = HeaderId { - number: header.number - 1, - hash: header.parent_hash, - } - .encode(); - ( 
- vec![requires_header_number_and_hash_tag], - vec![provides_number_and_authority_tag, provides_header_number_and_hash_tag], - ) - } - }; - - // the heaviest, but rare operation - we do not want invalid receipts in the pool - if let Some(receipts) = receipts { - log::trace!(target: "runtime", "Got receipts! {:?}", receipts); - if header.check_receipts_root(receipts).is_err() { - return Err(Error::TransactionsReceiptsMismatch); - } - } - - Ok(tags) -} - -/// Verify header by Aura rules. -pub fn verify_aura_header( - storage: &S, - config: &AuraConfiguration, - submitter: Option, - header: &AuraHeader, - chain_time: &CT, -) -> Result, Error> { - // let's do the lightest check first - contextless_checks(config, header, chain_time)?; - - // the rest of checks requires access to the parent header - let context = storage.import_context(submitter, &header.parent_hash).ok_or_else(|| { - log::warn!( - target: "runtime", - "Missing parent PoA block: ({:?}, {})", - header.number.checked_sub(1), - header.parent_hash, - ); - - Error::MissingParentBlock - })?; - let header_step = contextual_checks(config, &context, None, header)?; - validator_checks(config, &context.validators_set().validators, header, header_step)?; - - Ok(context) -} - -/// Perform basic checks that only require header itself. 
-fn contextless_checks( - config: &AuraConfiguration, - header: &AuraHeader, - chain_time: &CT, -) -> Result<(), Error> { - let expected_seal_fields = expected_header_seal_fields(config, header); - if header.seal.len() != expected_seal_fields { - return Err(Error::InvalidSealArity); - } - if header.number >= u64::MAX { - return Err(Error::RidiculousNumber); - } - if header.gas_used > header.gas_limit { - return Err(Error::TooMuchGasUsed); - } - if header.gas_limit < config.min_gas_limit { - return Err(Error::InvalidGasLimit); - } - if header.gas_limit > config.max_gas_limit { - return Err(Error::InvalidGasLimit); - } - if header.number != 0 && header.extra_data.len() as u64 > config.maximum_extra_data_size { - return Err(Error::ExtraDataOutOfBounds); - } - - // we can't detect if block is from future in runtime - // => let's only do an overflow check - if header.timestamp > i32::MAX as u64 { - return Err(Error::TimestampOverflow); - } - - if chain_time.is_timestamp_ahead(header.timestamp) { - return Err(Error::HeaderTimestampIsAhead); - } - - Ok(()) -} - -/// Perform checks that require access to parent header. -fn contextual_checks( - config: &AuraConfiguration, - context: &ImportContext, - validators_override: Option<&[Address]>, - header: &AuraHeader, -) -> Result { - let validators = validators_override.unwrap_or_else(|| &context.validators_set().validators); - let header_step = header.step().ok_or(Error::MissingStep)?; - let parent_step = context.parent_header().step().ok_or(Error::MissingStep)?; - - // Ensure header is from the step after context. 
- if header_step == parent_step { - return Err(Error::DoubleVote); - } - #[allow(clippy::suspicious_operation_groupings)] - if header.number >= config.validate_step_transition && header_step < parent_step { - return Err(Error::DoubleVote); - } - - // If empty step messages are enabled we will validate the messages in the seal, missing messages are not - // reported as there's no way to tell whether the empty step message was never sent or simply not included. - let empty_steps_len = match header.number >= config.empty_steps_transition { - true => { - let strict_empty_steps = header.number >= config.strict_empty_steps_transition; - let empty_steps = header.empty_steps().ok_or(Error::MissingEmptySteps)?; - let empty_steps_len = empty_steps.len(); - let mut prev_empty_step = 0; - - for empty_step in empty_steps { - if empty_step.step <= parent_step || empty_step.step >= header_step { - return Err(Error::InsufficientProof); - } - - if !verify_empty_step(&header.parent_hash, &empty_step, validators) { - return Err(Error::InsufficientProof); - } - - if strict_empty_steps { - if empty_step.step <= prev_empty_step { - return Err(Error::InsufficientProof); - } - - prev_empty_step = empty_step.step; - } - } - - empty_steps_len - } - false => 0, - }; - - // Validate chain score. - if header.number >= config.validate_score_transition { - let expected_difficulty = calculate_score(parent_step, header_step, empty_steps_len as _); - if header.difficulty != expected_difficulty { - return Err(Error::InvalidDifficulty); - } - } - - Ok(header_step) -} - -/// Check that block is produced by expected validator. 
-fn validator_checks( - config: &AuraConfiguration, - validators: &[Address], - header: &AuraHeader, - header_step: u64, -) -> Result<(), Error> { - let expected_validator = *step_validator(validators, header_step); - if header.author != expected_validator { - return Err(Error::NotValidator); - } - - let validator_signature = header.signature().ok_or(Error::MissingSignature)?; - let header_seal_hash = header - .seal_hash(header.number >= config.empty_steps_transition) - .ok_or(Error::MissingEmptySteps)?; - let is_invalid_proposer = !verify_signature(&expected_validator, &validator_signature, &header_seal_hash); - if is_invalid_proposer { - return Err(Error::NotValidator); - } - - Ok(()) -} - -/// Returns expected number of seal fields in the header. -fn expected_header_seal_fields(config: &AuraConfiguration, header: &AuraHeader) -> usize { - if header.number != u64::MAX && header.number >= config.empty_steps_transition { - 3 - } else { - 2 - } -} - -/// Verify single sealed empty step. -fn verify_empty_step(parent_hash: &H256, step: &SealedEmptyStep, validators: &[Address]) -> bool { - let expected_validator = *step_validator(validators, step.step); - let message = step.message(parent_hash); - verify_signature(&expected_validator, &step.signature, &message) -} - -/// Chain scoring: total weight is sqrt(U256::MAX)*height - step -pub(crate) fn calculate_score(parent_step: u64, current_step: u64, current_empty_steps: usize) -> U256 { - U256::from(U128::MAX) + U256::from(parent_step) - U256::from(current_step) + U256::from(current_empty_steps) -} - -/// Verify that the signature over message has been produced by given validator. 
-fn verify_signature(expected_validator: &Address, signature: &H520, message: &H256) -> bool { - secp256k1_ecdsa_recover(signature.as_fixed_bytes(), message.as_fixed_bytes()) - .map(|public| public_to_address(&public)) - .map(|address| *expected_validator == address) - .unwrap_or(false) -} - -/// Find next unfinalized validators set change after finalized set. -fn find_next_validators_signal(storage: &S, context: &ImportContext) -> Option> { - // that's the earliest block number we may met in following loop - // it may be None if that's the first set - let best_set_signal_block = context.validators_set().signal_block; - - // if parent schedules validators set change, then it may be our set - // else we'll start with last known change - let mut current_set_signal_block = context.last_signal_block(); - let mut next_scheduled_set: Option = None; - - loop { - // if we have reached block that signals finalized change, then - // next_current_block_hash points to the block that schedules next - // change - let current_scheduled_set = match current_set_signal_block { - Some(current_set_signal_block) if Some(¤t_set_signal_block) == best_set_signal_block.as_ref() => { - return next_scheduled_set.map(|scheduled_set| scheduled_set.validators) - } - None => return next_scheduled_set.map(|scheduled_set| scheduled_set.validators), - Some(current_set_signal_block) => storage.scheduled_change(¤t_set_signal_block.hash).expect( - "header that is associated with this change is not pruned;\ - scheduled changes are only removed when header is pruned; qed", - ), - }; - - current_set_signal_block = current_scheduled_set.prev_signal_block; - next_scheduled_set = Some(current_scheduled_set); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{ - insert_header, run_test_with_genesis, test_aura_config, validator, validator_address, validators_addresses, - validators_change_receipt, AccountId, ConstChainTime, HeaderBuilder, TestRuntime, GAS_LIMIT, - }; - use 
crate::validators::ValidatorsSource; - use crate::DefaultInstance; - use crate::{ - pool_configuration, BridgeStorage, FinalizedBlock, Headers, HeadersByNumber, NextValidatorsSetId, - ScheduledChanges, ValidatorsSet, ValidatorsSets, - }; - use bp_eth_poa::{compute_merkle_root, rlp_encode, TransactionOutcome, H520, U256}; - use frame_support::{StorageMap, StorageValue}; - use hex_literal::hex; - use secp256k1::SecretKey; - use sp_runtime::transaction_validity::TransactionTag; - - const GENESIS_STEP: u64 = 42; - const TOTAL_VALIDATORS: usize = 3; - - fn genesis() -> AuraHeader { - HeaderBuilder::genesis().step(GENESIS_STEP).sign_by(&validator(0)) - } - - fn verify_with_config(config: &AuraConfiguration, header: &AuraHeader) -> Result, Error> { - run_test_with_genesis(genesis(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - verify_aura_header(&storage, config, None, header, &ConstChainTime::default()) - }) - } - - fn default_verify(header: &AuraHeader) -> Result, Error> { - verify_with_config(&test_aura_config(), header) - } - - fn default_accept_into_pool( - mut make_header: impl FnMut(&[SecretKey]) -> (AuraHeader, Option>), - ) -> Result<(Vec, Vec), Error> { - run_test_with_genesis(genesis(), TOTAL_VALIDATORS, |_| { - let validators = vec![validator(0), validator(1), validator(2)]; - let mut storage = BridgeStorage::::new(); - let block1 = HeaderBuilder::with_parent_number(0).sign_by_set(&validators); - insert_header(&mut storage, block1); - let block2 = HeaderBuilder::with_parent_number(1).sign_by_set(&validators); - let block2_id = block2.compute_id(); - insert_header(&mut storage, block2); - let block3 = HeaderBuilder::with_parent_number(2).sign_by_set(&validators); - insert_header(&mut storage, block3); - - FinalizedBlock::::put(block2_id); - - let validators_config = - ValidatorsConfiguration::Single(ValidatorsSource::Contract(Default::default(), Vec::new())); - let (header, receipts) = make_header(&validators); - 
accept_aura_header_into_pool( - &storage, - &test_aura_config(), - &validators_config, - &pool_configuration(), - &header, - &(), - receipts.as_ref(), - ) - }) - } - - fn change_validators_set_at(number: u64, finalized_set: Vec
, signalled_set: Option>) { - let set_id = NextValidatorsSetId::::get(); - NextValidatorsSetId::::put(set_id + 1); - ValidatorsSets::::insert( - set_id, - ValidatorsSet { - validators: finalized_set, - signal_block: None, - enact_block: HeaderId { - number: 0, - hash: HeadersByNumber::::get(&0).unwrap()[0], - }, - }, - ); - - let header_hash = HeadersByNumber::::get(&number).unwrap()[0]; - let mut header = Headers::::get(&header_hash).unwrap(); - header.next_validators_set_id = set_id; - if let Some(signalled_set) = signalled_set { - header.last_signal_block = Some(HeaderId { - number: header.header.number - 1, - hash: header.header.parent_hash, - }); - ScheduledChanges::::insert( - header.header.parent_hash, - AuraScheduledChange { - validators: signalled_set, - prev_signal_block: None, - }, - ); - } - - Headers::::insert(header_hash, header); - } - - #[test] - fn verifies_seal_count() { - // when there are no seals at all - let mut header = AuraHeader::default(); - assert_eq!(default_verify(&header), Err(Error::InvalidSealArity)); - - // when there's single seal (we expect 2 or 3 seals) - header.seal = vec![vec![]]; - assert_eq!(default_verify(&header), Err(Error::InvalidSealArity)); - - // when there's 3 seals (we expect 2 by default) - header.seal = vec![vec![], vec![], vec![]]; - assert_eq!(default_verify(&header), Err(Error::InvalidSealArity)); - - // when there's 2 seals - header.seal = vec![vec![], vec![]]; - assert_ne!(default_verify(&header), Err(Error::InvalidSealArity)); - } - - #[test] - fn verifies_header_number() { - // when number is u64::MAX - let header = HeaderBuilder::with_number(u64::MAX).sign_by(&validator(0)); - assert_eq!(default_verify(&header), Err(Error::RidiculousNumber)); - - // when header is < u64::MAX - let header = HeaderBuilder::with_number(u64::MAX - 1).sign_by(&validator(0)); - assert_ne!(default_verify(&header), Err(Error::RidiculousNumber)); - } - - #[test] - fn verifies_gas_used() { - // when gas used is larger than gas limit 
- let header = HeaderBuilder::with_number(1) - .gas_used((GAS_LIMIT + 1).into()) - .sign_by(&validator(0)); - assert_eq!(default_verify(&header), Err(Error::TooMuchGasUsed)); - - // when gas used is less than gas limit - let header = HeaderBuilder::with_number(1) - .gas_used((GAS_LIMIT - 1).into()) - .sign_by(&validator(0)); - assert_ne!(default_verify(&header), Err(Error::TooMuchGasUsed)); - } - - #[test] - fn verifies_gas_limit() { - let mut config = test_aura_config(); - config.min_gas_limit = 100.into(); - config.max_gas_limit = 200.into(); - - // when limit is lower than expected - let header = HeaderBuilder::with_number(1) - .gas_limit(50.into()) - .sign_by(&validator(0)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit)); - - // when limit is larger than expected - let header = HeaderBuilder::with_number(1) - .gas_limit(250.into()) - .sign_by(&validator(0)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit)); - - // when limit is within expected range - let header = HeaderBuilder::with_number(1) - .gas_limit(150.into()) - .sign_by(&validator(0)); - assert_ne!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit)); - } - - #[test] - fn verifies_extra_data_len() { - // when extra data is too large - let header = HeaderBuilder::with_number(1) - .extra_data(std::iter::repeat(42).take(1000).collect::>()) - .sign_by(&validator(0)); - assert_eq!(default_verify(&header), Err(Error::ExtraDataOutOfBounds)); - - // when extra data size is OK - let header = HeaderBuilder::with_number(1) - .extra_data(std::iter::repeat(42).take(10).collect::>()) - .sign_by(&validator(0)); - assert_ne!(default_verify(&header), Err(Error::ExtraDataOutOfBounds)); - } - - #[test] - fn verifies_timestamp() { - // when timestamp overflows i32 - let header = HeaderBuilder::with_number(1) - .timestamp(i32::MAX as u64 + 1) - .sign_by(&validator(0)); - assert_eq!(default_verify(&header), Err(Error::TimestampOverflow)); - - // 
when timestamp doesn't overflow i32 - let header = HeaderBuilder::with_number(1) - .timestamp(i32::MAX as u64) - .sign_by(&validator(0)); - assert_ne!(default_verify(&header), Err(Error::TimestampOverflow)); - } - - #[test] - fn verifies_chain_time() { - // expected import context after verification - let expect = ImportContext:: { - submitter: None, - parent_hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3").into(), - parent_header: genesis(), - parent_total_difficulty: U256::zero(), - parent_scheduled_change: None, - validators_set_id: 0, - validators_set: ValidatorsSet { - validators: vec![ - hex!("dc5b20847f43d67928f49cd4f85d696b5a7617b5").into(), - hex!("897df33a7b3c62ade01e22c13d48f98124b4480f").into(), - hex!("05c987b34c6ef74e0c7e69c6e641120c24164c2d").into(), - ], - signal_block: None, - enact_block: HeaderId { - number: 0, - hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3").into(), - }, - }, - last_signal_block: None, - }; - - // header is behind - let header = HeaderBuilder::with_parent(&genesis()) - .timestamp(i32::MAX as u64 / 2 - 100) - .sign_by(&validator(1)); - assert_eq!(default_verify(&header).unwrap(), expect); - - // header is ahead - let header = HeaderBuilder::with_parent(&genesis()) - .timestamp(i32::MAX as u64 / 2 + 100) - .sign_by(&validator(1)); - assert_eq!(default_verify(&header), Err(Error::HeaderTimestampIsAhead)); - - // header has same timestamp as ConstChainTime - let header = HeaderBuilder::with_parent(&genesis()) - .timestamp(i32::MAX as u64 / 2) - .sign_by(&validator(1)); - assert_eq!(default_verify(&header).unwrap(), expect); - } - - #[test] - fn verifies_parent_existence() { - // when there's no parent in the storage - let header = HeaderBuilder::with_number(1).sign_by(&validator(0)); - assert_eq!(default_verify(&header), Err(Error::MissingParentBlock)); - - // when parent is in the storage - let header = HeaderBuilder::with_parent(&genesis()).sign_by(&validator(0)); - 
assert_ne!(default_verify(&header), Err(Error::MissingParentBlock)); - } - - #[test] - fn verifies_step() { - // when step is missing from seals - let mut header = AuraHeader { - seal: vec![vec![], vec![]], - gas_limit: test_aura_config().min_gas_limit, - parent_hash: genesis().compute_hash(), - ..Default::default() - }; - assert_eq!(default_verify(&header), Err(Error::MissingStep)); - - // when step is the same as for the parent block - header.seal[0] = rlp_encode(&42u64).to_vec(); - assert_eq!(default_verify(&header), Err(Error::DoubleVote)); - - // when step is OK - header.seal[0] = rlp_encode(&43u64).to_vec(); - assert_ne!(default_verify(&header), Err(Error::DoubleVote)); - - // now check with validate_step check enabled - let mut config = test_aura_config(); - config.validate_step_transition = 0; - - // when step is lesser that for the parent block - header.seal[0] = rlp_encode(&40u64).to_vec(); - header.seal = vec![vec![40], vec![]]; - assert_eq!(verify_with_config(&config, &header), Err(Error::DoubleVote)); - - // when step is OK - header.seal[0] = rlp_encode(&44u64).to_vec(); - assert_ne!(verify_with_config(&config, &header), Err(Error::DoubleVote)); - } - - #[test] - fn verifies_empty_step() { - let mut config = test_aura_config(); - config.empty_steps_transition = 0; - - // when empty step duplicates parent step - let header = HeaderBuilder::with_parent(&genesis()) - .empty_steps(&[(&validator(0), GENESIS_STEP)]) - .step(GENESIS_STEP + 3) - .sign_by(&validator(3)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); - - // when empty step signature check fails - let header = HeaderBuilder::with_parent(&genesis()) - .empty_steps(&[(&validator(100), GENESIS_STEP + 1)]) - .step(GENESIS_STEP + 3) - .sign_by(&validator(3)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); - - // when we are accepting strict empty steps and they come not in order - config.strict_empty_steps_transition = 0; - let 
header = HeaderBuilder::with_parent(&genesis()) - .empty_steps(&[(&validator(2), GENESIS_STEP + 2), (&validator(1), GENESIS_STEP + 1)]) - .step(GENESIS_STEP + 3) - .sign_by(&validator(3)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); - - // when empty steps are OK - let header = HeaderBuilder::with_parent(&genesis()) - .empty_steps(&[(&validator(1), GENESIS_STEP + 1), (&validator(2), GENESIS_STEP + 2)]) - .step(GENESIS_STEP + 3) - .sign_by(&validator(3)); - assert_ne!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); - } - - #[test] - fn verifies_chain_score() { - let mut config = test_aura_config(); - config.validate_score_transition = 0; - - // when chain score is invalid - let header = HeaderBuilder::with_parent(&genesis()) - .difficulty(100.into()) - .sign_by(&validator(0)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidDifficulty)); - - // when chain score is accepted - let header = HeaderBuilder::with_parent(&genesis()).sign_by(&validator(0)); - assert_ne!(verify_with_config(&config, &header), Err(Error::InvalidDifficulty)); - } - - #[test] - fn verifies_validator() { - let good_header = HeaderBuilder::with_parent(&genesis()).sign_by(&validator(1)); - - // when header author is invalid - let mut header = good_header.clone(); - header.author = Default::default(); - assert_eq!(default_verify(&header), Err(Error::NotValidator)); - - // when header signature is invalid - let mut header = good_header.clone(); - header.seal[1] = rlp_encode(&H520::default()).to_vec(); - assert_eq!(default_verify(&header), Err(Error::NotValidator)); - - // when everything is OK - assert_eq!(default_verify(&good_header).map(|_| ()), Ok(())); - } - - #[test] - fn pool_verifies_known_blocks() { - // when header is known - assert_eq!( - default_accept_into_pool(|validators| (HeaderBuilder::with_parent_number(2).sign_by_set(validators), None)), - Err(Error::KnownHeader), - ); - } - - #[test] - fn 
pool_verifies_ancient_blocks() { - // when header number is less than finalized - assert_eq!( - default_accept_into_pool(|validators| ( - HeaderBuilder::with_parent_number(1) - .gas_limit((GAS_LIMIT + 1).into()) - .sign_by_set(validators), - None, - ),), - Err(Error::AncientHeader), - ); - } - - #[test] - fn pool_rejects_headers_without_required_receipts() { - assert_eq!( - default_accept_into_pool(|_| ( - AuraHeader { - number: 20_000_000, - seal: vec![vec![], vec![]], - gas_limit: test_aura_config().min_gas_limit, - log_bloom: (&[0xff; 256]).into(), - ..Default::default() - }, - None, - ),), - Err(Error::MissingTransactionsReceipts), - ); - } - - #[test] - fn pool_rejects_headers_with_redundant_receipts() { - assert_eq!( - default_accept_into_pool(|validators| ( - HeaderBuilder::with_parent_number(3).sign_by_set(validators), - Some(vec![Receipt { - gas_used: 1.into(), - log_bloom: (&[0xff; 256]).into(), - logs: vec![], - outcome: TransactionOutcome::Unknown, - }]), - ),), - Err(Error::RedundantTransactionsReceipts), - ); - } - - #[test] - fn pool_verifies_future_block_number() { - // when header is too far from the future - assert_eq!( - default_accept_into_pool(|validators| (HeaderBuilder::with_number(100).sign_by_set(validators), None),), - Err(Error::UnsignedTooFarInTheFuture), - ); - } - - #[test] - fn pool_performs_full_verification_when_parent_is_known() { - // if parent is known, then we'll execute contextual_checks, which - // checks for DoubleVote - assert_eq!( - default_accept_into_pool(|validators| ( - HeaderBuilder::with_parent_number(3) - .step(GENESIS_STEP + 3) - .sign_by_set(validators), - None, - ),), - Err(Error::DoubleVote), - ); - } - - #[test] - fn pool_performs_validators_checks_when_parent_is_unknown() { - // if parent is unknown, then we still need to check if header has required signature - // (even if header will be considered invalid/duplicate later, we can use this signature - // as a proof of malicious action by this validator) - 
assert_eq!( - default_accept_into_pool(|_| (HeaderBuilder::with_number(8).step(8).sign_by(&validator(1)), None,)), - Err(Error::NotValidator), - ); - } - - #[test] - fn pool_verifies_header_with_known_parent() { - let mut hash = None; - assert_eq!( - default_accept_into_pool(|validators| { - let header = HeaderBuilder::with_parent_number(3).sign_by_set(validators); - hash = Some(header.compute_hash()); - (header, None) - }), - Ok(( - // no tags are required - vec![], - // header provides two tags - vec![ - (4u64, validators_addresses(3)[1]).encode(), - (4u64, hash.unwrap()).encode(), - ], - )), - ); - } - - #[test] - fn pool_verifies_header_with_unknown_parent() { - let mut id = None; - let mut parent_id = None; - assert_eq!( - default_accept_into_pool(|validators| { - let header = HeaderBuilder::with_number(5) - .step(GENESIS_STEP + 5) - .sign_by_set(validators); - id = Some(header.compute_id()); - parent_id = header.parent_id(); - (header, None) - }), - Ok(( - // parent tag required - vec![parent_id.unwrap().encode()], - // header provides two tags - vec![(5u64, validator_address(2)).encode(), id.unwrap().encode(),], - )), - ); - } - - #[test] - fn pool_uses_next_validators_set_when_finalized_fails() { - assert_eq!( - default_accept_into_pool(|actual_validators| { - // change finalized set at parent header - change_validators_set_at(3, validators_addresses(1), None); - - // header is signed using wrong set - let header = HeaderBuilder::with_number(5) - .step(GENESIS_STEP + 2) - .sign_by_set(actual_validators); - - (header, None) - }), - Err(Error::NotValidator), - ); - - let mut id = None; - let mut parent_id = None; - assert_eq!( - default_accept_into_pool(|actual_validators| { - // change finalized set at parent header + signal valid set at parent block - change_validators_set_at(3, validators_addresses(10), Some(validators_addresses(3))); - - // header is signed using wrong set - let header = HeaderBuilder::with_number(5) - .step(GENESIS_STEP + 2) - 
.sign_by_set(actual_validators); - id = Some(header.compute_id()); - parent_id = header.parent_id(); - - (header, None) - }), - Ok(( - // parent tag required - vec![parent_id.unwrap().encode(),], - // header provides two tags - vec![(5u64, validator_address(2)).encode(), id.unwrap().encode(),], - )), - ); - } - - #[test] - fn pool_rejects_headers_with_invalid_receipts() { - assert_eq!( - default_accept_into_pool(|validators| { - let header = HeaderBuilder::with_parent_number(3) - .log_bloom((&[0xff; 256]).into()) - .sign_by_set(validators); - (header, Some(vec![validators_change_receipt(Default::default())])) - }), - Err(Error::TransactionsReceiptsMismatch), - ); - } - - #[test] - fn pool_accepts_headers_with_valid_receipts() { - let mut hash = None; - let receipts = vec![validators_change_receipt(Default::default())]; - let receipts_root = compute_merkle_root(receipts.iter().map(|r| r.rlp())); - - assert_eq!( - default_accept_into_pool(|validators| { - let header = HeaderBuilder::with_parent_number(3) - .log_bloom((&[0xff; 256]).into()) - .receipts_root(receipts_root) - .sign_by_set(validators); - hash = Some(header.compute_hash()); - (header, Some(receipts.clone())) - }), - Ok(( - // no tags are required - vec![], - // header provides two tags - vec![ - (4u64, validators_addresses(3)[1]).encode(), - (4u64, hash.unwrap()).encode(), - ], - )), - ); - } -} diff --git a/polkadot/bridges/modules/grandpa/Cargo.toml b/polkadot/bridges/modules/grandpa/Cargo.toml index 53f1916d62d9836b713d56e2e8ab9d3d35658b1e..01195abe89e16435dfd6cd7c9fce164e1c86103d 100644 --- a/polkadot/bridges/modules/grandpa/Cargo.toml +++ b/polkadot/bridges/modules/grandpa/Cargo.toml @@ -8,8 +8,8 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -finality-grandpa = { version = "0.14.4", 
default-features = false } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false } +finality-grandpa = { version = "0.14.0", default-features = false } log = { version = "0.4.14", default-features = false } num-traits = { version = "0.2", default-features = false } scale-info = { version = "1.0", default-features = false, features = ["derive"] } @@ -22,18 +22,19 @@ bp-header-chain = { path = "../../primitives/header-chain", default-features = f # Substrate Dependencies -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } # Optional Benchmarking Dependencies bp-test-utils = { path = "../../primitives/test-utils", default-features = false, optional = true } 
-frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } [dev-dependencies] +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } [features] diff --git a/polkadot/bridges/modules/grandpa/src/benchmarking.rs b/polkadot/bridges/modules/grandpa/src/benchmarking.rs index bc027e86a4b59863b216bc87b6c31b2c40de66ac..46e1e41a87028bd18dd7ce03a3862e00cd518d6f 100644 --- a/polkadot/bridges/modules/grandpa/src/benchmarking.rs +++ b/polkadot/bridges/modules/grandpa/src/benchmarking.rs @@ -23,7 +23,7 @@ //! 2. The number of `pre-commits` in the justification //! //! Vote ancestries are the headers between (`finality_target`, `head_of_chain`], where -//! `header_of_chain` is a decendant of `finality_target`. +//! `header_of_chain` is a descendant of `finality_target`. //! //! Pre-commits are messages which are signed by validators at the head of the chain they think is //! the best. @@ -34,7 +34,7 @@ //! [A] <- [B] <- [C] //! //! The common ancestor of both forks is block A, so this is what GRANDPA will finalize. In order to -//! verify this we will have vote ancestries of [B, C, B', C'] and pre-commits [C, C']. +//! verify this we will have vote ancestries of `[B, C, B', C']` and pre-commits `[C, C']`. //! //! Note that the worst case scenario here would be a justification where each validator has it's //! own fork which is `SESSION_LENGTH` blocks long. 
@@ -42,7 +42,8 @@ use crate::*; use bp_test_utils::{ - accounts, make_justification_for_header, JustificationGeneratorParams, TEST_GRANDPA_ROUND, TEST_GRANDPA_SET_ID, + accounts, make_justification_for_header, JustificationGeneratorParams, TEST_GRANDPA_ROUND, + TEST_GRANDPA_SET_ID, }; use frame_benchmarking::{benchmarks_instance_pallet, whitelisted_caller}; use frame_support::traits::Get; @@ -63,7 +64,7 @@ const MAX_VALIDATOR_SET_SIZE: u32 = 1024; /// Returns number of first header to be imported. /// -/// Since we boostrap the pallet with `HeadersToKeep` already imported headers, +/// Since we bootstrap the pallet with `HeadersToKeep` already imported headers, /// this function computes the next expected header number to import. fn header_number, I: 'static, N: From>() -> N { (T::HeadersToKeep::get() + 1).into() @@ -80,7 +81,7 @@ fn prepare_benchmark_data, I: 'static>( .collect::>(); let init_data = InitializationData { - header: bp_test_utils::test_header(Zero::zero()), + header: Box::new(bp_test_utils::test_header(Zero::zero())), authority_list, set_id: TEST_GRANDPA_SET_ID, is_halted: false, @@ -109,7 +110,7 @@ benchmarks_instance_pallet! { let v in 1..MAX_VOTE_ANCESTRIES; let caller: T::AccountId = whitelisted_caller(); let (header, justification) = prepare_benchmark_data::(p, v); - }: submit_finality_proof(RawOrigin::Signed(caller), header, justification) + }: submit_finality_proof(RawOrigin::Signed(caller), Box::new(header), justification) verify { let header: BridgedHeader = bp_test_utils::test_header(header_number::()); let expected_hash = header.hash(); diff --git a/polkadot/bridges/modules/grandpa/src/lib.rs b/polkadot/bridges/modules/grandpa/src/lib.rs index 700df5b8469099af516d8c6d3ca4851fb2d9fcd3..cbc85da30259f0a6bf201779efed77c489bce361 100644 --- a/polkadot/bridges/modules/grandpa/src/lib.rs +++ b/polkadot/bridges/modules/grandpa/src/lib.rs @@ -28,7 +28,7 @@ //! //! Since this pallet only tracks finalized headers it does not deal with forks. 
Forks can only //! occur if the GRANDPA validator set on the bridged chain is either colluding or there is a severe -//! bug causing resulting in an equivocation. Such events are outside of the scope of this pallet. +//! bug causing resulting in an equivocation. Such events are outside the scope of this pallet. //! Shall the fork occur on the bridged chain governance intervention will be required to //! re-initialize the bridge and track the right fork. @@ -38,15 +38,14 @@ use crate::weights::WeightInfo; -use bp_header_chain::justification::GrandpaJustification; -use bp_header_chain::InitializationData; +use bp_header_chain::{justification::GrandpaJustification, InitializationData}; use bp_runtime::{BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf}; use finality_grandpa::voter_set::VoterSet; use frame_support::{ensure, fail}; use frame_system::{ensure_signed, RawOrigin}; use sp_finality_grandpa::{ConsensusLog, GRANDPA_ENGINE_ID}; use sp_runtime::traits::{BadOrigin, Header as HeaderT, Zero}; -use sp_std::convert::TryInto; +use sp_std::{boxed::Box, convert::TryInto}; #[cfg(test)] mod mock; @@ -130,16 +129,13 @@ pub mod pallet { ))] pub fn submit_finality_proof( origin: OriginFor, - finality_target: BridgedHeader, + finality_target: Box>, justification: GrandpaJustification>, ) -> DispatchResultWithPostInfo { ensure_operational::()?; let _ = ensure_signed(origin)?; - ensure!( - Self::request_count() < T::MaxRequests::get(), - >::TooManyRequests - ); + ensure!(Self::request_count() < T::MaxRequests::get(), >::TooManyRequests); let (hash, number) = (finality_target.hash(), finality_target.number()); log::trace!(target: "runtime::bridge-grandpa", "Going to try and finalize header {:?}", finality_target); @@ -153,30 +149,40 @@ pub mod pallet { finality_target, ); fail!(>::NotInitialized); - } + }, }; // We do a quick check here to ensure that our header chain is making progress and isn't - // "travelling back in time" (which could be indicative of something bad, e.g a 
hard-fork). + // "travelling back in time" (which could be indicative of something bad, e.g a + // hard-fork). ensure!(best_finalized.number() < number, >::OldHeader); let authority_set = >::get(); let set_id = authority_set.set_id; verify_justification::(&justification, hash, *number, authority_set)?; - let _enacted = try_enact_authority_change::(&finality_target, set_id)?; + let is_authorities_change_enacted = + try_enact_authority_change::(&finality_target, set_id)?; >::mutate(|count| *count += 1); - insert_header::(finality_target, hash); + insert_header::(*finality_target, hash); log::info!(target: "runtime::bridge-grandpa", "Succesfully imported finalized header with hash {:?}!", hash); - Ok(().into()) + // mandatory header is a header that changes authorities set. The pallet can't go + // further without importing this header. So every bridge MUST import mandatory headers. + // + // We don't want to charge extra costs for mandatory operations. So relayer is not + // paying fee for mandatory headers import transactions. + let is_mandatory_header = is_authorities_change_enacted; + let pays_fee = if is_mandatory_header { Pays::No } else { Pays::Yes }; + + Ok(pays_fee.into()) } /// Bootstrap the bridge pallet with an initial header and authority set from which to sync. /// /// The initial configuration provided does not need to be the genesis header of the bridged - /// chain, it can be any arbirary header. You can also provide the next scheduled set change - /// if it is already know. + /// chain, it can be any arbitrary header. You can also provide the next scheduled set + /// change if it is already know. /// /// This function is only allowed to be called from a trusted origin and writes to storage /// with practically no checks in terms of the validity of the data. It is important that @@ -205,17 +211,20 @@ pub mod pallet { /// /// May only be called either by root, or by `PalletOwner`. 
#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_owner(origin: OriginFor, new_owner: Option) -> DispatchResultWithPostInfo { + pub fn set_owner( + origin: OriginFor, + new_owner: Option, + ) -> DispatchResultWithPostInfo { ensure_owner_or_root::(origin)?; match new_owner { Some(new_owner) => { PalletOwner::::put(&new_owner); log::info!(target: "runtime::bridge-grandpa", "Setting pallet Owner to: {:?}", new_owner); - } + }, None => { PalletOwner::::kill(); log::info!(target: "runtime::bridge-grandpa", "Removed Owner of pallet."); - } + }, } Ok(().into()) @@ -225,9 +234,12 @@ pub mod pallet { /// /// May only be called either by root, or by `PalletOwner`. #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_operational(origin: OriginFor, operational: bool) -> DispatchResultWithPostInfo { + pub fn set_operational( + origin: OriginFor, + operational: bool, + ) -> DispatchResultWithPostInfo { ensure_owner_or_root::(origin)?; - >::put(operational); + >::put(!operational); if operational { log::info!(target: "runtime::bridge-grandpa", "Resuming pallet operations."); @@ -252,11 +264,13 @@ pub mod pallet { /// Hash of the header used to bootstrap the pallet. #[pallet::storage] - pub(super) type InitialHash, I: 'static = ()> = StorageValue<_, BridgedBlockHash, ValueQuery>; + pub(super) type InitialHash, I: 'static = ()> = + StorageValue<_, BridgedBlockHash, ValueQuery>; /// Hash of the best finalized header. #[pallet::storage] - pub(super) type BestFinalized, I: 'static = ()> = StorageValue<_, BridgedBlockHash, ValueQuery>; + pub(super) type BestFinalized, I: 'static = ()> = + StorageValue<_, BridgedBlockHash, ValueQuery>; /// A ring buffer of imported hashes. Ordered by the insertion time. #[pallet::storage] @@ -265,7 +279,8 @@ pub mod pallet { /// Current ring buffer position. 
#[pallet::storage] - pub(super) type ImportedHashesPointer, I: 'static = ()> = StorageValue<_, u32, ValueQuery>; + pub(super) type ImportedHashesPointer, I: 'static = ()> = + StorageValue<_, u32, ValueQuery>; /// Headers which have been imported into the pallet. #[pallet::storage] @@ -284,7 +299,8 @@ pub mod pallet { /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt /// flag directly or call the `halt_operations`). #[pallet::storage] - pub(super) type PalletOwner, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; + pub(super) type PalletOwner, I: 'static = ()> = + StorageValue<_, T::AccountId, OptionQuery>; /// If true, all pallet transactions are failed immediately. #[pallet::storage] @@ -301,10 +317,7 @@ pub mod pallet { #[cfg(feature = "std")] impl, I: 'static> Default for GenesisConfig { fn default() -> Self { - Self { - owner: None, - init_data: None, - } + Self { owner: None, init_data: None } } } @@ -355,7 +368,7 @@ pub mod pallet { /// is found it will be enacted immediately. /// /// This function does not support forced changes, or scheduled changes with delays - /// since these types of changes are indicitive of abnormal behaviour from GRANDPA. + /// since these types of changes are indicative of abnormal behavior from GRANDPA. /// /// Returned value will indicate if a change was enacted or not. pub(crate) fn try_enact_authority_change, I: 'static>( @@ -401,7 +414,7 @@ pub mod pallet { /// /// Will use the GRANDPA current authorities known to the pallet. /// - /// If succesful it returns the decoded GRANDPA justification so we can refund any weight which + /// If successful it returns the decoded GRANDPA justification so we can refund any weight which /// was overcharged in the initial call. 
pub(crate) fn verify_justification, I: 'static>( justification: &GrandpaJustification>, @@ -411,29 +424,35 @@ pub mod pallet { ) -> Result<(), sp_runtime::DispatchError> { use bp_header_chain::justification::verify_justification; - let voter_set = VoterSet::new(authority_set.authorities).ok_or(>::InvalidAuthoritySet)?; + let voter_set = + VoterSet::new(authority_set.authorities).ok_or(>::InvalidAuthoritySet)?; let set_id = authority_set.set_id; - Ok( - verify_justification::>((hash, number), set_id, &voter_set, justification).map_err( - |e| { - log::error!( - target: "runtime::bridge-grandpa", - "Received invalid justification for {:?}: {:?}", - hash, - e, - ); - >::InvalidJustification - }, - )?, + Ok(verify_justification::>( + (hash, number), + set_id, + &voter_set, + justification, ) + .map_err(|e| { + log::error!( + target: "runtime::bridge-grandpa", + "Received invalid justification for {:?}: {:?}", + hash, + e, + ); + >::InvalidJustification + })?) } /// Import a previously verified header to the storage. /// /// Note this function solely takes care of updating the storage and pruning old entries, - /// but does not verify the validaty of such import. - pub(crate) fn insert_header, I: 'static>(header: BridgedHeader, hash: BridgedBlockHash) { + /// but does not verify the validity of such import. 
+ pub(crate) fn insert_header, I: 'static>( + header: BridgedHeader, + hash: BridgedBlockHash, + ) { let index = >::get(); let pruning = >::try_get(index); >::put(hash); @@ -453,17 +472,12 @@ pub mod pallet { pub(crate) fn initialize_bridge, I: 'static>( init_params: super::InitializationData>, ) { - let super::InitializationData { - header, - authority_list, - set_id, - is_halted, - } = init_params; + let super::InitializationData { header, authority_list, set_id, is_halted } = init_params; let initial_hash = header.hash(); >::put(initial_hash); >::put(0); - insert_header::(header, initial_hash); + insert_header::(*header, initial_hash); let authority_set = bp_header_chain::AuthoritySet::new(authority_list, set_id); >::put(authority_set); @@ -498,7 +512,9 @@ pub mod pallet { fn ensure_owner_or_root, I: 'static>(origin: T::Origin) -> Result<(), BadOrigin> { match origin.into() { Ok(RawOrigin::Root) => Ok(()), - Ok(RawOrigin::Signed(ref signer)) if Some(signer) == >::get().as_ref() => Ok(()), + Ok(RawOrigin::Signed(ref signer)) + if Some(signer) == >::get().as_ref() => + Ok(()), _ => Err(BadOrigin), } } @@ -545,14 +561,17 @@ impl, I: 'static> Pallet { parse: impl FnOnce(bp_runtime::StorageProofChecker>) -> R, ) -> Result { let header = >::get(hash).ok_or(Error::::UnknownHeader)?; - let storage_proof_checker = bp_runtime::StorageProofChecker::new(*header.state_root(), storage_proof) - .map_err(|_| Error::::StorageRootMismatch)?; + let storage_proof_checker = + bp_runtime::StorageProofChecker::new(*header.state_root(), storage_proof) + .map_err(|_| Error::::StorageRootMismatch)?; Ok(parse(storage_proof_checker)) } } -pub(crate) fn find_scheduled_change(header: &H) -> Option> { +pub(crate) fn find_scheduled_change( + header: &H, +) -> Option> { use sp_runtime::generic::OpaqueDigestItemId; let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); @@ -567,7 +586,7 @@ pub(crate) fn find_scheduled_change(header: &H) -> Option( header: &H, @@ -590,8 +609,9 @@ pub(crate) 
fn find_forced_change( #[cfg(feature = "runtime-benchmarks")] pub fn initialize_for_benchmarks, I: 'static>(header: BridgedHeader) { initialize_bridge::(InitializationData { - header, - authority_list: sp_std::vec::Vec::new(), // we don't verify any proofs in external benchmarks + header: Box::new(header), + authority_list: sp_std::vec::Vec::new(), /* we don't verify any proofs in external + * benchmarks */ set_id: 0, is_halted: false, }); @@ -602,12 +622,11 @@ mod tests { use super::*; use crate::mock::{run_test, test_header, Origin, TestHeader, TestNumber, TestRuntime}; use bp_test_utils::{ - authority_list, make_default_justification, make_justification_for_header, JustificationGeneratorParams, ALICE, - BOB, + authority_list, make_default_justification, make_justification_for_header, + JustificationGeneratorParams, ALICE, BOB, }; use codec::Encode; - use frame_support::weights::PostDispatchInfo; - use frame_support::{assert_err, assert_noop, assert_ok}; + use frame_support::{assert_err, assert_noop, assert_ok, weights::PostDispatchInfo}; use sp_runtime::{Digest, DigestItem, DispatchError}; fn initialize_substrate_bridge() { @@ -616,11 +635,14 @@ mod tests { fn init_with_origin( origin: Origin, - ) -> Result, sp_runtime::DispatchErrorWithPostInfo> { + ) -> Result< + InitializationData, + sp_runtime::DispatchErrorWithPostInfo, + > { let genesis = test_header(0); let init_data = InitializationData { - header: genesis, + header: Box::new(genesis), authority_list: authority_list(), set_id: 1, is_halted: false, @@ -633,7 +655,11 @@ mod tests { let header = test_header(header.into()); let justification = make_default_justification(&header); - Pallet::::submit_finality_proof(Origin::signed(1), header, justification) + Pallet::::submit_finality_proof( + Origin::signed(1), + Box::new(header), + justification, + ) } fn next_block() { @@ -645,14 +671,13 @@ mod tests { } fn change_log(delay: u64) -> Digest { - let consensus_log = 
ConsensusLog::::ScheduledChange(sp_finality_grandpa::ScheduledChange { - next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)], - delay, - }); + let consensus_log = + ConsensusLog::::ScheduledChange(sp_finality_grandpa::ScheduledChange { + next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)], + delay, + }); - Digest { - logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())], - } + Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] } } fn forced_change_log(delay: u64) -> Digest { @@ -664,9 +689,7 @@ mod tests { }, ); - Digest { - logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())], - } + Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] } } #[test] @@ -775,9 +798,13 @@ mod tests { #[test] fn pallet_rejects_transactions_if_halted() { run_test(|| { - >::put(true); + initialize_substrate_bridge(); + assert_ok!(Pallet::::set_operational(Origin::root(), false)); assert_noop!(submit_finality_proof(1), Error::::Halted); + + assert_ok!(Pallet::::set_operational(Origin::root(), true)); + assert_ok!(submit_finality_proof(1)); }) } @@ -792,7 +819,13 @@ mod tests { fn succesfully_imports_header_with_valid_finality() { run_test(|| { initialize_substrate_bridge(); - assert_ok!(submit_finality_proof(1)); + assert_ok!( + submit_finality_proof(1), + PostDispatchInfo { + actual_weight: None, + pays_fee: frame_support::weights::Pays::Yes, + }, + ); let header = test_header(1); assert_eq!(>::get(), header.hash()); @@ -807,14 +840,16 @@ mod tests { let header = test_header(1); - let params = JustificationGeneratorParams:: { - set_id: 2, - ..Default::default() - }; + let params = + JustificationGeneratorParams:: { set_id: 2, ..Default::default() }; let justification = make_justification_for_header(params); assert_err!( - Pallet::::submit_finality_proof(Origin::signed(1), header, justification,), + Pallet::::submit_finality_proof( + Origin::signed(1), + 
Box::new(header), + justification, + ), >::InvalidJustification ); }) @@ -830,7 +865,11 @@ mod tests { justification.round = 42; assert_err!( - Pallet::::submit_finality_proof(Origin::signed(1), header, justification,), + Pallet::::submit_finality_proof( + Origin::signed(1), + Box::new(header), + justification, + ), >::InvalidJustification ); }) @@ -843,7 +882,7 @@ mod tests { let invalid_authority_list = vec![(ALICE.into(), u64::MAX), (BOB.into(), u64::MAX)]; let init_data = InitializationData { - header: genesis, + header: Box::new(genesis), authority_list: invalid_authority_list, set_id: 1, is_halted: false, @@ -855,7 +894,11 @@ mod tests { let justification = make_default_justification(&header); assert_err!( - Pallet::::submit_finality_proof(Origin::signed(1), header, justification,), + Pallet::::submit_finality_proof( + Origin::signed(1), + Box::new(header), + justification, + ), >::InvalidAuthoritySet ); }) @@ -889,11 +932,17 @@ mod tests { let justification = make_default_justification(&header); // Let's import our test header - assert_ok!(Pallet::::submit_finality_proof( - Origin::signed(1), - header.clone(), - justification - )); + assert_ok!( + Pallet::::submit_finality_proof( + Origin::signed(1), + Box::new(header.clone()), + justification + ), + PostDispatchInfo { + actual_weight: None, + pays_fee: frame_support::weights::Pays::No, + }, + ); // Make sure that our header is the best finalized assert_eq!(>::get(), header.hash()); @@ -922,7 +971,11 @@ mod tests { // Should not be allowed to import this header assert_err!( - Pallet::::submit_finality_proof(Origin::signed(1), header, justification), + Pallet::::submit_finality_proof( + Origin::signed(1), + Box::new(header), + justification + ), >::UnsupportedScheduledChange ); }) @@ -943,7 +996,11 @@ mod tests { // Should not be allowed to import this header assert_err!( - Pallet::::submit_finality_proof(Origin::signed(1), header, justification), + Pallet::::submit_finality_proof( + Origin::signed(1), + 
Box::new(header), + justification + ), >::UnsupportedScheduledChange ); }) @@ -1001,7 +1058,11 @@ mod tests { let mut invalid_justification = make_default_justification(&header); invalid_justification.round = 42; - Pallet::::submit_finality_proof(Origin::signed(1), header, invalid_justification) + Pallet::::submit_finality_proof( + Origin::signed(1), + Box::new(header), + invalid_justification, + ) }; initialize_substrate_bridge(); diff --git a/polkadot/bridges/modules/grandpa/src/mock.rs b/polkadot/bridges/modules/grandpa/src/mock.rs index 8851dbb041a677ede0137226c9186be42af4e4c0..f8b5e269323f933cb878ca82964cb1e181c4a681 100644 --- a/polkadot/bridges/modules/grandpa/src/mock.rs +++ b/polkadot/bridges/modules/grandpa/src/mock.rs @@ -19,6 +19,7 @@ use bp_runtime::Chain; use frame_support::{construct_runtime, parameter_types, weights::Weight}; +use sp_core::sr25519::Signature; use sp_runtime::{ testing::{Header, H256}, traits::{BlakeTwo256, IdentityLookup}, @@ -100,6 +101,11 @@ impl Chain for TestBridgedChain { type Hash = ::Hash; type Hasher = ::Hashing; type Header = ::Header; + + type AccountId = AccountId; + type Balance = u64; + type Index = u64; + type Signature = Signature; } pub fn run_test(test: impl FnOnce() -> T) -> T { diff --git a/polkadot/bridges/modules/grandpa/src/weights.rs b/polkadot/bridges/modules/grandpa/src/weights.rs index 18d88049f16a8b9dcb68edc5efde6aef4e32c160..c0cce2c5258d126246c14c3a228bca3354b074a7 100644 --- a/polkadot/bridges/modules/grandpa/src/weights.rs +++ b/polkadot/bridges/modules/grandpa/src/weights.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Autogenerated weights for pallet_bridge_grandpa +//! Autogenerated weights for `pallet_bridge_grandpa` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! 
DATE: 2021-06-03, STEPS: [50, ], REPEAT: 20 @@ -46,12 +46,12 @@ use frame_support::{ }; use sp_std::marker::PhantomData; -/// Weight functions needed for pallet_bridge_grandpa. +/// Weight functions needed for `pallet_bridge_grandpa`. pub trait WeightInfo { fn submit_finality_proof(p: u32, v: u32) -> Weight; } -/// Weights for pallet_bridge_grandpa using the Rialto node and recommended hardware. +/// Weights for `pallet_bridge_grandpa` using the Rialto node and recommended hardware. pub struct RialtoWeight(PhantomData); impl WeightInfo for RialtoWeight { fn submit_finality_proof(p: u32, v: u32) -> Weight { diff --git a/polkadot/bridges/modules/messages/Cargo.toml b/polkadot/bridges/modules/messages/Cargo.toml index 10577c1406447962a52bcf23582e9d907b0b0d3b..b48bdc5c0ffd092838ec00bc21bac7b3ad880a57 100644 --- a/polkadot/bridges/modules/messages/Cargo.toml +++ b/polkadot/bridges/modules/messages/Cargo.toml @@ -8,7 +8,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] bitvec = { version = "0.20", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false } log = { version = "0.4.14", default-features = false } num-traits = { version = "0.2", default-features = false } scale-info = { version = "1.0", default-features = false, features = ["derive"] } @@ -18,17 +18,16 @@ serde = { version = "1.0.101", optional = true, features = ["derive"] } bp-message-dispatch = { path = "../../primitives/message-dispatch", default-features = false } bp-messages = { path = "../../primitives/messages", default-features = false } -bp-rialto = { path = "../../primitives/chain-rialto", default-features = false } bp-runtime = { path = "../../primitives/runtime", default-features = false } # Substrate Dependencies -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" 
, default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [dev-dependencies] hex = "0.4" @@ -42,7 +41,6 @@ std = [ "bp-message-dispatch/std", "bp-messages/std", "bp-runtime/std", - "bp-rialto/std", "codec/std", "frame-support/std", "frame-system/std", diff --git a/polkadot/bridges/modules/messages/README.md b/polkadot/bridges/modules/messages/README.md index be25b3c37f757a289719d925d138405fc32782ea..062a966fad70a3ccfef72f93a67884ab76e8a535 100644 --- a/polkadot/bridges/modules/messages/README.md +++ b/polkadot/bridges/modules/messages/README.md @@ -354,7 +354,7 @@ Both conditions are verified by `pallet_bridge_messages::ensure_weights_are_corr `pallet_bridge_messages::ensure_able_to_receive_messages` functions, which must be called from every runtime's 
tests. -### Post-dispatch weight refunds of the `receive_messages_proof` call +#### Post-dispatch weight refunds of the `receive_messages_proof` call Weight formula of the `receive_messages_proof` call assumes that the dispatch fee of every message is paid at the target chain (where call is executed), that every message will be dispatched and that @@ -388,6 +388,7 @@ The weight formula is: Weight = BaseWeight + MessagesCount * MessageConfirmationWeight + RelayersCount * RelayerRewardWeight + Max(0, ActualProofSize - ExpectedProofSize) * ProofByteDeliveryWeight + + MessagesCount * (DbReadWeight + DbWriteWeight) ``` Where: @@ -403,6 +404,15 @@ Where: | `ExpectedProofSize` | `EXTRA_STORAGE_PROOF_SIZE` | Size of proof that we are expecting | | `ProofByteDeliveryWeight` | `(receive_single_message_proof_16_kb - receive_single_message_proof_1_kb) / (15 * 1024)` | Weight of processing every additional proof byte over `ExpectedProofSize` limit. We're using the same formula, as for message delivery, because proof mechanism is assumed to be the same in both cases | +#### Post-dispatch weight refunds of the `receive_messages_delivery_proof` call + +Weight formula of the `receive_messages_delivery_proof` call assumes that all messages in the proof +are actually delivered (so there are no already confirmed messages) and every messages is processed +by the `OnDeliveryConfirmed` callback. This means that for every message, we're adding single db read +weight and single db write weight. If, by some reason, messages are not processed by the +`OnDeliveryConfirmed` callback, or their processing is faster than that additional weight, the +difference is refunded to the submitter. + #### Why we're always able to craft `receive_messages_delivery_proof` transaction? 
There can be at most `::MaxUnconfirmedMessagesAtInboundLane` diff --git a/polkadot/bridges/modules/messages/src/benchmarking.rs b/polkadot/bridges/modules/messages/src/benchmarking.rs index 54cb7c26cd3d76fc2fd1a76bc705b8e8d03af115..788ccc070310ea288f264b082798209fdbb5da3c 100644 --- a/polkadot/bridges/modules/messages/src/benchmarking.rs +++ b/polkadot/bridges/modules/messages/src/benchmarking.rs @@ -16,18 +16,18 @@ //! Messages pallet benchmarking. -use crate::weights_ext::EXPECTED_DEFAULT_MESSAGE_LENGTH; use crate::{ - inbound_lane::InboundLaneStorage, inbound_lane_storage, outbound_lane, outbound_lane::ReceivalConfirmationResult, - Call, Instance, + inbound_lane::InboundLaneStorage, inbound_lane_storage, outbound_lane, + outbound_lane::ReceivalConfirmationResult, weights_ext::EXPECTED_DEFAULT_MESSAGE_LENGTH, Call, }; use bp_messages::{ - source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, DeliveredMessages, InboundLaneData, LaneId, - MessageData, MessageNonce, OutboundLaneData, UnrewardedRelayer, UnrewardedRelayersState, + source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, DeliveredMessages, + InboundLaneData, LaneId, MessageData, MessageNonce, OutboundLaneData, UnrewardedRelayer, + UnrewardedRelayersState, }; use bp_runtime::messages::DispatchFeePayment; -use frame_benchmarking::{account, benchmarks_instance}; +use frame_benchmarking::{account, benchmarks_instance_pallet}; use frame_support::{traits::Get, weights::Weight}; use frame_system::RawOrigin; use sp_std::{ @@ -38,23 +38,23 @@ use sp_std::{ }; /// Fee paid by submitter for single message delivery. -pub const MESSAGE_FEE: u64 = 10_000_000_000; +pub const MESSAGE_FEE: u64 = 100_000_000_000; const SEED: u32 = 0; /// Pallet we're benchmarking here. -pub struct Pallet, I: crate::Instance>(crate::Pallet); +pub struct Pallet, I: 'static>(crate::Pallet); /// Proof size requirements. pub enum ProofSize { /// The proof is expected to be minimal. 
If value size may be changed, then it is expected to /// have given size. Minimal(u32), - /// The proof is expected to have at least given size and grow by increasing number of trie nodes - /// included in the proof. + /// The proof is expected to have at least given size and grow by increasing number of trie + /// nodes included in the proof. HasExtraNodes(u32), - /// The proof is expected to have at least given size and grow by increasing value that is stored - /// in the trie. + /// The proof is expected to have at least given size and grow by increasing value that is + /// stored in the trie. HasLargeLeaf(u32), } @@ -91,7 +91,7 @@ pub struct MessageDeliveryProofParams { } /// Trait that must be implemented by runtime. -pub trait Config: crate::Config { +pub trait Config: crate::Config { /// Lane id to use in benchmarks. fn bench_lane_id() -> LaneId { Default::default() @@ -123,7 +123,7 @@ pub trait Config: crate::Config { fn is_message_dispatched(nonce: MessageNonce) -> bool; } -benchmarks_instance! { +benchmarks_instance_pallet! { // // Benchmarks that are used directly by the runtime. // @@ -237,7 +237,9 @@ benchmarks_instance! { // Benchmark `increase_message_fee` with following conditions: // * message has maximal message; // * submitter account is killed because its balance is less than ED after payment. - increase_message_fee { + // + // Result of this benchmark is directly used by weight formula of the call. + maximal_increase_message_fee { let sender = account("sender", 42, SEED); T::endow_account(&sender); @@ -251,6 +253,25 @@ benchmarks_instance! { assert_eq!(T::account_balance(&sender), 0.into()); } + // Benchmark `increase_message_fee` with following conditions: + // * message size varies from minimal to maximal; + // * submitter account is killed because its balance is less than ED after payment. 
+ increase_message_fee { + let i in 0..T::maximal_message_size().try_into().unwrap_or_default(); + + let sender = account("sender", 42, SEED); + T::endow_account(&sender); + + let additional_fee = T::account_balance(&sender); + let lane_id = T::bench_lane_id(); + let nonce = 1; + + send_regular_message_with_payload::(vec![42u8; i as _]); + }: increase_message_fee(RawOrigin::Signed(sender.clone()), lane_id, nonce, additional_fee) + verify { + assert_eq!(T::account_balance(&sender), 0.into()); + } + // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: // * proof does not include outbound lane state proof; // * inbound lane already has state, so it needs to be read and decoded; @@ -463,7 +484,7 @@ benchmarks_instance! { // // This is base benchmark for all other confirmations delivery benchmarks. receive_delivery_proof_for_single_message { - let relayers_fund_id = crate::Pallet::::relayer_fund_account_id(); + let relayers_fund_id = crate::relayer_fund_account_id::(); let relayer_id: T::AccountId = account("relayer", 0, SEED); let relayer_balance = T::account_balance(&relayer_id); T::endow_account(&relayers_fund_id); @@ -503,7 +524,7 @@ benchmarks_instance! { // as `weight(receive_delivery_proof_for_two_messages_by_single_relayer) // - weight(receive_delivery_proof_for_single_message)`. receive_delivery_proof_for_two_messages_by_single_relayer { - let relayers_fund_id = crate::Pallet::::relayer_fund_account_id(); + let relayers_fund_id = crate::relayer_fund_account_id::(); let relayer_id: T::AccountId = account("relayer", 0, SEED); let relayer_balance = T::account_balance(&relayer_id); T::endow_account(&relayers_fund_id); @@ -543,7 +564,7 @@ benchmarks_instance! { // as `weight(receive_delivery_proof_for_two_messages_by_two_relayers) // - weight(receive_delivery_proof_for_two_messages_by_single_relayer)`. 
receive_delivery_proof_for_two_messages_by_two_relayers { - let relayers_fund_id = crate::Pallet::::relayer_fund_account_id(); + let relayers_fund_id = crate::relayer_fund_account_id::(); let relayer1_id: T::AccountId = account("relayer1", 1, SEED); let relayer1_balance = T::account_balance(&relayer1_id); let relayer2_id: T::AccountId = account("relayer2", 2, SEED); @@ -790,7 +811,7 @@ benchmarks_instance! { .try_into() .expect("Value of MaxUnrewardedRelayerEntriesAtInboundLane is too large"); - let relayers_fund_id = crate::Pallet::::relayer_fund_account_id(); + let relayers_fund_id = crate::relayer_fund_account_id::(); let relayer_id: T::AccountId = account("relayer", 0, SEED); let relayer_balance = T::account_balance(&relayer_id); T::endow_account(&relayers_fund_id); @@ -833,7 +854,7 @@ benchmarks_instance! { .try_into() .expect("Value of MaxUnconfirmedMessagesAtInboundLane is too large "); - let relayers_fund_id = crate::Pallet::::relayer_fund_account_id(); + let relayers_fund_id = crate::relayer_fund_account_id::(); let confirmation_relayer_id = account("relayer", 0, SEED); let relayers: BTreeMap = (1..=i) .map(|j| { @@ -877,23 +898,17 @@ benchmarks_instance! 
{ } } -fn send_regular_message, I: Instance>() { +fn send_regular_message, I: 'static>() { let mut outbound_lane = outbound_lane::(T::bench_lane_id()); - outbound_lane.send_message(MessageData { - payload: vec![], - fee: MESSAGE_FEE.into(), - }); + outbound_lane.send_message(MessageData { payload: vec![], fee: MESSAGE_FEE.into() }); } -fn send_regular_message_with_payload, I: Instance>(payload: Vec) { +fn send_regular_message_with_payload, I: 'static>(payload: Vec) { let mut outbound_lane = outbound_lane::(T::bench_lane_id()); - outbound_lane.send_message(MessageData { - payload, - fee: MESSAGE_FEE.into(), - }); + outbound_lane.send_message(MessageData { payload, fee: MESSAGE_FEE.into() }); } -fn confirm_message_delivery, I: Instance>(nonce: MessageNonce) { +fn confirm_message_delivery, I: 'static>(nonce: MessageNonce) { let mut outbound_lane = outbound_lane::(T::bench_lane_id()); let latest_received_nonce = outbound_lane.data().latest_received_nonce; let mut relayers = VecDeque::with_capacity((nonce - latest_received_nonce) as usize); @@ -904,12 +919,12 @@ fn confirm_message_delivery, I: Instance>(nonce: MessageNonce) { }); } assert!(matches!( - outbound_lane.confirm_delivery(nonce, &relayers), + outbound_lane.confirm_delivery(nonce - latest_received_nonce, nonce, &relayers), ReceivalConfirmationResult::ConfirmedMessages(_), )); } -fn receive_messages, I: Instance>(nonce: MessageNonce) { +fn receive_messages, I: 'static>(nonce: MessageNonce) { let mut inbound_lane_storage = inbound_lane_storage::(T::bench_lane_id()); inbound_lane_storage.set_data(InboundLaneData { relayers: vec![UnrewardedRelayer { @@ -922,7 +937,10 @@ fn receive_messages, I: Instance>(nonce: MessageNonce) { }); } -fn ensure_relayer_rewarded, I: Instance>(relayer_id: &T::AccountId, old_balance: &T::OutboundMessageFee) { +fn ensure_relayer_rewarded, I: 'static>( + relayer_id: &T::AccountId, + old_balance: &T::OutboundMessageFee, +) { let new_balance = T::account_balance(relayer_id); assert!( 
new_balance > *old_balance, diff --git a/polkadot/bridges/modules/messages/src/inbound_lane.rs b/polkadot/bridges/modules/messages/src/inbound_lane.rs index 83d17dc3c06c23ae8fd16824825f14bd04397e36..00875bb878a823beda55a136ab96910ea8eceaaf 100644 --- a/polkadot/bridges/modules/messages/src/inbound_lane.rs +++ b/polkadot/bridges/modules/messages/src/inbound_lane.rs @@ -18,7 +18,8 @@ use bp_messages::{ target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, - DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, UnrewardedRelayer, + DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, + UnrewardedRelayer, }; use bp_runtime::messages::MessageDispatchResult; use frame_support::RuntimeDebug; @@ -53,7 +54,7 @@ pub enum ReceivalResult { Dispatched(MessageDispatchResult), /// Message has invalid nonce and lane has rejected to accept this message. InvalidNonce, - /// There are too many unrewarded relayer entires at the lane. + /// There are too many unrewarded relayer entries at the lane. TooManyUnrewardedRelayers, /// There are too many unconfirmed messages at the lane. TooManyUnconfirmedMessages, @@ -71,16 +72,19 @@ impl InboundLane { } /// Receive state of the corresponding outbound lane. 
- pub fn receive_state_update(&mut self, outbound_lane_data: OutboundLaneData) -> Option { + pub fn receive_state_update( + &mut self, + outbound_lane_data: OutboundLaneData, + ) -> Option { let mut data = self.storage.data(); let last_delivered_nonce = data.last_delivered_nonce(); if outbound_lane_data.latest_received_nonce > last_delivered_nonce { // this is something that should never happen if proofs are correct - return None; + return None } if outbound_lane_data.latest_received_nonce <= data.last_confirmed_nonce { - return None; + return None } let new_confirmed_nonce = outbound_lane_data.latest_received_nonce; @@ -95,7 +99,8 @@ impl InboundLane { data.relayers.pop_front(); } // Secondly, update the next record with lower nonce equal to new confirmed nonce if needed. - // Note: There will be max. 1 record to update as we don't allow messages from relayers to overlap. + // Note: There will be max. 1 record to update as we don't allow messages from relayers to + // overlap. match data.relayers.front_mut() { Some(entry) if entry.messages.begin < new_confirmed_nonce => { entry.messages.dispatch_results = entry @@ -103,8 +108,8 @@ impl InboundLane { .dispatch_results .split_off((new_confirmed_nonce + 1 - entry.messages.begin) as _); entry.messages.begin = new_confirmed_nonce + 1; - } - _ => {} + }, + _ => {}, } self.storage.set_data(data); @@ -122,30 +127,25 @@ impl InboundLane { let mut data = self.storage.data(); let is_correct_message = nonce == data.last_delivered_nonce() + 1; if !is_correct_message { - return ReceivalResult::InvalidNonce; + return ReceivalResult::InvalidNonce } // if there are more unrewarded relayer entries than we may accept, reject this message if data.relayers.len() as MessageNonce >= self.storage.max_unrewarded_relayer_entries() { - return ReceivalResult::TooManyUnrewardedRelayers; + return ReceivalResult::TooManyUnrewardedRelayers } // if there are more unconfirmed messages than we may accept, reject this message let 
unconfirmed_messages_count = nonce.saturating_sub(data.last_confirmed_nonce); if unconfirmed_messages_count > self.storage.max_unconfirmed_messages() { - return ReceivalResult::TooManyUnconfirmedMessages; + return ReceivalResult::TooManyUnconfirmedMessages } - // dispatch message before updating anything in the storage. If dispatch would panic, - // (which should not happen in the runtime) then we simply won't consider message as - // delivered (no changes to the inbound lane storage have been made). + // then, dispatch message let dispatch_result = P::dispatch( relayer_at_this_chain, DispatchMessage { - key: MessageKey { - lane_id: self.storage.id(), - nonce, - }, + key: MessageKey { lane_id: self.storage.id(), nonce }, data: message_data, }, ); @@ -155,7 +155,7 @@ impl InboundLane { Some(entry) if entry.relayer == *relayer_at_bridged_chain => { entry.messages.note_dispatched_message(dispatch_result.dispatch_result); false - } + }, _ => true, }; if push_new { @@ -176,14 +176,15 @@ mod tests { use crate::{ inbound_lane, mock::{ - dispatch_result, message_data, run_test, unrewarded_relayer, TestMessageDispatch, TestRuntime, - REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B, TEST_RELAYER_C, + dispatch_result, message_data, run_test, unrewarded_relayer, TestMessageDispatch, + TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B, + TEST_RELAYER_C, }, - DefaultInstance, RuntimeInboundLaneStorage, + RuntimeInboundLaneStorage, }; fn receive_regular_message( - lane: &mut InboundLane>, + lane: &mut InboundLane>, nonce: MessageNonce, ) { assert_eq!( @@ -286,16 +287,10 @@ mod tests { let mut seed_storage_data = lane.storage.data(); // Prepare data seed_storage_data.last_confirmed_nonce = 0; - seed_storage_data - .relayers - .push_back(unrewarded_relayer(1, 1, TEST_RELAYER_A)); + seed_storage_data.relayers.push_back(unrewarded_relayer(1, 1, TEST_RELAYER_A)); // Simulate messages batch (2, 3, 4) from relayer #2 - seed_storage_data - .relayers 
- .push_back(unrewarded_relayer(2, 4, TEST_RELAYER_B)); - seed_storage_data - .relayers - .push_back(unrewarded_relayer(5, 5, TEST_RELAYER_C)); + seed_storage_data.relayers.push_back(unrewarded_relayer(2, 4, TEST_RELAYER_B)); + seed_storage_data.relayers.push_back(unrewarded_relayer(5, 5, TEST_RELAYER_C)); lane.storage.set_data(seed_storage_data); // Check assert_eq!( @@ -337,7 +332,8 @@ mod tests { fn fails_to_receive_messages_above_unrewarded_relayer_entries_limit_per_lane() { run_test(|| { let mut lane = inbound_lane::(TEST_LANE_ID); - let max_nonce = ::MaxUnrewardedRelayerEntriesAtInboundLane::get(); + let max_nonce = + ::MaxUnrewardedRelayerEntriesAtInboundLane::get(); for current_nonce in 1..max_nonce + 1 { assert_eq!( lane.receive_message::( @@ -376,7 +372,8 @@ mod tests { fn fails_to_receive_messages_above_unconfirmed_messages_limit_per_lane() { run_test(|| { let mut lane = inbound_lane::(TEST_LANE_ID); - let max_nonce = ::MaxUnconfirmedMessagesAtInboundLane::get(); + let max_nonce = + ::MaxUnconfirmedMessagesAtInboundLane::get(); for current_nonce in 1..=max_nonce { assert_eq!( lane.receive_message::( diff --git a/polkadot/bridges/modules/messages/src/instant_payments.rs b/polkadot/bridges/modules/messages/src/instant_payments.rs index 524a3765d6ad4924c56c6d6e55b117f03915e774..c145687af994f0701c4ebd6ba0034babfa998e56 100644 --- a/polkadot/bridges/modules/messages/src/instant_payments.rs +++ b/polkadot/bridges/modules/messages/src/instant_payments.rs @@ -19,58 +19,57 @@ //! The payment is first transferred to a special `relayers-fund` account and only transferred //! to the actual relayer in case confirmation is received. 
+use crate::OutboundMessages; + use bp_messages::{ source_chain::{MessageDeliveryAndDispatchPayment, RelayersRewards, Sender}, - MessageNonce, + LaneId, MessageKey, MessageNonce, UnrewardedRelayer, }; use codec::Encode; use frame_support::traits::{Currency as CurrencyT, ExistenceRequirement, Get}; -use num_traits::Zero; +use num_traits::{SaturatingAdd, Zero}; use sp_runtime::traits::Saturating; -use sp_std::fmt::Debug; +use sp_std::{collections::vec_deque::VecDeque, fmt::Debug, ops::RangeInclusive}; /// Instant message payments made in given currency. /// -/// The balance is initally reserved in a special `relayers-fund` account, and transferred +/// The balance is initially reserved in a special `relayers-fund` account, and transferred /// to the relayer when message delivery is confirmed. /// -/// Additionaly, confirmation transaction submitter (`confirmation_relayer`) is reimbursed +/// Additionally, confirmation transaction submitter (`confirmation_relayer`) is reimbursed /// with the confirmation rewards (part of message fee, reserved to pay for delivery confirmation). /// /// NOTE The `relayers-fund` account must always exist i.e. be over Existential Deposit (ED; the -/// pallet enforces that) to make sure that even if the message cost is below ED it is still payed +/// pallet enforces that) to make sure that even if the message cost is below ED it is still paid /// to the relayer account. /// NOTE It's within relayer's interest to keep their balance above ED as well, to make sure they /// can receive the payment. 
-pub struct InstantCurrencyPayments { - _phantom: sp_std::marker::PhantomData<(T, Currency, GetConfirmationFee, RootAccount)>, +pub struct InstantCurrencyPayments { + _phantom: sp_std::marker::PhantomData<(T, I, Currency, GetConfirmationFee, RootAccount)>, } -impl MessageDeliveryAndDispatchPayment - for InstantCurrencyPayments +impl + MessageDeliveryAndDispatchPayment + for InstantCurrencyPayments where - T: frame_system::Config, - Currency: CurrencyT, + T: frame_system::Config + crate::Config, + I: 'static, + Currency: CurrencyT, Currency::Balance: From, GetConfirmationFee: Get, RootAccount: Get>, { type Error = &'static str; - fn initialize(relayer_fund_account: &T::AccountId) -> usize { - assert!( - frame_system::Pallet::::account_exists(relayer_fund_account), - "The relayer fund account ({:?}) must exist for the message lanes pallet to work correctly.", - relayer_fund_account, - ); - 1 - } - fn pay_delivery_and_dispatch_fee( submitter: &Sender, fee: &Currency::Balance, relayer_fund_account: &T::AccountId, ) -> Result<(), Self::Error> { + if !frame_system::Pallet::::account_exists(relayer_fund_account) { + return Err("The relayer fund account must exist for the message lanes pallet to work correctly."); + } + let root_account = RootAccount::get(); let account = match submitter { Sender::Signed(submitter) => submitter, @@ -90,19 +89,55 @@ where } fn pay_relayers_rewards( + lane_id: LaneId, + messages_relayers: VecDeque>, confirmation_relayer: &T::AccountId, - relayers_rewards: RelayersRewards, + received_range: &RangeInclusive, relayer_fund_account: &T::AccountId, ) { - pay_relayers_rewards::( - confirmation_relayer, - relayers_rewards, - relayer_fund_account, - GetConfirmationFee::get(), - ); + let relayers_rewards = + cal_relayers_rewards::(lane_id, messages_relayers, received_range); + if !relayers_rewards.is_empty() { + pay_relayers_rewards::( + confirmation_relayer, + relayers_rewards, + relayer_fund_account, + GetConfirmationFee::get(), + ); + } } } +/// 
Calculate the relayers rewards +pub(crate) fn cal_relayers_rewards( + lane_id: LaneId, + messages_relayers: VecDeque>, + received_range: &RangeInclusive, +) -> RelayersRewards +where + T: frame_system::Config + crate::Config, + I: 'static, +{ + // remember to reward relayers that have delivered messages + // this loop is bounded by `T::MaxUnrewardedRelayerEntriesAtInboundLane` on the bridged chain + let mut relayers_rewards: RelayersRewards<_, T::OutboundMessageFee> = RelayersRewards::new(); + for entry in messages_relayers { + let nonce_begin = sp_std::cmp::max(entry.messages.begin, *received_range.start()); + let nonce_end = sp_std::cmp::min(entry.messages.end, *received_range.end()); + + // loop won't proceed if current entry is ahead of received range (begin > end). + // this loop is bound by `T::MaxUnconfirmedMessagesAtInboundLane` on the bridged chain + let mut relayer_reward = relayers_rewards.entry(entry.relayer).or_default(); + for nonce in nonce_begin..nonce_end + 1 { + let message_data = OutboundMessages::::get(MessageKey { lane_id, nonce }) + .expect("message was just confirmed; we never prune unconfirmed messages; qed"); + relayer_reward.reward = relayer_reward.reward.saturating_add(&message_data.fee); + relayer_reward.messages += 1; + } + } + relayers_rewards +} + /// Pay rewards to given relayers, optionally rewarding confirmation relayer. fn pay_relayers_rewards( confirmation_relayer: &AccountId, @@ -123,26 +158,31 @@ fn pay_relayers_rewards( // If delivery confirmation is submitted by other relayer, let's deduct confirmation fee // from relayer reward. // - // If confirmation fee has been increased (or if it was the only component of message fee), - // then messages relayer may receive zero reward. + // If confirmation fee has been increased (or if it was the only component of message + // fee), then messages relayer may receive zero reward. 
let mut confirmation_reward = confirmation_fee.saturating_mul(reward.messages.into()); if confirmation_reward > relayer_reward { confirmation_reward = relayer_reward; } relayer_reward = relayer_reward.saturating_sub(confirmation_reward); - confirmation_relayer_reward = confirmation_relayer_reward.saturating_add(confirmation_reward); + confirmation_relayer_reward = + confirmation_relayer_reward.saturating_add(confirmation_reward); } else { // If delivery confirmation is submitted by this relayer, let's add confirmation fee // from other relayers to this relayer reward. confirmation_relayer_reward = confirmation_relayer_reward.saturating_add(reward.reward); - continue; + continue } pay_relayer_reward::(relayer_fund_account, &relayer, relayer_reward); } // finally - pay reward to confirmation relayer - pay_relayer_reward::(relayer_fund_account, confirmation_relayer, confirmation_relayer_reward); + pay_relayer_reward::( + relayer_fund_account, + confirmation_relayer, + confirmation_relayer_reward, + ); } /// Transfer funds from relayers fund account to given relayer. 
@@ -155,7 +195,7 @@ fn pay_relayer_reward( Currency: CurrencyT, { if reward.is_zero() { - return; + return } let pay_result = Currency::transfer( @@ -198,20 +238,8 @@ mod tests { fn relayers_rewards() -> RelayersRewards { vec![ - ( - RELAYER_1, - RelayerRewards { - reward: 100, - messages: 2, - }, - ), - ( - RELAYER_2, - RelayerRewards { - reward: 100, - messages: 3, - }, - ), + (RELAYER_1, RelayerRewards { reward: 100, messages: 2 }), + (RELAYER_2, RelayerRewards { reward: 100, messages: 3 }), ] .into_iter() .collect() @@ -220,7 +248,12 @@ mod tests { #[test] fn confirmation_relayer_is_rewarded_if_it_has_also_delivered_messages() { run_test(|| { - pay_relayers_rewards::(&RELAYER_2, relayers_rewards(), &RELAYERS_FUND_ACCOUNT, 10); + pay_relayers_rewards::( + &RELAYER_2, + relayers_rewards(), + &RELAYERS_FUND_ACCOUNT, + 10, + ); assert_eq!(Balances::free_balance(&RELAYER_1), 80); assert_eq!(Balances::free_balance(&RELAYER_2), 120); @@ -230,7 +263,12 @@ mod tests { #[test] fn confirmation_relayer_is_rewarded_if_it_has_not_delivered_any_delivered_messages() { run_test(|| { - pay_relayers_rewards::(&RELAYER_3, relayers_rewards(), &RELAYERS_FUND_ACCOUNT, 10); + pay_relayers_rewards::( + &RELAYER_3, + relayers_rewards(), + &RELAYERS_FUND_ACCOUNT, + 10, + ); assert_eq!(Balances::free_balance(&RELAYER_1), 80); assert_eq!(Balances::free_balance(&RELAYER_2), 70); @@ -241,7 +279,12 @@ mod tests { #[test] fn only_confirmation_relayer_is_rewarded_if_confirmation_fee_has_significantly_increased() { run_test(|| { - pay_relayers_rewards::(&RELAYER_3, relayers_rewards(), &RELAYERS_FUND_ACCOUNT, 1000); + pay_relayers_rewards::( + &RELAYER_3, + relayers_rewards(), + &RELAYERS_FUND_ACCOUNT, + 1000, + ); assert_eq!(Balances::free_balance(&RELAYER_1), 0); assert_eq!(Balances::free_balance(&RELAYER_2), 0); diff --git a/polkadot/bridges/modules/messages/src/lib.rs b/polkadot/bridges/modules/messages/src/lib.rs index 
5594fdca6bfc0395d9637ecd914edd0944b4e1bc..80d946112c6f894052d6519021fe07be32a318d7 100644 --- a/polkadot/bridges/modules/messages/src/lib.rs +++ b/polkadot/bridges/modules/messages/src/lib.rs @@ -38,35 +38,39 @@ #![allow(clippy::unused_unit)] pub use crate::weights_ext::{ - ensure_able_to_receive_confirmation, ensure_able_to_receive_message, ensure_weights_are_correct, WeightInfoExt, - EXPECTED_DEFAULT_MESSAGE_LENGTH, + ensure_able_to_receive_confirmation, ensure_able_to_receive_message, + ensure_weights_are_correct, WeightInfoExt, EXPECTED_DEFAULT_MESSAGE_LENGTH, }; -use crate::inbound_lane::{InboundLane, InboundLaneStorage, ReceivalResult}; -use crate::outbound_lane::{OutboundLane, OutboundLaneStorage, ReceivalConfirmationResult}; -use crate::weights::WeightInfo; +use crate::{ + inbound_lane::{InboundLane, InboundLaneStorage, ReceivalResult}, + outbound_lane::{OutboundLane, OutboundLaneStorage, ReceivalConfirmationResult}, + weights::WeightInfo, +}; use bp_messages::{ source_chain::{ - LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed, RelayersRewards, TargetHeaderChain, + LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed, + OnMessageAccepted, SendMessageArtifacts, TargetHeaderChain, + }, + target_chain::{ + DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain, }, - target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain}, - total_unrewarded_messages, DeliveredMessages, InboundLaneData, LaneId, MessageData, MessageKey, MessageNonce, - OperatingMode, OutboundLaneData, Parameter as MessagesParameter, UnrewardedRelayersState, + total_unrewarded_messages, DeliveredMessages, InboundLaneData, LaneId, MessageData, MessageKey, + MessageNonce, OperatingMode, OutboundLaneData, Parameter as MessagesParameter, + UnrewardedRelayersState, }; -use bp_runtime::Size; +use bp_runtime::{ChainId, Size}; use codec::{Decode, Encode}; use 
frame_support::{ - decl_error, decl_event, decl_module, decl_storage, - dispatch::DispatchResultWithPostInfo, - ensure, fail, + fail, traits::Get, - weights::{DispatchClass, Pays, PostDispatchInfo, Weight}, - Parameter, StorageMap, + weights::{Pays, PostDispatchInfo}, }; -use frame_system::{ensure_signed, RawOrigin}; +use frame_system::RawOrigin; use num_traits::{SaturatingAdd, Zero}; -use sp_runtime::{traits::BadOrigin, DispatchResult}; +use sp_core::H256; +use sp_runtime::traits::{BadOrigin, Convert}; use sp_std::{cell::RefCell, cmp::PartialOrd, marker::PhantomData, prelude::*}; mod inbound_lane; @@ -82,191 +86,138 @@ pub mod benchmarking; #[cfg(test)] mod mock; -/// The module configuration trait -pub trait Config: frame_system::Config { - // General types +pub use pallet::*; - /// They overarching event type. - type Event: From> + Into<::Event>; - /// Benchmarks results from runtime we're plugged into. - type WeightInfo: WeightInfoExt; - /// Pallet parameter that is opaque to the pallet itself, but may be used by the runtime - /// for integrating the pallet. - /// - /// All pallet parameters may only be updated either by the root, or by the pallet owner. - type Parameter: MessagesParameter; - - /// Maximal number of messages that may be pruned during maintenance. Maintenance occurs - /// whenever new message is sent. The reason is that if you want to use lane, you should - /// be ready to pay for its maintenance. - type MaxMessagesToPruneAtOnce: Get; - /// Maximal number of unrewarded relayer entries at inbound lane. Unrewarded means that the - /// relayer has delivered messages, but either confirmations haven't been delivered back to the - /// source chain, or we haven't received reward confirmations yet. - /// - /// This constant limits maximal number of entries in the `InboundLaneData::relayers`. Keep - /// in mind that the same relayer account may take several (non-consecutive) entries in this - /// set. 
- type MaxUnrewardedRelayerEntriesAtInboundLane: Get; - /// Maximal number of unconfirmed messages at inbound lane. Unconfirmed means that the - /// message has been delivered, but either confirmations haven't been delivered back to the - /// source chain, or we haven't received reward confirmations for these messages yet. - /// - /// This constant limits difference between last message from last entry of the - /// `InboundLaneData::relayers` and first message at the first entry. - /// - /// There is no point of making this parameter lesser than MaxUnrewardedRelayerEntriesAtInboundLane, - /// because then maximal number of relayer entries will be limited by maximal number of messages. - /// - /// This value also represents maximal number of messages in single delivery transaction. Transaction - /// that is declaring more messages than this value, will be rejected. Even if these messages are - /// from different lanes. - type MaxUnconfirmedMessagesAtInboundLane: Get; - - /// Payload type of outbound messages. This payload is dispatched on the bridged chain. - type OutboundPayload: Parameter + Size; - /// Message fee type of outbound messages. This fee is paid on this chain. - type OutboundMessageFee: Default + From + PartialOrd + Parameter + SaturatingAdd + Zero; - - /// Payload type of inbound messages. This payload is dispatched on this chain. - type InboundPayload: Decode; - /// Message fee type of inbound messages. This fee is paid on the bridged chain. - type InboundMessageFee: Decode; - /// Identifier of relayer that deliver messages to this chain. Relayer reward is paid on the bridged chain. - type InboundRelayer: Parameter; - - /// A type which can be turned into an AccountId from a 256-bit hash. - /// - /// Used when deriving the shared relayer fund account. - type AccountIdConverter: sp_runtime::traits::Convert; - - // Types that are used by outbound_lane (on source chain). - - /// Target header chain. 
- type TargetHeaderChain: TargetHeaderChain; - /// Message payload verifier. - type LaneMessageVerifier: LaneMessageVerifier; - /// Message delivery payment. - type MessageDeliveryAndDispatchPayment: MessageDeliveryAndDispatchPayment; - /// Handler for delivered messages. - type OnDeliveryConfirmed: OnDeliveryConfirmed; - - // Types that are used by inbound_lane (on target chain). - - /// Source header chain, as it is represented on target chain. - type SourceHeaderChain: SourceHeaderChain; - /// Message dispatch. - type MessageDispatch: MessageDispatch< - Self::AccountId, - Self::InboundMessageFee, - DispatchPayload = Self::InboundPayload, - >; -} - -/// Shortcut to messages proof type for Config. -type MessagesProofOf = - <>::SourceHeaderChain as SourceHeaderChain<>::InboundMessageFee>>::MessagesProof; -/// Shortcut to messages delivery proof type for Config. -type MessagesDeliveryProofOf = <>::TargetHeaderChain as TargetHeaderChain< - >::OutboundPayload, - ::AccountId, ->>::MessagesDeliveryProof; - -decl_error! { - pub enum Error for Pallet, I: Instance> { - /// All pallet operations are halted. - Halted, - /// Message has been treated as invalid by chain verifier. - MessageRejectedByChainVerifier, - /// Message has been treated as invalid by lane verifier. - MessageRejectedByLaneVerifier, - /// Submitter has failed to pay fee for delivering and dispatching messages. - FailedToWithdrawMessageFee, - /// The transaction brings too many messages. - TooManyMessagesInTheProof, - /// Invalid messages has been submitted. - InvalidMessagesProof, - /// Invalid messages dispatch weight has been declared by the relayer. - InvalidMessagesDispatchWeight, - /// Invalid messages delivery proof has been submitted. - InvalidMessagesDeliveryProof, - /// The bridged chain has invalid `UnrewardedRelayers` in its storage (fatal for the lane). - InvalidUnrewardedRelayers, - /// The relayer has declared invalid unrewarded relayers state in the `receive_messages_delivery_proof` call. 
- InvalidUnrewardedRelayersState, - /// The message someone is trying to work with (i.e. increase fee) is already-delivered. - MessageIsAlreadyDelivered, - /// The message someone is trying to work with (i.e. increase fee) is not yet sent. - MessageIsNotYetSent - } -} - -decl_storage! { - trait Store for Pallet, I: Instance = DefaultInstance> as BridgeMessages { - /// Optional pallet owner. +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + // General types + + /// The overarching event type. + type Event: From> + IsType<::Event>; + /// Benchmarks results from runtime we're plugged into. + type WeightInfo: WeightInfoExt; + + /// Gets the chain id value from the instance. + #[pallet::constant] + type BridgedChainId: Get; + /// Pallet parameter that is opaque to the pallet itself, but may be used by the runtime + /// for integrating the pallet. /// - /// Pallet owner has a right to halt all pallet operations and then resume it. If it is - /// `None`, then there are no direct ways to halt/resume pallet operations, but other - /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt - /// flag directly or call the `halt_operations`). - pub PalletOwner get(fn module_owner): Option; - /// The current operating mode of the pallet. + /// All pallet parameters may only be updated either by the root, or by the pallet owner. + type Parameter: MessagesParameter; + + /// Maximal number of messages that may be pruned during maintenance. Maintenance occurs + /// whenever new message is sent. The reason is that if you want to use lane, you should + /// be ready to pay for its maintenance. + type MaxMessagesToPruneAtOnce: Get; + /// Maximal number of unrewarded relayer entries at inbound lane. 
Unrewarded means that the + /// relayer has delivered messages, but either confirmations haven't been delivered back to + /// the source chain, or we haven't received reward confirmations yet. /// - /// Depending on the mode either all, some, or no transactions will be allowed. - pub PalletOperatingMode get(fn operating_mode) config(): OperatingMode; - /// Map of lane id => inbound lane data. - pub InboundLanes: map hasher(blake2_128_concat) LaneId => InboundLaneData; - /// Map of lane id => outbound lane data. - pub OutboundLanes: map hasher(blake2_128_concat) LaneId => OutboundLaneData; - /// All queued outbound messages. - pub OutboundMessages: map hasher(blake2_128_concat) MessageKey => Option>; - } - add_extra_genesis { - config(phantom): sp_std::marker::PhantomData; - config(owner): Option; - build(|config| { - if let Some(ref owner) = config.owner { - >::put(owner); - } - }) - } -} - -decl_event!( - pub enum Event - where - AccountId = ::AccountId, - Parameter = >::Parameter, - { - /// Pallet parameter has been updated. - ParameterUpdated(Parameter), - /// Message has been accepted and is waiting to be delivered. - MessageAccepted(LaneId, MessageNonce), - /// Messages in the inclusive range have been delivered to the bridged chain. - MessagesDelivered(LaneId, DeliveredMessages), - /// Phantom member, never used. - Dummy(PhantomData<(AccountId, I)>), - } -); - -decl_module! { - pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { - /// Deposit one of this module's events by using the default implementation. - fn deposit_event() = default; - - /// Ensure runtime invariants. - fn on_runtime_upgrade() -> Weight { - let reads = T::MessageDeliveryAndDispatchPayment::initialize( - &Self::relayer_fund_account_id() - ); - T::DbWeight::get().reads(reads as u64) - } - + /// This constant limits maximal number of entries in the `InboundLaneData::relayers`. 
Keep + /// in mind that the same relayer account may take several (non-consecutive) entries in this + /// set. + type MaxUnrewardedRelayerEntriesAtInboundLane: Get; + /// Maximal number of unconfirmed messages at inbound lane. Unconfirmed means that the + /// message has been delivered, but either confirmations haven't been delivered back to the + /// source chain, or we haven't received reward confirmations for these messages yet. + /// + /// This constant limits difference between last message from last entry of the + /// `InboundLaneData::relayers` and first message at the first entry. + /// + /// There is no point of making this parameter lesser than + /// MaxUnrewardedRelayerEntriesAtInboundLane, because then maximal number of relayer entries + /// will be limited by maximal number of messages. + /// + /// This value also represents maximal number of messages in single delivery transaction. + /// Transaction that is declaring more messages than this value, will be rejected. Even if + /// these messages are from different lanes. + type MaxUnconfirmedMessagesAtInboundLane: Get; + + /// Payload type of outbound messages. This payload is dispatched on the bridged chain. + type OutboundPayload: Parameter + Size; + /// Message fee type of outbound messages. This fee is paid on this chain. + type OutboundMessageFee: Default + + From + + PartialOrd + + Parameter + + SaturatingAdd + + Zero + + Copy; + + /// Payload type of inbound messages. This payload is dispatched on this chain. + type InboundPayload: Decode; + /// Message fee type of inbound messages. This fee is paid on the bridged chain. + type InboundMessageFee: Decode; + /// Identifier of relayer that deliver messages to this chain. Relayer reward is paid on the + /// bridged chain. + type InboundRelayer: Parameter; + + /// A type which can be turned into an AccountId from a 256-bit hash. + /// + /// Used when deriving the shared relayer fund account. 
+ type AccountIdConverter: sp_runtime::traits::Convert; + + // Types that are used by outbound_lane (on source chain). + + /// Target header chain. + type TargetHeaderChain: TargetHeaderChain; + /// Message payload verifier. + type LaneMessageVerifier: LaneMessageVerifier< + Self::AccountId, + Self::OutboundPayload, + Self::OutboundMessageFee, + >; + /// Message delivery payment. + type MessageDeliveryAndDispatchPayment: MessageDeliveryAndDispatchPayment< + Self::AccountId, + Self::OutboundMessageFee, + >; + /// Handler for accepted messages. + type OnMessageAccepted: OnMessageAccepted; + /// Handler for delivered messages. + type OnDeliveryConfirmed: OnDeliveryConfirmed; + + // Types that are used by inbound_lane (on target chain). + + /// Source header chain, as it is represented on target chain. + type SourceHeaderChain: SourceHeaderChain; + /// Message dispatch. + type MessageDispatch: MessageDispatch< + Self::AccountId, + Self::InboundMessageFee, + DispatchPayload = Self::InboundPayload, + >; + } + + /// Shortcut to messages proof type for Config. + type MessagesProofOf = <>::SourceHeaderChain as SourceHeaderChain< + >::InboundMessageFee, + >>::MessagesProof; + /// Shortcut to messages delivery proof type for Config. + type MessagesDeliveryProofOf = + <>::TargetHeaderChain as TargetHeaderChain< + >::OutboundPayload, + ::AccountId, + >>::MessagesDeliveryProof; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData<(T, I)>); + + #[pallet::call] + impl, I: 'static> Pallet { /// Change `PalletOwner`. /// /// May only be called either by root, or by `PalletOwner`. 
- #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] - pub fn set_owner(origin, new_owner: Option) { + #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] + pub fn set_owner(origin: OriginFor, new_owner: Option) -> DispatchResult { ensure_owner_or_root::(origin)?; match new_owner { Some(new_owner) => { @@ -278,149 +229,104 @@ decl_module! { log::info!(target: "runtime::bridge-messages", "Removed Owner of pallet."); }, } + Ok(()) } /// Halt or resume all/some pallet operations. /// /// May only be called either by root, or by `PalletOwner`. - #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] - pub fn set_operating_mode(origin, operating_mode: OperatingMode) { + #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] + pub fn set_operating_mode( + origin: OriginFor, + operating_mode: OperatingMode, + ) -> DispatchResult { ensure_owner_or_root::(origin)?; - >::put(operating_mode); + PalletOperatingMode::::put(operating_mode); log::info!( target: "runtime::bridge-messages", "Setting messages pallet operating mode to {:?}.", operating_mode, ); + Ok(()) } /// Update pallet parameter. /// /// May only be called either by root, or by `PalletOwner`. /// - /// The weight is: single read for permissions check + 2 writes for parameter value and event. - #[weight = (T::DbWeight::get().reads_writes(1, 2), DispatchClass::Operational)] - pub fn update_pallet_parameter(origin, parameter: T::Parameter) { + /// The weight is: single read for permissions check + 2 writes for parameter value and + /// event. 
+ #[pallet::weight((T::DbWeight::get().reads_writes(1, 2), DispatchClass::Operational))] + pub fn update_pallet_parameter( + origin: OriginFor, + parameter: T::Parameter, + ) -> DispatchResult { ensure_owner_or_root::(origin)?; parameter.save(); - Self::deposit_event(RawEvent::ParameterUpdated(parameter)); + Self::deposit_event(Event::ParameterUpdated(parameter)); + Ok(()) } /// Send message over lane. - #[weight = T::WeightInfo::send_message_weight(payload)] + #[pallet::weight(T::WeightInfo::send_message_weight(payload, T::DbWeight::get()))] pub fn send_message( - origin, + origin: OriginFor, lane_id: LaneId, payload: T::OutboundPayload, delivery_and_dispatch_fee: T::OutboundMessageFee, - ) -> DispatchResult { - ensure_normal_operating_mode::()?; - let submitter = origin.into().map_err(|_| BadOrigin)?; - - // let's first check if message can be delivered to target chain - T::TargetHeaderChain::verify_message(&payload) - .map_err(|err| { - log::trace!( - target: "runtime::bridge-messages", - "Message to lane {:?} is rejected by target chain: {:?}", - lane_id, - err, - ); - - Error::::MessageRejectedByChainVerifier - })?; - - // now let's enforce any additional lane rules - let mut lane = outbound_lane::(lane_id); - T::LaneMessageVerifier::verify_message( - &submitter, - &delivery_and_dispatch_fee, - &lane_id, - &lane.data(), - &payload, - ).map_err(|err| { - log::trace!( - target: "runtime::bridge-messages", - "Message to lane {:?} is rejected by lane verifier: {:?}", - lane_id, - err, - ); - - Error::::MessageRejectedByLaneVerifier - })?; - - // let's withdraw delivery and dispatch fee from submitter - T::MessageDeliveryAndDispatchPayment::pay_delivery_and_dispatch_fee( - &submitter, - &delivery_and_dispatch_fee, - &Self::relayer_fund_account_id(), - ).map_err(|err| { - log::trace!( - target: "runtime::bridge-messages", - "Message to lane {:?} is rejected because submitter {:?} is unable to pay fee {:?}: {:?}", - lane_id, - submitter, - delivery_and_dispatch_fee, 
- err, - ); - - Error::::FailedToWithdrawMessageFee - })?; - - // finally, save message in outbound storage and emit event - let encoded_payload = payload.encode(); - let encoded_payload_len = encoded_payload.len(); - let nonce = lane.send_message(MessageData { - payload: encoded_payload, - fee: delivery_and_dispatch_fee, - }); - lane.prune_messages(T::MaxMessagesToPruneAtOnce::get()); - - log::trace!( - target: "runtime::bridge-messages", - "Accepted message {} to lane {:?}. Message size: {:?}", - nonce, + ) -> DispatchResultWithPostInfo { + crate::send_message::( + origin.into().map_err(|_| BadOrigin)?, lane_id, - encoded_payload_len, - ); - - Self::deposit_event(RawEvent::MessageAccepted(lane_id, nonce)); - - Ok(()) + payload, + delivery_and_dispatch_fee, + ) + .map(|sent_message| PostDispatchInfo { + actual_weight: Some(sent_message.weight), + pays_fee: Pays::Yes, + }) } /// Pay additional fee for the message. - #[weight = T::WeightInfo::increase_message_fee()] + #[pallet::weight(T::WeightInfo::maximal_increase_message_fee())] pub fn increase_message_fee( - origin, + origin: OriginFor, lane_id: LaneId, nonce: MessageNonce, additional_fee: T::OutboundMessageFee, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { ensure_not_halted::()?; // if someone tries to pay for already-delivered message, we're rejecting this intention // (otherwise this additional fee will be locked forever in relayers fund) // - // if someone tries to pay for not-yet-sent message, we're rejeting this intention, or + // if someone tries to pay for not-yet-sent message, we're rejecting this intention, or // we're risking to have mess in the storage let lane = outbound_lane::(lane_id); - ensure!(nonce > lane.data().latest_received_nonce, Error::::MessageIsAlreadyDelivered); - ensure!(nonce <= lane.data().latest_generated_nonce, Error::::MessageIsNotYetSent); + ensure!( + nonce > lane.data().latest_received_nonce, + Error::::MessageIsAlreadyDelivered + ); + ensure!( + nonce <= 
lane.data().latest_generated_nonce, + Error::::MessageIsNotYetSent + ); // withdraw additional fee from submitter let submitter = origin.into().map_err(|_| BadOrigin)?; T::MessageDeliveryAndDispatchPayment::pay_delivery_and_dispatch_fee( &submitter, &additional_fee, - &Self::relayer_fund_account_id(), - ).map_err(|err| { + &relayer_fund_account_id::(), + ) + .map_err(|err| { log::trace!( target: "runtime::bridge-messages", - "Submitter {:?} can't pay additional fee {:?} for the message {:?}/{:?}: {:?}", + "Submitter {:?} can't pay additional fee {:?} for the message {:?}/{:?} to {:?}: {:?}", submitter, additional_fee, lane_id, nonce, + relayer_fund_account_id::(), err, ); @@ -429,17 +335,24 @@ decl_module! { // and finally update fee in the storage let message_key = MessageKey { lane_id, nonce }; - OutboundMessages::::mutate(message_key, |message_data| { + let message_size = OutboundMessages::::mutate(message_key, |message_data| { // saturating_add is fine here - overflow here means that someone controls all // chain funds, which shouldn't ever happen + `pay_delivery_and_dispatch_fee` // above will fail before we reach here - let message_data = message_data - .as_mut() - .expect("the message is sent and not yet delivered; so it is in the storage; qed"); + let message_data = message_data.as_mut().expect( + "the message is sent and not yet delivered; so it is in the storage; qed", + ); message_data.fee = message_data.fee.saturating_add(&additional_fee); + message_data.payload.len() }); - Ok(()) + // compute actual dispatch weight that depends on the stored message size + let actual_weight = sp_std::cmp::min( + T::WeightInfo::maximal_increase_message_fee(), + T::WeightInfo::increase_message_fee(message_size as _), + ); + + Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes }) } /// Receive messages proof from bridged chain. @@ -447,9 +360,9 @@ decl_module! 
{ /// The weight of the call assumes that the transaction always brings outbound lane /// state update. Because of that, the submitter (relayer) has no benefit of not including /// this data in the transaction, so reward confirmations lags should be minimal. - #[weight = T::WeightInfo::receive_messages_proof_weight(proof, *messages_count, *dispatch_weight)] + #[pallet::weight(T::WeightInfo::receive_messages_proof_weight(proof, *messages_count, *dispatch_weight))] pub fn receive_messages_proof( - origin, + origin: OriginFor, relayer_id_at_bridged_chain: T::InboundRelayer, proof: MessagesProofOf, messages_count: u32, @@ -467,8 +380,8 @@ decl_module! { // why do we need to know the weight of this (`receive_messages_proof`) call? Because // we may want to return some funds for not-dispatching (or partially dispatching) some // messages to the call origin (relayer). And this is done by returning actual weight - // from the call. But we only know dispatch weight of every messages. So to refund relayer - // because we have not dispatched Message, we need to: + // from the call. But we only know dispatch weight of every messages. So to refund + // relayer because we have not dispatched Message, we need to: // // ActualWeight = DeclaredWeight - Message.DispatchWeight // @@ -487,40 +400,20 @@ decl_module! 
{ T::InboundMessageFee, T::InboundPayload, >(proof, messages_count) - .map_err(|err| { - log::trace!( - target: "runtime::bridge-messages", - "Rejecting invalid messages proof: {:?}", - err, - ); - - Error::::InvalidMessagesProof - })?; - - // verify that relayer is paying actual dispatch weight - let actual_dispatch_weight: Weight = messages - .values() - .map(|lane_messages| lane_messages - .messages - .iter() - .map(T::MessageDispatch::dispatch_weight) - .fold(0, |sum, weight| sum.saturating_add(&weight)) - ) - .fold(0, |sum, weight| sum.saturating_add(weight)); - if dispatch_weight < actual_dispatch_weight { + .map_err(|err| { log::trace!( target: "runtime::bridge-messages", - "Rejecting messages proof because of dispatch weight mismatch: declared={}, expected={}", - dispatch_weight, - actual_dispatch_weight, + "Rejecting invalid messages proof: {:?}", + err, ); - return Err(Error::::InvalidMessagesDispatchWeight.into()); - } + Error::::InvalidMessagesProof + })?; // dispatch messages and (optionally) update lane(s) state(s) let mut total_messages = 0; let mut valid_messages = 0; + let mut dispatch_weight_left = dispatch_weight; for (lane_id, lane_data) in messages { let mut lane = inbound_lane::(lane_id); @@ -539,8 +432,22 @@ decl_module! { for message in lane_data.messages { debug_assert_eq!(message.key.lane_id, lane_id); - total_messages += 1; + // ensure that relayer has declared enough weight for dispatching next message + // on this lane. We can't dispatch lane messages out-of-order, so if declared + // weight is not enough, let's move to next lane let dispatch_weight = T::MessageDispatch::dispatch_weight(&message); + if dispatch_weight > dispatch_weight_left { + log::trace!( + target: "runtime::bridge-messages", + "Cannot dispatch any more messages on lane {:?}. 
Weight: declared={}, left={}", + lane_id, + dispatch_weight, + dispatch_weight_left, + ); + break + } + total_messages += 1; + let receival_result = lane.receive_message::( &relayer_id_at_bridged_chain, &relayer_id_at_this_chain, @@ -557,24 +464,28 @@ decl_module! { let (unspent_weight, refund_pay_dispatch_fee) = match receival_result { ReceivalResult::Dispatched(dispatch_result) => { valid_messages += 1; - (dispatch_result.unspent_weight, !dispatch_result.dispatch_fee_paid_during_dispatch) + ( + dispatch_result.unspent_weight, + !dispatch_result.dispatch_fee_paid_during_dispatch, + ) }, - ReceivalResult::InvalidNonce - | ReceivalResult::TooManyUnrewardedRelayers - | ReceivalResult::TooManyUnconfirmedMessages => (dispatch_weight, true), + ReceivalResult::InvalidNonce | + ReceivalResult::TooManyUnrewardedRelayers | + ReceivalResult::TooManyUnconfirmedMessages => (dispatch_weight, true), }; - actual_weight = actual_weight - .saturating_sub(sp_std::cmp::min(unspent_weight, dispatch_weight)) - .saturating_sub( - // delivery call weight formula assumes that the fee is paid at - // this (target) chain. If the message is prepaid at the source - // chain, let's refund relayer with this extra cost. - if refund_pay_dispatch_fee { - T::WeightInfo::pay_inbound_dispatch_fee_overhead() - } else { - 0 - } - ); + + let unspent_weight = sp_std::cmp::min(unspent_weight, dispatch_weight); + dispatch_weight_left -= dispatch_weight - unspent_weight; + actual_weight = actual_weight.saturating_sub(unspent_weight).saturating_sub( + // delivery call weight formula assumes that the fee is paid at + // this (target) chain. If the message is prepaid at the source + // chain, let's refund relayer with this extra cost. + if refund_pay_dispatch_fee { + T::WeightInfo::pay_inbound_dispatch_fee_overhead() + } else { + 0 + }, + ); } } @@ -587,48 +498,86 @@ decl_module! 
{ declared_weight, ); - Ok(PostDispatchInfo { - actual_weight: Some(actual_weight), - pays_fee: Pays::Yes, - }) + Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes }) } /// Receive messages delivery proof from bridged chain. - #[weight = T::WeightInfo::receive_messages_delivery_proof_weight(proof, relayers_state)] + #[pallet::weight(T::WeightInfo::receive_messages_delivery_proof_weight( + proof, + relayers_state, + T::DbWeight::get(), + ))] pub fn receive_messages_delivery_proof( - origin, + origin: OriginFor, proof: MessagesDeliveryProofOf, relayers_state: UnrewardedRelayersState, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { ensure_not_halted::()?; + // why do we need to know the weight of this (`receive_messages_delivery_proof`) call? + // Because we may want to return some funds for messages that are not processed by the + // delivery callback, or if their actual processing weight is less than accounted by + // weight formula. So to refund relayer, we need to: + // + // ActualWeight = DeclaredWeight - UnspentCallbackWeight + // + // The DeclaredWeight is exactly what's computed here. Unfortunately it is impossible + // to get pre-computed value (and it has been already computed by the executive). 
+ let single_message_callback_overhead = + T::WeightInfo::single_message_callback_overhead(T::DbWeight::get()); + let declared_weight = T::WeightInfo::receive_messages_delivery_proof_weight( + &proof, + &relayers_state, + T::DbWeight::get(), + ); + let mut actual_weight = declared_weight; + let confirmation_relayer = ensure_signed(origin)?; - let (lane_id, lane_data) = T::TargetHeaderChain::verify_messages_delivery_proof(proof).map_err(|err| { - log::trace!( - target: "runtime::bridge-messages", - "Rejecting invalid messages delivery proof: {:?}", - err, - ); + let (lane_id, lane_data) = T::TargetHeaderChain::verify_messages_delivery_proof(proof) + .map_err(|err| { + log::trace!( + target: "runtime::bridge-messages", + "Rejecting invalid messages delivery proof: {:?}", + err, + ); - Error::::InvalidMessagesDeliveryProof - })?; + Error::::InvalidMessagesDeliveryProof + })?; // verify that the relayer has declared correct `lane_data::relayers` state - // (we only care about total number of entries and messages, because this affects call weight) + // (we only care about total number of entries and messages, because this affects call + // weight) ensure!( - total_unrewarded_messages(&lane_data.relayers) - .unwrap_or(MessageNonce::MAX) == relayers_state.total_messages - && lane_data.relayers.len() as MessageNonce == relayers_state.unrewarded_relayer_entries, + total_unrewarded_messages(&lane_data.relayers).unwrap_or(MessageNonce::MAX) == + relayers_state.total_messages && + lane_data.relayers.len() as MessageNonce == + relayers_state.unrewarded_relayer_entries, Error::::InvalidUnrewardedRelayersState ); // mark messages as delivered let mut lane = outbound_lane::(lane_id); - let mut relayers_rewards: RelayersRewards<_, T::OutboundMessageFee> = RelayersRewards::new(); let last_delivered_nonce = lane_data.last_delivered_nonce(); - let confirmed_messages = match lane.confirm_delivery(last_delivered_nonce, &lane_data.relayers) { - 
ReceivalConfirmationResult::ConfirmedMessages(confirmed_messages) => Some(confirmed_messages), + let confirmed_messages = match lane.confirm_delivery( + relayers_state.total_messages, + last_delivered_nonce, + &lane_data.relayers, + ) { + ReceivalConfirmationResult::ConfirmedMessages(confirmed_messages) => + Some(confirmed_messages), ReceivalConfirmationResult::NoNewConfirmations => None, + ReceivalConfirmationResult::TryingToConfirmMoreMessagesThanExpected( + to_confirm_messages_count, + ) => { + log::trace!( + target: "runtime::bridge-messages", + "Messages delivery proof contains too many messages to confirm: {} vs declared {}", + to_confirm_messages_count, + relayers_state.total_messages, + ); + + fail!(Error::::TryingToConfirmMoreMessagesThanExpected); + }, error => { log::trace!( target: "runtime::bridge-messages", @@ -639,40 +588,53 @@ decl_module! { fail!(Error::::InvalidUnrewardedRelayers); }, }; + if let Some(confirmed_messages) = confirmed_messages { // handle messages delivery confirmation - T::OnDeliveryConfirmed::on_messages_delivered(&lane_id, &confirmed_messages); + let preliminary_callback_overhead = + relayers_state.total_messages.saturating_mul(single_message_callback_overhead); + let actual_callback_weight = + T::OnDeliveryConfirmed::on_messages_delivered(&lane_id, &confirmed_messages); + match preliminary_callback_overhead.checked_sub(actual_callback_weight) { + Some(difference) if difference == 0 => (), + Some(difference) => { + log::trace!( + target: "runtime::bridge-messages", + "T::OnDeliveryConfirmed callback has spent less weight than expected. Refunding: \ + {} - {} = {}", + preliminary_callback_overhead, + actual_callback_weight, + difference, + ); + actual_weight = actual_weight.saturating_sub(difference); + }, + None => { + debug_assert!( + false, + "T::OnDeliveryConfirmed callback consumed too much weight." 
+ ); + log::error!( + target: "runtime::bridge-messages", + "T::OnDeliveryConfirmed callback has spent more weight that it is allowed to: \ + {} vs {}", + preliminary_callback_overhead, + actual_callback_weight, + ); + }, + } // emit 'delivered' event let received_range = confirmed_messages.begin..=confirmed_messages.end; - Self::deposit_event(RawEvent::MessagesDelivered(lane_id, confirmed_messages)); - - // remember to reward relayers that have delivered messages - // this loop is bounded by `T::MaxUnrewardedRelayerEntriesAtInboundLane` on the bridged chain - for entry in lane_data.relayers { - let nonce_begin = sp_std::cmp::max(entry.messages.begin, *received_range.start()); - let nonce_end = sp_std::cmp::min(entry.messages.end, *received_range.end()); - - // loop won't proceed if current entry is ahead of received range (begin > end). - // this loop is bound by `T::MaxUnconfirmedMessagesAtInboundLane` on the bridged chain - let mut relayer_reward = relayers_rewards.entry(entry.relayer).or_default(); - for nonce in nonce_begin..nonce_end + 1 { - let message_data = OutboundMessages::::get(MessageKey { - lane_id, - nonce, - }).expect("message was just confirmed; we never prune unconfirmed messages; qed"); - relayer_reward.reward = relayer_reward.reward.saturating_add(&message_data.fee); - relayer_reward.messages += 1; - } - } - } + Self::deposit_event(Event::MessagesDelivered(lane_id, confirmed_messages)); - // if some new messages have been confirmed, reward relayers - if !relayers_rewards.is_empty() { - let relayer_fund_account = Self::relayer_fund_account_id(); + // if some new messages have been confirmed, reward relayers + let relayer_fund_account = + relayer_fund_account_id::(); >::MessageDeliveryAndDispatchPayment::pay_relayers_rewards( + lane_id, + lane_data.relayers, &confirmation_relayer, - relayers_rewards, + &received_range, &relayer_fund_account, ); } @@ -684,124 +646,359 @@ decl_module! 
{ lane_id, ); - Ok(()) + Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes }) } } -} -impl, I: Instance> Pallet { - /// Get stored data of the outbound message with given nonce. - pub fn outbound_message_data(lane: LaneId, nonce: MessageNonce) -> Option> { - OutboundMessages::::get(MessageKey { lane_id: lane, nonce }) + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event, I: 'static = ()> { + /// Pallet parameter has been updated. + ParameterUpdated(T::Parameter), + /// Message has been accepted and is waiting to be delivered. + MessageAccepted(LaneId, MessageNonce), + /// Messages in the inclusive range have been delivered to the bridged chain. + MessagesDelivered(LaneId, DeliveredMessages), } - /// Get nonce of latest generated message at given outbound lane. - pub fn outbound_latest_generated_nonce(lane: LaneId) -> MessageNonce { - OutboundLanes::::get(&lane).latest_generated_nonce + #[pallet::error] + pub enum Error { + /// All pallet operations are halted. + Halted, + /// Message has been treated as invalid by chain verifier. + MessageRejectedByChainVerifier, + /// Message has been treated as invalid by lane verifier. + MessageRejectedByLaneVerifier, + /// Submitter has failed to pay fee for delivering and dispatching messages. + FailedToWithdrawMessageFee, + /// The transaction brings too many messages. + TooManyMessagesInTheProof, + /// Invalid messages has been submitted. + InvalidMessagesProof, + /// Invalid messages delivery proof has been submitted. + InvalidMessagesDeliveryProof, + /// The bridged chain has invalid `UnrewardedRelayers` in its storage (fatal for the lane). + InvalidUnrewardedRelayers, + /// The relayer has declared invalid unrewarded relayers state in the + /// `receive_messages_delivery_proof` call. + InvalidUnrewardedRelayersState, + /// The message someone is trying to work with (i.e. increase fee) is already-delivered. 
+ MessageIsAlreadyDelivered, + /// The message someone is trying to work with (i.e. increase fee) is not yet sent. + MessageIsNotYetSent, + /// The number of actually confirmed messages is going to be larger than the number of + /// messages in the proof. This may mean that this or bridged chain storage is corrupted. + TryingToConfirmMoreMessagesThanExpected, } - /// Get nonce of latest confirmed message at given outbound lane. - pub fn outbound_latest_received_nonce(lane: LaneId) -> MessageNonce { - OutboundLanes::::get(&lane).latest_received_nonce + /// Optional pallet owner. + /// + /// Pallet owner has a right to halt all pallet operations and then resume it. If it is + /// `None`, then there are no direct ways to halt/resume pallet operations, but other + /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt + /// flag directly or call the `halt_operations`). + #[pallet::storage] + #[pallet::getter(fn module_owner)] + pub type PalletOwner, I: 'static = ()> = StorageValue<_, T::AccountId>; + + /// The current operating mode of the pallet. + /// + /// Depending on the mode either all, some, or no transactions will be allowed. + #[pallet::storage] + #[pallet::getter(fn operating_mode)] + pub type PalletOperatingMode, I: 'static = ()> = + StorageValue<_, OperatingMode, ValueQuery>; + + /// Map of lane id => inbound lane data. + #[pallet::storage] + pub type InboundLanes, I: 'static = ()> = + StorageMap<_, Blake2_128Concat, LaneId, InboundLaneData, ValueQuery>; + + /// Map of lane id => outbound lane data. + #[pallet::storage] + pub type OutboundLanes, I: 'static = ()> = + StorageMap<_, Blake2_128Concat, LaneId, OutboundLaneData, ValueQuery>; + + /// All queued outbound messages. + #[pallet::storage] + pub type OutboundMessages, I: 'static = ()> = + StorageMap<_, Blake2_128Concat, MessageKey, MessageData>; + + #[pallet::genesis_config] + pub struct GenesisConfig, I: 'static = ()> { + /// Initial pallet operating mode. 
+ pub operating_mode: OperatingMode, + /// Initial pallet owner. + pub owner: Option, + /// Dummy marker. + pub phantom: sp_std::marker::PhantomData, + } + + #[cfg(feature = "std")] + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + Self { + operating_mode: Default::default(), + owner: Default::default(), + phantom: Default::default(), + } + } } - /// Get nonce of latest received message at given inbound lane. - pub fn inbound_latest_received_nonce(lane: LaneId) -> MessageNonce { - InboundLanes::::get(&lane).last_delivered_nonce() + #[pallet::genesis_build] + impl, I: 'static> GenesisBuild for GenesisConfig { + fn build(&self) { + PalletOperatingMode::::put(&self.operating_mode); + if let Some(ref owner) = self.owner { + PalletOwner::::put(owner); + } + } } - /// Get nonce of latest confirmed message at given inbound lane. - pub fn inbound_latest_confirmed_nonce(lane: LaneId) -> MessageNonce { - InboundLanes::::get(&lane).last_confirmed_nonce - } + impl, I: 'static> Pallet { + /// Get stored data of the outbound message with given nonce. + pub fn outbound_message_data( + lane: LaneId, + nonce: MessageNonce, + ) -> Option> { + OutboundMessages::::get(MessageKey { lane_id: lane, nonce }) + } - /// Get state of unrewarded relayers set. - pub fn inbound_unrewarded_relayers_state(lane: bp_messages::LaneId) -> bp_messages::UnrewardedRelayersState { - let relayers = InboundLanes::::get(&lane).relayers; - bp_messages::UnrewardedRelayersState { - unrewarded_relayer_entries: relayers.len() as _, - messages_in_oldest_entry: relayers - .front() - .map(|entry| 1 + entry.messages.end - entry.messages.begin) - .unwrap_or(0), - total_messages: total_unrewarded_messages(&relayers).unwrap_or(MessageNonce::MAX), + /// Get nonce of the latest generated message at given outbound lane. 
+ pub fn outbound_latest_generated_nonce(lane: LaneId) -> MessageNonce { + OutboundLanes::::get(&lane).latest_generated_nonce } - } - /// AccountId of the shared relayer fund account. - /// - /// This account is passed to `MessageDeliveryAndDispatchPayment` trait, and depending - /// on the implementation it can be used to store relayers rewards. - /// See [InstantCurrencyPayments] for a concrete implementation. - pub fn relayer_fund_account_id() -> T::AccountId { - use sp_runtime::traits::Convert; - let encoded_id = bp_runtime::derive_relayer_fund_account_id(bp_runtime::NO_INSTANCE_ID); - T::AccountIdConverter::convert(encoded_id) + /// Get nonce of the latest confirmed message at given outbound lane. + pub fn outbound_latest_received_nonce(lane: LaneId) -> MessageNonce { + OutboundLanes::::get(&lane).latest_received_nonce + } + + /// Get nonce of the latest received message at given inbound lane. + pub fn inbound_latest_received_nonce(lane: LaneId) -> MessageNonce { + InboundLanes::::get(&lane).last_delivered_nonce() + } + + /// Get nonce of the latest confirmed message at given inbound lane. + pub fn inbound_latest_confirmed_nonce(lane: LaneId) -> MessageNonce { + InboundLanes::::get(&lane).last_confirmed_nonce + } + + /// Get state of unrewarded relayers set. + pub fn inbound_unrewarded_relayers_state( + lane: bp_messages::LaneId, + ) -> bp_messages::UnrewardedRelayersState { + let relayers = InboundLanes::::get(&lane).relayers; + bp_messages::UnrewardedRelayersState { + unrewarded_relayer_entries: relayers.len() as _, + messages_in_oldest_entry: relayers + .front() + .map(|entry| 1 + entry.messages.end - entry.messages.begin) + .unwrap_or(0), + total_messages: total_unrewarded_messages(&relayers).unwrap_or(MessageNonce::MAX), + } + } } } /// Getting storage keys for messages and lanes states. These keys are normally used when building /// messages and lanes states proofs. 
-/// -/// Keep in mind that all functions in this module are **NOT** using passed `T` argument, so any -/// runtime can be passed. E.g. if you're verifying proof from Runtime1 in Runtime2, you only have -/// access to Runtime2 and you may pass it to the functions, where required. This is because our -/// maps are not using any Runtime-specific data in the keys. -/// -/// On the other side, passing correct instance is required. So if proof has been crafted by the -/// Instance1, you should verify it using Instance1. This is inconvenient if you're using different -/// instances on different sides of the bridge. I.e. in Runtime1 it is Instance2, but on Runtime2 -/// it is Instance42. But there's no other way, but to craft this key manually (which is what I'm -/// trying to avoid here) - by using strings like "Instance2", "OutboundMessages", etc. pub mod storage_keys { use super::*; - use frame_support::{traits::Instance, StorageHasher}; use sp_core::storage::StorageKey; /// Storage key of the outbound message in the runtime storage. - pub fn message_key(lane: &LaneId, nonce: MessageNonce) -> StorageKey { - storage_map_final_key::("OutboundMessages", &MessageKey { lane_id: *lane, nonce }.encode()) + pub fn message_key(pallet_prefix: &str, lane: &LaneId, nonce: MessageNonce) -> StorageKey { + bp_runtime::storage_map_final_key_blake2_128concat( + pallet_prefix, + "OutboundMessages", + &MessageKey { lane_id: *lane, nonce }.encode(), + ) } /// Storage key of the outbound message lane state in the runtime storage. - pub fn outbound_lane_data_key(lane: &LaneId) -> StorageKey { - storage_map_final_key::("OutboundLanes", lane) + pub fn outbound_lane_data_key(pallet_prefix: &str, lane: &LaneId) -> StorageKey { + bp_runtime::storage_map_final_key_blake2_128concat(pallet_prefix, "OutboundLanes", lane) } /// Storage key of the inbound message lane state in the runtime storage. 
- pub fn inbound_lane_data_key(lane: &LaneId) -> StorageKey { - storage_map_final_key::("InboundLanes", lane) + pub fn inbound_lane_data_key(pallet_prefix: &str, lane: &LaneId) -> StorageKey { + bp_runtime::storage_map_final_key_blake2_128concat(pallet_prefix, "InboundLanes", lane) } +} - /// This is a copypaste of the `frame_support::storage::generator::StorageMap::storage_map_final_key`. - fn storage_map_final_key(map_name: &str, key: &[u8]) -> StorageKey { - let module_prefix_hashed = frame_support::Twox128::hash(I::PREFIX.as_bytes()); - let storage_prefix_hashed = frame_support::Twox128::hash(map_name.as_bytes()); - let key_hashed = frame_support::Blake2_128Concat::hash(key); +/// AccountId of the shared relayer fund account. +/// +/// This account is passed to `MessageDeliveryAndDispatchPayment` trait, and depending +/// on the implementation it can be used to store relayers rewards. +/// See [`InstantCurrencyPayments`] for a concrete implementation. +pub fn relayer_fund_account_id>( +) -> AccountId { + let encoded_id = bp_runtime::derive_relayer_fund_account_id(bp_runtime::NO_INSTANCE_ID); + AccountIdConverter::convert(encoded_id) +} - let mut final_key = - Vec::with_capacity(module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len()); +impl + bp_messages::source_chain::MessagesBridge< + T::AccountId, + T::OutboundMessageFee, + T::OutboundPayload, + > for Pallet +where + T: Config, + I: 'static, +{ + type Error = sp_runtime::DispatchErrorWithPostInfo; + + fn send_message( + sender: bp_messages::source_chain::Sender, + lane: LaneId, + message: T::OutboundPayload, + delivery_and_dispatch_fee: T::OutboundMessageFee, + ) -> Result { + crate::send_message::(sender, lane, message, delivery_and_dispatch_fee) + } +} - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); - final_key.extend_from_slice(key_hashed.as_ref()); +/// Function that actually sends message. 
+fn send_message, I: 'static>( + submitter: bp_messages::source_chain::Sender, + lane_id: LaneId, + payload: T::OutboundPayload, + delivery_and_dispatch_fee: T::OutboundMessageFee, +) -> sp_std::result::Result< + SendMessageArtifacts, + sp_runtime::DispatchErrorWithPostInfo, +> { + ensure_normal_operating_mode::()?; + + // initially, actual (post-dispatch) weight is equal to pre-dispatch weight + let mut actual_weight = T::WeightInfo::send_message_weight(&payload, T::DbWeight::get()); + + // let's first check if message can be delivered to target chain + T::TargetHeaderChain::verify_message(&payload).map_err(|err| { + log::trace!( + target: "runtime::bridge-messages", + "Message to lane {:?} is rejected by target chain: {:?}", + lane_id, + err, + ); + + Error::::MessageRejectedByChainVerifier + })?; + + // now let's enforce any additional lane rules + let mut lane = outbound_lane::(lane_id); + T::LaneMessageVerifier::verify_message( + &submitter, + &delivery_and_dispatch_fee, + &lane_id, + &lane.data(), + &payload, + ) + .map_err(|err| { + log::trace!( + target: "runtime::bridge-messages", + "Message to lane {:?} is rejected by lane verifier: {:?}", + lane_id, + err, + ); - StorageKey(final_key) + Error::::MessageRejectedByLaneVerifier + })?; + + // let's withdraw delivery and dispatch fee from submitter + T::MessageDeliveryAndDispatchPayment::pay_delivery_and_dispatch_fee( + &submitter, + &delivery_and_dispatch_fee, + &relayer_fund_account_id::(), + ) + .map_err(|err| { + log::trace!( + target: "runtime::bridge-messages", + "Message to lane {:?} is rejected because submitter {:?} is unable to pay fee {:?}: {:?}", + lane_id, + submitter, + delivery_and_dispatch_fee, + err, + ); + + Error::::FailedToWithdrawMessageFee + })?; + + // finally, save message in outbound storage and emit event + let encoded_payload = payload.encode(); + let encoded_payload_len = encoded_payload.len(); + let nonce = + lane.send_message(MessageData { payload: encoded_payload, fee: 
delivery_and_dispatch_fee }); + // Guaranteed to be called outside only when the message is accepted. + // We assume that the maximum weight call back used is `single_message_callback_overhead`, so do + // not perform complex db operation in callback. If you want to, put these magic logic in + // outside pallet and control the weight there. + let single_message_callback_overhead = + T::WeightInfo::single_message_callback_overhead(T::DbWeight::get()); + let actual_callback_weight = T::OnMessageAccepted::on_messages_accepted(&lane_id, &nonce); + match single_message_callback_overhead.checked_sub(actual_callback_weight) { + Some(difference) if difference == 0 => (), + Some(difference) => { + log::trace!( + target: "runtime::bridge-messages", + "T::OnMessageAccepted callback has spent less weight than expected. Refunding: \ + {} - {} = {}", + single_message_callback_overhead, + actual_callback_weight, + difference, + ); + actual_weight = actual_weight.saturating_sub(difference); + }, + None => { + debug_assert!(false, "T::OnMessageAccepted callback consumed too much weight."); + log::error!( + target: "runtime::bridge-messages", + "T::OnMessageAccepted callback has spent more weight that it is allowed to: \ + {} vs {}", + single_message_callback_overhead, + actual_callback_weight, + ); + }, } + + // message sender pays for pruning at most `MaxMessagesToPruneAtOnce` messages + // the cost of pruning every message is roughly single db write + // => lets refund sender if less than `MaxMessagesToPruneAtOnce` messages pruned + let max_messages_to_prune = T::MaxMessagesToPruneAtOnce::get(); + let pruned_messages = lane.prune_messages(max_messages_to_prune); + if let Some(extra_messages) = max_messages_to_prune.checked_sub(pruned_messages) { + actual_weight = actual_weight.saturating_sub(T::DbWeight::get().writes(extra_messages)); + } + + log::trace!( + target: "runtime::bridge-messages", + "Accepted message {} to lane {:?}. 
Message size: {:?}", + nonce, + lane_id, + encoded_payload_len, + ); + + Pallet::::deposit_event(Event::MessageAccepted(lane_id, nonce)); + + Ok(SendMessageArtifacts { nonce, weight: actual_weight }) } /// Ensure that the origin is either root, or `PalletOwner`. -fn ensure_owner_or_root, I: Instance>(origin: T::Origin) -> Result<(), BadOrigin> { +fn ensure_owner_or_root, I: 'static>(origin: T::Origin) -> Result<(), BadOrigin> { match origin.into() { Ok(RawOrigin::Root) => Ok(()), - Ok(RawOrigin::Signed(ref signer)) if Some(signer) == Pallet::::module_owner().as_ref() => Ok(()), + Ok(RawOrigin::Signed(ref signer)) + if Some(signer) == Pallet::::module_owner().as_ref() => + Ok(()), _ => Err(BadOrigin), } } /// Ensure that the pallet is in normal operational mode. -fn ensure_normal_operating_mode, I: Instance>() -> Result<(), Error> { - if PalletOperatingMode::::get() != OperatingMode::Normal { +fn ensure_normal_operating_mode, I: 'static>() -> Result<(), Error> { + if PalletOperatingMode::::get() != OperatingMode::Normal { Err(Error::::Halted) } else { Ok(()) @@ -809,8 +1006,8 @@ fn ensure_normal_operating_mode, I: Instance>() -> Result<(), Error } /// Ensure that the pallet is not halted. -fn ensure_not_halted, I: Instance>() -> Result<(), Error> { - if PalletOperatingMode::::get() == OperatingMode::Halted { +fn ensure_not_halted, I: 'static>() -> Result<(), Error> { + if PalletOperatingMode::::get() == OperatingMode::Halted { Err(Error::::Halted) } else { Ok(()) @@ -818,12 +1015,16 @@ fn ensure_not_halted, I: Instance>() -> Result<(), Error> { } /// Creates new inbound lane object, backed by runtime storage. -fn inbound_lane, I: Instance>(lane_id: LaneId) -> InboundLane> { +fn inbound_lane, I: 'static>( + lane_id: LaneId, +) -> InboundLane> { InboundLane::new(inbound_lane_storage::(lane_id)) } /// Creates new runtime inbound lane storage. 
-fn inbound_lane_storage, I: Instance>(lane_id: LaneId) -> RuntimeInboundLaneStorage { +fn inbound_lane_storage, I: 'static>( + lane_id: LaneId, +) -> RuntimeInboundLaneStorage { RuntimeInboundLaneStorage { lane_id, cached_data: RefCell::new(None), @@ -832,21 +1033,20 @@ fn inbound_lane_storage, I: Instance>(lane_id: LaneId) -> RuntimeIn } /// Creates new outbound lane object, backed by runtime storage. -fn outbound_lane, I: Instance>(lane_id: LaneId) -> OutboundLane> { - OutboundLane::new(RuntimeOutboundLaneStorage { - lane_id, - _phantom: Default::default(), - }) +fn outbound_lane, I: 'static>( + lane_id: LaneId, +) -> OutboundLane> { + OutboundLane::new(RuntimeOutboundLaneStorage { lane_id, _phantom: Default::default() }) } /// Runtime inbound lane storage. -struct RuntimeInboundLaneStorage, I = DefaultInstance> { +struct RuntimeInboundLaneStorage, I: 'static = ()> { lane_id: LaneId, cached_data: RefCell>>, _phantom: PhantomData, } -impl, I: Instance> InboundLaneStorage for RuntimeInboundLaneStorage { +impl, I: 'static> InboundLaneStorage for RuntimeInboundLaneStorage { type MessageFee = T::InboundMessageFee; type Relayer = T::InboundRelayer; @@ -872,7 +1072,7 @@ impl, I: Instance> InboundLaneStorage for RuntimeInboundLaneStorage we have no recursive borrows; qed", ) = Some(data.clone()); data - } + }, } } @@ -886,12 +1086,12 @@ impl, I: Instance> InboundLaneStorage for RuntimeInboundLaneStorage } /// Runtime outbound lane storage. 
-struct RuntimeOutboundLaneStorage { +struct RuntimeOutboundLaneStorage { lane_id: LaneId, _phantom: PhantomData<(T, I)>, } -impl, I: Instance> OutboundLaneStorage for RuntimeOutboundLaneStorage { +impl, I: 'static> OutboundLaneStorage for RuntimeOutboundLaneStorage { type MessageFee = T::OutboundMessageFee; fn id(&self) -> LaneId { @@ -899,36 +1099,28 @@ impl, I: Instance> OutboundLaneStorage for RuntimeOutboundLaneStora } fn data(&self) -> OutboundLaneData { - OutboundLanes::::get(&self.lane_id) + OutboundLanes::::get(&self.lane_id) } fn set_data(&mut self, data: OutboundLaneData) { - OutboundLanes::::insert(&self.lane_id, data) + OutboundLanes::::insert(&self.lane_id, data) } #[cfg(test)] fn message(&self, nonce: &MessageNonce) -> Option> { - OutboundMessages::::get(MessageKey { - lane_id: self.lane_id, - nonce: *nonce, - }) + OutboundMessages::::get(MessageKey { lane_id: self.lane_id, nonce: *nonce }) } - fn save_message(&mut self, nonce: MessageNonce, mesage_data: MessageData) { - OutboundMessages::::insert( - MessageKey { - lane_id: self.lane_id, - nonce, - }, - mesage_data, - ); + fn save_message( + &mut self, + nonce: MessageNonce, + mesage_data: MessageData, + ) { + OutboundMessages::::insert(MessageKey { lane_id: self.lane_id, nonce }, mesage_data); } fn remove_message(&mut self, nonce: &MessageNonce) { - OutboundMessages::::remove(MessageKey { - lane_id: self.lane_id, - nonce: *nonce, - }); + OutboundMessages::::remove(MessageKey { lane_id: self.lane_id, nonce: *nonce }); } } @@ -961,12 +1153,13 @@ mod tests { use super::*; use crate::mock::{ message, message_payload, run_test, unrewarded_relayer, Event as TestEvent, Origin, - TestMessageDeliveryAndDispatchPayment, TestMessagesDeliveryProof, TestMessagesParameter, TestMessagesProof, - TestRuntime, TokenConversionRate, PAYLOAD_REJECTED_BY_TARGET_CHAIN, REGULAR_PAYLOAD, TEST_LANE_ID, - TEST_RELAYER_A, TEST_RELAYER_B, + TestMessageDeliveryAndDispatchPayment, TestMessagesDeliveryProof, TestMessagesParameter, 
+ TestMessagesProof, TestOnDeliveryConfirmed1, TestOnDeliveryConfirmed2, + TestOnMessageAccepted, TestRuntime, TokenConversionRate, PAYLOAD_REJECTED_BY_TARGET_CHAIN, + REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B, }; use bp_messages::{UnrewardedRelayer, UnrewardedRelayersState}; - use frame_support::{assert_noop, assert_ok}; + use frame_support::{assert_noop, assert_ok, weights::Weight}; use frame_system::{EventRecord, Pallet as System, Phase}; use hex_literal::hex; use sp_runtime::DispatchError; @@ -976,26 +1169,27 @@ mod tests { System::::reset_events(); } - fn send_regular_message() { + fn send_regular_message() -> Weight { get_ready_for_events(); - let message_nonce = outbound_lane::(TEST_LANE_ID) - .data() - .latest_generated_nonce - + 1; - assert_ok!(Pallet::::send_message( + let message_nonce = + outbound_lane::(TEST_LANE_ID).data().latest_generated_nonce + 1; + let weight = Pallet::::send_message( Origin::signed(1), TEST_LANE_ID, REGULAR_PAYLOAD, REGULAR_PAYLOAD.declared_weight, - )); + ) + .expect("send_message has failed") + .actual_weight + .expect("send_message always returns Some"); // check event with assigned nonce assert_eq!( System::::events(), vec![EventRecord { phase: Phase::Initialization, - event: TestEvent::Messages(RawEvent::MessageAccepted(TEST_LANE_ID, message_nonce)), + event: TestEvent::Messages(Event::MessageAccepted(TEST_LANE_ID, message_nonce)), topics: vec![], }], ); @@ -1005,6 +1199,8 @@ mod tests { 1, REGULAR_PAYLOAD.declared_weight )); + + weight } fn receive_messages_delivery_proof() { @@ -1036,7 +1232,7 @@ mod tests { System::::events(), vec![EventRecord { phase: Phase::Initialization, - event: TestEvent::Messages(RawEvent::MessagesDelivered( + event: TestEvent::Messages(Event::MessagesDelivered( TEST_LANE_ID, DeliveredMessages::new(1, true), )), @@ -1140,7 +1336,7 @@ mod tests { System::::events(), vec![EventRecord { phase: Phase::Initialization, - event: 
TestEvent::Messages(RawEvent::ParameterUpdated(parameter)), + event: TestEvent::Messages(Event::ParameterUpdated(parameter)), topics: vec![], }], ); @@ -1164,7 +1360,7 @@ mod tests { System::::events(), vec![EventRecord { phase: Phase::Initialization, - event: TestEvent::Messages(RawEvent::ParameterUpdated(parameter)), + event: TestEvent::Messages(Event::ParameterUpdated(parameter)), topics: vec![], }], ); @@ -1202,7 +1398,8 @@ mod tests { // 1:1 conversion that we use by default for testnets let rialto_token = 1u64; - let rialto_token_in_millau_tokens = TokenConversionRate::get().saturating_mul_int(rialto_token); + let rialto_token_in_millau_tokens = + TokenConversionRate::get().saturating_mul_int(rialto_token); assert_eq!(rialto_token_in_millau_tokens, 1); // let's say conversion rate is 1:1.7 @@ -1225,7 +1422,7 @@ mod tests { // send message first to be able to check that delivery_proof fails later send_regular_message(); - PalletOperatingMode::::put(OperatingMode::Halted); + PalletOperatingMode::::put(OperatingMode::Halted); assert_noop!( Pallet::::send_message( @@ -1234,12 +1431,12 @@ mod tests { REGULAR_PAYLOAD, REGULAR_PAYLOAD.declared_weight, ), - Error::::Halted, + Error::::Halted, ); assert_noop!( Pallet::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 1,), - Error::::Halted, + Error::::Halted, ); assert_noop!( @@ -1250,7 +1447,7 @@ mod tests { 1, REGULAR_PAYLOAD.declared_weight, ), - Error::::Halted, + Error::::Halted, ); assert_noop!( @@ -1260,12 +1457,18 @@ mod tests { TEST_LANE_ID, InboundLaneData { last_confirmed_nonce: 1, - ..Default::default() + relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)] + .into_iter() + .collect(), }, ))), - Default::default(), + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 1, + total_messages: 1, + }, ), - Error::::Halted, + Error::::Halted, ); }); } @@ -1276,7 +1479,7 @@ mod tests { // send message first to be able to check that delivery_proof fails later 
send_regular_message(); - PalletOperatingMode::::put(OperatingMode::RejectingOutboundMessages); + PalletOperatingMode::::put(OperatingMode::RejectingOutboundMessages); assert_noop!( Pallet::::send_message( @@ -1285,7 +1488,7 @@ mod tests { REGULAR_PAYLOAD, REGULAR_PAYLOAD.declared_weight, ), - Error::::Halted, + Error::::Halted, ); assert_ok!(Pallet::::increase_message_fee( @@ -1309,10 +1512,16 @@ mod tests { TEST_LANE_ID, InboundLaneData { last_confirmed_nonce: 1, - ..Default::default() + relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)] + .into_iter() + .collect(), }, ))), - Default::default(), + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 1, + total_messages: 1, + }, )); }); } @@ -1335,7 +1544,7 @@ mod tests { PAYLOAD_REJECTED_BY_TARGET_CHAIN, PAYLOAD_REJECTED_BY_TARGET_CHAIN.declared_weight ), - Error::::MessageRejectedByChainVerifier, + Error::::MessageRejectedByChainVerifier, ); }); } @@ -1345,8 +1554,13 @@ mod tests { run_test(|| { // messages with zero fee are rejected by lane verifier assert_noop!( - Pallet::::send_message(Origin::signed(1), TEST_LANE_ID, REGULAR_PAYLOAD, 0), - Error::::MessageRejectedByLaneVerifier, + Pallet::::send_message( + Origin::signed(1), + TEST_LANE_ID, + REGULAR_PAYLOAD, + 0 + ), + Error::::MessageRejectedByLaneVerifier, ); }); } @@ -1362,7 +1576,7 @@ mod tests { REGULAR_PAYLOAD, REGULAR_PAYLOAD.declared_weight ), - Error::::FailedToWithdrawMessageFee, + Error::::FailedToWithdrawMessageFee, ); }); } @@ -1386,7 +1600,7 @@ mod tests { fn receive_messages_proof_updates_confirmed_message_nonce() { run_test(|| { // say we have received 10 messages && last confirmed message is 8 - InboundLanes::::insert( + InboundLanes::::insert( TEST_LANE_ID, InboundLaneData { last_confirmed_nonce: 8, @@ -1408,11 +1622,10 @@ mod tests { ); // message proof includes outbound lane state with latest confirmed message updated to 9 - let mut message_proof: TestMessagesProof = Ok(vec![message(11, 
REGULAR_PAYLOAD)]).into(); - message_proof.result.as_mut().unwrap()[0].1.lane_state = Some(OutboundLaneData { - latest_received_nonce: 9, - ..Default::default() - }); + let mut message_proof: TestMessagesProof = + Ok(vec![message(11, REGULAR_PAYLOAD)]).into(); + message_proof.result.as_mut().unwrap()[0].1.lane_state = + Some(OutboundLaneData { latest_received_nonce: 9, ..Default::default() }); assert_ok!(Pallet::::receive_messages_proof( Origin::signed(1), @@ -1446,18 +1659,16 @@ mod tests { } #[test] - fn receive_messages_proof_rejects_invalid_dispatch_weight() { + fn receive_messages_proof_does_not_accept_message_if_dispatch_weight_is_not_enough() { run_test(|| { - assert_noop!( - Pallet::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight - 1, - ), - Error::::InvalidMessagesDispatchWeight, - ); + assert_ok!(Pallet::::receive_messages_proof( + Origin::signed(1), + TEST_RELAYER_A, + Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), + 1, + REGULAR_PAYLOAD.declared_weight - 1, + )); + assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 0); }); } @@ -1465,14 +1676,14 @@ mod tests { fn receive_messages_proof_rejects_invalid_proof() { run_test(|| { assert_noop!( - Pallet::::receive_messages_proof( + Pallet::::receive_messages_proof( Origin::signed(1), TEST_RELAYER_A, Err(()).into(), 1, 0, ), - Error::::InvalidMessagesProof, + Error::::InvalidMessagesProof, ); }); } @@ -1481,14 +1692,14 @@ mod tests { fn receive_messages_proof_rejects_proof_with_too_many_messages() { run_test(|| { assert_noop!( - Pallet::::receive_messages_proof( + Pallet::::receive_messages_proof( Origin::signed(1), TEST_RELAYER_A, Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), u32::MAX, 0, ), - Error::::TooManyMessagesInTheProof, + Error::::TooManyMessagesInTheProof, ); }); } @@ -1500,7 +1711,7 @@ mod tests { receive_messages_delivery_proof(); assert_eq!( - 
OutboundLanes::::get(&TEST_LANE_ID).latest_received_nonce, + OutboundLanes::::get(&TEST_LANE_ID).latest_received_nonce, 1, ); }); @@ -1528,7 +1739,9 @@ mod tests { TestMessagesDeliveryProof(Ok(( TEST_LANE_ID, InboundLaneData { - relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into_iter().collect(), + relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)] + .into_iter() + .collect(), ..Default::default() } ))), @@ -1538,16 +1751,11 @@ mod tests { ..Default::default() }, )); - assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid( - TEST_RELAYER_A, - 1000 - )); - assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid( - TEST_RELAYER_B, - 2000 - )); + assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid(TEST_RELAYER_A, 1000)); + assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid(TEST_RELAYER_B, 2000)); - // this reports delivery of both message 1 and message 2 => reward is paid only to TEST_RELAYER_B + // this reports delivery of both message 1 and message 2 => reward is paid only to + // TEST_RELAYER_B assert_ok!(Pallet::::receive_messages_delivery_proof( Origin::signed(1), TestMessagesDeliveryProof(Ok(( @@ -1568,14 +1776,8 @@ mod tests { ..Default::default() }, )); - assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid( - TEST_RELAYER_A, - 1000 - )); - assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid( - TEST_RELAYER_B, - 2000 - )); + assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid(TEST_RELAYER_A, 1000)); + assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid(TEST_RELAYER_B, 2000)); }); } @@ -1588,7 +1790,7 @@ mod tests { TestMessagesDeliveryProof(Err(())), Default::default(), ), - Error::::InvalidMessagesDeliveryProof, + Error::::InvalidMessagesDeliveryProof, ); }); } @@ -1596,7 +1798,7 @@ mod tests { #[test] fn receive_messages_delivery_proof_rejects_proof_if_declared_relayers_state_is_invalid() { run_test(|| { - // when number of relayers entires is invalid + // when 
number of relayers entries is invalid assert_noop!( Pallet::::receive_messages_delivery_proof( Origin::signed(1), @@ -1618,7 +1820,7 @@ mod tests { ..Default::default() }, ), - Error::::InvalidUnrewardedRelayersState, + Error::::InvalidUnrewardedRelayersState, ); // when number of messages is invalid @@ -1643,7 +1845,7 @@ mod tests { ..Default::default() }, ), - Error::::InvalidUnrewardedRelayersState, + Error::::InvalidUnrewardedRelayersState, ); }); } @@ -1654,7 +1856,7 @@ mod tests { let mut invalid_message = message(1, REGULAR_PAYLOAD); invalid_message.data.payload = Vec::new(); - assert_ok!(Pallet::::receive_messages_proof( + assert_ok!(Pallet::::receive_messages_proof( Origin::signed(1), TEST_RELAYER_A, Ok(vec![invalid_message]).into(), @@ -1662,10 +1864,7 @@ mod tests { 0, // weight may be zero in this case (all messages are improperly encoded) )); - assert_eq!( - InboundLanes::::get(&TEST_LANE_ID).last_delivered_nonce(), - 1, - ); + assert_eq!(InboundLanes::::get(&TEST_LANE_ID).last_delivered_nonce(), 1,); }); } @@ -1675,31 +1874,26 @@ mod tests { let mut invalid_message = message(2, REGULAR_PAYLOAD); invalid_message.data.payload = Vec::new(); - assert_ok!(Pallet::::receive_messages_proof( + assert_ok!(Pallet::::receive_messages_proof( Origin::signed(1), TEST_RELAYER_A, - Ok(vec![ - message(1, REGULAR_PAYLOAD), - invalid_message, - message(3, REGULAR_PAYLOAD), - ]) + Ok( + vec![message(1, REGULAR_PAYLOAD), invalid_message, message(3, REGULAR_PAYLOAD),] + ) .into(), 3, REGULAR_PAYLOAD.declared_weight + REGULAR_PAYLOAD.declared_weight, )); - assert_eq!( - InboundLanes::::get(&TEST_LANE_ID).last_delivered_nonce(), - 3, - ); + assert_eq!(InboundLanes::::get(&TEST_LANE_ID).last_delivered_nonce(), 3,); }); } #[test] fn storage_message_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking all - // previously crafted messages proofs. 
- let storage_key = storage_keys::message_key::(&*b"test", 42).0; + // If this test fails, then something has been changed in module storage that is breaking + // all previously crafted messages proofs. + let storage_key = storage_keys::message_key("BridgeMessages", &*b"test", 42).0; assert_eq!( storage_key, hex!("dd16c784ebd3390a9bc0357c7511ed018a395e6242c6813b196ca31ed0547ea79446af0e09063bd4a7874aef8a997cec746573742a00000000000000").to_vec(), @@ -1710,9 +1904,9 @@ mod tests { #[test] fn outbound_lane_data_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking all - // previously crafted outbound lane state proofs. - let storage_key = storage_keys::outbound_lane_data_key::(&*b"test").0; + // If this test fails, then something has been changed in module storage that is breaking + // all previously crafted outbound lane state proofs. + let storage_key = storage_keys::outbound_lane_data_key("BridgeMessages", &*b"test").0; assert_eq!( storage_key, hex!("dd16c784ebd3390a9bc0357c7511ed0196c246acb9b55077390e3ca723a0ca1f44a8995dd50b6657a037a7839304535b74657374").to_vec(), @@ -1723,9 +1917,9 @@ mod tests { #[test] fn inbound_lane_data_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking all - // previously crafted inbound lane state proofs. - let storage_key = storage_keys::inbound_lane_data_key::(&*b"test").0; + // If this test fails, then something has been changed in module storage that is breaking + // all previously crafted inbound lane state proofs. 
+ let storage_key = storage_keys::inbound_lane_data_key("BridgeMessages", &*b"test").0; assert_eq!( storage_key, hex!("dd16c784ebd3390a9bc0357c7511ed01e5f83cf83f2127eb47afdc35d6e43fab44a8995dd50b6657a037a7839304535b74657374").to_vec(), @@ -1739,19 +1933,17 @@ mod tests { run_test(|| { let message1 = message(1, message_payload(0, Weight::MAX / 2)); let message2 = message(2, message_payload(0, Weight::MAX / 2)); - let message3 = message(2, message_payload(0, Weight::MAX / 2)); + let message3 = message(3, message_payload(0, Weight::MAX / 2)); - assert_noop!( - Pallet::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - // this may cause overflow if source chain storage is invalid - Ok(vec![message1, message2, message3]).into(), - 3, - 100, - ), - Error::::InvalidMessagesDispatchWeight, - ); + assert_ok!(Pallet::::receive_messages_proof( + Origin::signed(1), + TEST_RELAYER_A, + // this may cause overflow if source chain storage is invalid + Ok(vec![message1, message2, message3]).into(), + 3, + Weight::MAX, + )); + assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 2); }); } @@ -1762,8 +1954,13 @@ mod tests { receive_messages_delivery_proof(); assert_noop!( - Pallet::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,), - Error::::MessageIsAlreadyDelivered, + Pallet::::increase_message_fee( + Origin::signed(1), + TEST_LANE_ID, + 1, + 100, + ), + Error::::MessageIsAlreadyDelivered, ); }); } @@ -1772,8 +1969,13 @@ mod tests { fn increase_message_fee_fails_if_message_is_not_yet_sent() { run_test(|| { assert_noop!( - Pallet::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,), - Error::::MessageIsNotYetSent, + Pallet::::increase_message_fee( + Origin::signed(1), + TEST_LANE_ID, + 1, + 100, + ), + Error::::MessageIsNotYetSent, ); }); } @@ -1786,8 +1988,13 @@ mod tests { TestMessageDeliveryAndDispatchPayment::reject_payments(); assert_noop!( - Pallet::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,), - 
Error::::FailedToWithdrawMessageFee, + Pallet::::increase_message_fee( + Origin::signed(1), + TEST_LANE_ID, + 1, + 100, + ), + Error::::FailedToWithdrawMessageFee, ); }); } @@ -1797,7 +2004,7 @@ mod tests { run_test(|| { send_regular_message(); - assert_ok!(Pallet::::increase_message_fee( + assert_ok!(Pallet::::increase_message_fee( Origin::signed(1), TEST_LANE_ID, 1, @@ -1820,11 +2027,12 @@ mod tests { payload.dispatch_result.dispatch_fee_paid_during_dispatch = !is_prepaid; let proof = Ok(vec![message(nonce, payload)]).into(); let messages_count = 1; - let pre_dispatch_weight = ::WeightInfo::receive_messages_proof_weight( - &proof, - messages_count, - REGULAR_PAYLOAD.declared_weight, - ); + let pre_dispatch_weight = + ::WeightInfo::receive_messages_proof_weight( + &proof, + messages_count, + REGULAR_PAYLOAD.declared_weight, + ); let post_dispatch_weight = Pallet::::receive_messages_proof( Origin::signed(1), TEST_RELAYER_A, @@ -1848,7 +2056,8 @@ mod tests { assert_eq!(post, pre - REGULAR_PAYLOAD.declared_weight); // when dispatch is returning `unspent_weight > declared_weight` - let (pre, post) = submit_with_unspent_weight(3, REGULAR_PAYLOAD.declared_weight + 1, false); + let (pre, post) = + submit_with_unspent_weight(3, REGULAR_PAYLOAD.declared_weight + 1, false); assert_eq!(post, pre - REGULAR_PAYLOAD.declared_weight); // when there's no unspent weight @@ -1923,10 +2132,231 @@ mod tests { )); // ensure that both callbacks have been called twice: for 1+2, then for 3 - crate::mock::TestOnDeliveryConfirmed1::ensure_called(&TEST_LANE_ID, &delivered_messages_1_and_2); - crate::mock::TestOnDeliveryConfirmed1::ensure_called(&TEST_LANE_ID, &delivered_message_3); - crate::mock::TestOnDeliveryConfirmed2::ensure_called(&TEST_LANE_ID, &delivered_messages_1_and_2); - crate::mock::TestOnDeliveryConfirmed2::ensure_called(&TEST_LANE_ID, &delivered_message_3); + TestOnDeliveryConfirmed1::ensure_called(&TEST_LANE_ID, &delivered_messages_1_and_2); + 
TestOnDeliveryConfirmed1::ensure_called(&TEST_LANE_ID, &delivered_message_3); + TestOnDeliveryConfirmed2::ensure_called(&TEST_LANE_ID, &delivered_messages_1_and_2); + TestOnDeliveryConfirmed2::ensure_called(&TEST_LANE_ID, &delivered_message_3); + }); + } + + fn confirm_3_messages_delivery() -> (Weight, Weight) { + send_regular_message(); + send_regular_message(); + send_regular_message(); + + let proof = TestMessagesDeliveryProof(Ok(( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 0, + relayers: vec![unrewarded_relayer(1, 3, TEST_RELAYER_A)].into_iter().collect(), + }, + ))); + let relayers_state = UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 3, + ..Default::default() + }; + let pre_dispatch_weight = + ::WeightInfo::receive_messages_delivery_proof_weight( + &proof, + &relayers_state, + crate::mock::DbWeight::get(), + ); + let post_dispatch_weight = Pallet::::receive_messages_delivery_proof( + Origin::signed(1), + proof, + relayers_state, + ) + .expect("confirmation has failed") + .actual_weight + .expect("receive_messages_delivery_proof always returns Some"); + (pre_dispatch_weight, post_dispatch_weight) + } + + #[test] + fn receive_messages_delivery_proof_refunds_zero_weight() { + run_test(|| { + let (pre_dispatch_weight, post_dispatch_weight) = confirm_3_messages_delivery(); + assert_eq!(pre_dispatch_weight, post_dispatch_weight); + }); + } + + #[test] + fn receive_messages_delivery_proof_refunds_non_zero_weight() { + run_test(|| { + TestOnDeliveryConfirmed1::set_consumed_weight_per_message( + crate::mock::DbWeight::get().writes(1), + ); + + let (pre_dispatch_weight, post_dispatch_weight) = confirm_3_messages_delivery(); + assert_eq!( + pre_dispatch_weight.saturating_sub(post_dispatch_weight), + crate::mock::DbWeight::get().reads(1) * 3 + ); + }); + } + + #[test] + #[should_panic] + fn receive_messages_panics_in_debug_mode_if_callback_is_wrong() { + run_test(|| { + 
TestOnDeliveryConfirmed1::set_consumed_weight_per_message( + crate::mock::DbWeight::get().reads_writes(2, 2), + ); + confirm_3_messages_delivery() + }); + } + + #[test] + fn receive_messages_delivery_proof_rejects_proof_if_trying_to_confirm_more_messages_than_expected( + ) { + run_test(|| { + // send message first to be able to check that delivery_proof fails later + send_regular_message(); + + // 1) InboundLaneData declares that the `last_confirmed_nonce` is 1; + // 2) InboundLaneData has no entries => `InboundLaneData::last_delivered_nonce()` + // returns `last_confirmed_nonce`; + // 3) it means that we're going to confirm delivery of messages 1..=1; + // 4) so the number of declared messages (see `UnrewardedRelayersState`) is `0` and + // numer of actually confirmed messages is `1`. + assert_noop!( + Pallet::::receive_messages_delivery_proof( + Origin::signed(1), + TestMessagesDeliveryProof(Ok(( + TEST_LANE_ID, + InboundLaneData { last_confirmed_nonce: 1, relayers: Default::default() }, + ))), + UnrewardedRelayersState::default(), + ), + Error::::TryingToConfirmMoreMessagesThanExpected, + ); + }); + } + + #[test] + fn increase_message_fee_weight_depends_on_message_size() { + run_test(|| { + let mut small_payload = message_payload(0, 100); + let mut large_payload = message_payload(1, 100); + small_payload.extra = vec![1; 100]; + large_payload.extra = vec![2; 16_384]; + + assert_ok!(Pallet::::send_message( + Origin::signed(1), + TEST_LANE_ID, + small_payload, + 100, + )); + assert_ok!(Pallet::::send_message( + Origin::signed(1), + TEST_LANE_ID, + large_payload, + 100, + )); + + let small_weight = + Pallet::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 1) + .expect("increase_message_fee has failed") + .actual_weight + .expect("increase_message_fee always returns Some"); + + let large_weight = + Pallet::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 2, 1) + .expect("increase_message_fee has failed") + .actual_weight + 
.expect("increase_message_fee always returns Some"); + + assert!( + large_weight > small_weight, + "Actual post-dispatch weigth for larger message {} must be larger than {} for small message", + large_weight, + small_weight, + ); + }); + } + + #[test] + fn weight_is_refunded_for_messages_that_are_not_pruned() { + run_test(|| { + // send first MAX messages - no messages are pruned + let max_messages_to_prune = crate::mock::MaxMessagesToPruneAtOnce::get(); + let when_zero_messages_are_pruned = send_regular_message(); + let mut delivered_messages = DeliveredMessages::new(1, true); + for _ in 1..max_messages_to_prune { + assert_eq!(send_regular_message(), when_zero_messages_are_pruned); + delivered_messages.note_dispatched_message(true); + } + + // confirm delivery of all sent messages + assert_ok!(Pallet::::receive_messages_delivery_proof( + Origin::signed(1), + TestMessagesDeliveryProof(Ok(( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 1, + relayers: vec![UnrewardedRelayer { + relayer: 0, + messages: delivered_messages, + }] + .into_iter() + .collect(), + }, + ))), + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: max_messages_to_prune, + ..Default::default() + }, + )); + + // when next message is sent, MAX messages are pruned + let weight_when_max_messages_are_pruned = send_regular_message(); + assert_eq!( + weight_when_max_messages_are_pruned, + when_zero_messages_are_pruned + + crate::mock::DbWeight::get().writes(max_messages_to_prune), + ); + }); + } + + #[test] + fn message_accepted_callbacks_are_called() { + run_test(|| { + send_regular_message(); + TestOnMessageAccepted::ensure_called(&TEST_LANE_ID, &1); + }); + } + + #[test] + #[should_panic] + fn message_accepted_panics_in_debug_mode_if_callback_is_wrong() { + run_test(|| { + TestOnMessageAccepted::set_consumed_weight_per_message( + crate::mock::DbWeight::get().reads_writes(2, 2), + ); + send_regular_message(); + }); + } + + #[test] + fn 
message_accepted_refunds_non_zero_weight() { + run_test(|| { + TestOnMessageAccepted::set_consumed_weight_per_message( + crate::mock::DbWeight::get().writes(1), + ); + let actual_callback_weight = send_regular_message(); + let pre_dispatch_weight = ::WeightInfo::send_message_weight( + ®ULAR_PAYLOAD, + crate::mock::DbWeight::get(), + ); + let prune_weight = crate::mock::DbWeight::get() + .writes(::MaxMessagesToPruneAtOnce::get()); + + assert_eq!( + pre_dispatch_weight.saturating_sub(actual_callback_weight), + crate::mock::DbWeight::get().reads(1).saturating_add(prune_weight) + ); }); } } diff --git a/polkadot/bridges/modules/messages/src/mock.rs b/polkadot/bridges/modules/messages/src/mock.rs index 35358b76f26de6c85966ee67985f9607d607cc67..a333c95bb58b812ec0de1c316a8576c7ca474d8c 100644 --- a/polkadot/bridges/modules/messages/src/mock.rs +++ b/polkadot/bridges/modules/messages/src/mock.rs @@ -17,21 +17,26 @@ // From construct_runtime macro #![allow(clippy::from_over_into)] -use crate::Config; +use crate::{instant_payments::cal_relayers_rewards, Config}; use bitvec::prelude::*; use bp_messages::{ source_chain::{ - LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed, RelayersRewards, Sender, - TargetHeaderChain, + LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed, + OnMessageAccepted, Sender, TargetHeaderChain, }, - target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain}, - DeliveredMessages, InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, OutboundLaneData, - Parameter as MessagesParameter, UnrewardedRelayer, + target_chain::{ + DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain, + }, + DeliveredMessages, InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, + OutboundLaneData, Parameter as MessagesParameter, UnrewardedRelayer, }; use bp_runtime::{messages::MessageDispatchResult, Size}; use 
codec::{Decode, Encode}; -use frame_support::{parameter_types, weights::Weight}; +use frame_support::{ + parameter_types, + weights::{RuntimeDbWeight, Weight}, +}; use scale_info::TypeInfo; use sp_core::H256; use sp_runtime::{ @@ -39,7 +44,10 @@ use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, FixedU128, Perbill, }; -use std::collections::BTreeMap; +use std::{ + collections::{BTreeMap, VecDeque}, + ops::RangeInclusive, +}; pub type AccountId = u64; pub type Balance = u64; @@ -51,9 +59,11 @@ pub struct TestPayload { pub declared_weight: Weight, /// Message dispatch result. /// - /// Note: in correct code `dispatch_result.unspent_weight` will always be <= `declared_weight`, but for test - /// purposes we'll be making it larger than `declared_weight` sometimes. + /// Note: in correct code `dispatch_result.unspent_weight` will always be <= `declared_weight`, + /// but for test purposes we'll be making it larger than `declared_weight` sometimes. pub dispatch_result: MessageDispatchResult, + /// Extra bytes that affect payload size. + pub extra: Vec, } pub type TestMessageFee = u64; pub type TestRelayer = u64; @@ -88,6 +98,7 @@ parameter_types! { pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 1, write: 2 }; } impl frame_system::Config for TestRuntime { @@ -111,7 +122,7 @@ impl frame_system::Config for TestRuntime { type SystemWeightInfo = (); type BlockWeights = (); type BlockLength = (); - type DbWeight = (); + type DbWeight = DbWeight; type SS58Prefix = (); type OnSetCode = (); } @@ -137,6 +148,7 @@ parameter_types! 
{ pub const MaxUnrewardedRelayerEntriesAtInboundLane: u64 = 16; pub const MaxUnconfirmedMessagesAtInboundLane: u64 = 32; pub storage TokenConversionRate: FixedU128 = 1.into(); + pub const TestBridgedChainId: bp_runtime::ChainId = *b"test"; } #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq, TypeInfo)] @@ -147,7 +159,8 @@ pub enum TestMessagesParameter { impl MessagesParameter for TestMessagesParameter { fn save(&self) { match *self { - TestMessagesParameter::TokenConversionRate(conversion_rate) => TokenConversionRate::set(&conversion_rate), + TestMessagesParameter::TokenConversionRate(conversion_rate) => + TokenConversionRate::set(&conversion_rate), } } } @@ -172,15 +185,17 @@ impl Config for TestRuntime { type TargetHeaderChain = TestTargetHeaderChain; type LaneMessageVerifier = TestLaneMessageVerifier; type MessageDeliveryAndDispatchPayment = TestMessageDeliveryAndDispatchPayment; + type OnMessageAccepted = TestOnMessageAccepted; type OnDeliveryConfirmed = (TestOnDeliveryConfirmed1, TestOnDeliveryConfirmed2); type SourceHeaderChain = TestSourceHeaderChain; type MessageDispatch = TestMessageDispatch; + type BridgedChainId = TestBridgedChainId; } impl Size for TestPayload { fn size_hint(&self) -> u32 { - 16 + 16 + self.extra.len() as u32 } } @@ -227,14 +242,12 @@ impl From>, ()>> for TestMessagesProof { fn from(result: Result>, ()>) -> Self { Self { result: result.map(|messages| { - let mut messages_by_lane: BTreeMap>> = - BTreeMap::new(); + let mut messages_by_lane: BTreeMap< + LaneId, + ProvedLaneMessages>, + > = BTreeMap::new(); for message in messages { - messages_by_lane - .entry(message.key.lane_id) - .or_default() - .messages - .push(message); + messages_by_lane.entry(message.key.lane_id).or_default().messages.push(message); } messages_by_lane.into_iter().collect() }), @@ -310,7 +323,8 @@ impl TestMessageDeliveryAndDispatchPayment { /// Returns true if given fee has been paid by given submitter. 
pub fn is_fee_paid(submitter: AccountId, fee: TestMessageFee) -> bool { - frame_support::storage::unhashed::get(b":message-fee:") == Some((Sender::Signed(submitter), fee)) + frame_support::storage::unhashed::get(b":message-fee:") == + Some((Sender::Signed(submitter), fee)) } /// Returns true if given relayer has been rewarded with given balance. The reward-paid flag is @@ -321,7 +335,9 @@ impl TestMessageDeliveryAndDispatchPayment { } } -impl MessageDeliveryAndDispatchPayment for TestMessageDeliveryAndDispatchPayment { +impl MessageDeliveryAndDispatchPayment + for TestMessageDeliveryAndDispatchPayment +{ type Error = &'static str; fn pay_delivery_and_dispatch_fee( @@ -330,7 +346,7 @@ impl MessageDeliveryAndDispatchPayment for TestMessag _relayer_fund_account: &AccountId, ) -> Result<(), Self::Error> { if frame_support::storage::unhashed::get(b":reject-message-fee:") == Some(true) { - return Err(TEST_ERROR); + return Err(TEST_ERROR) } frame_support::storage::unhashed::put(b":message-fee:", &(submitter, fee)); @@ -338,17 +354,51 @@ impl MessageDeliveryAndDispatchPayment for TestMessag } fn pay_relayers_rewards( + lane_id: LaneId, + message_relayers: VecDeque>, _confirmation_relayer: &AccountId, - relayers_rewards: RelayersRewards, + received_range: &RangeInclusive, _relayer_fund_account: &AccountId, ) { - for (relayer, reward) in relayers_rewards { + let relayers_rewards = + cal_relayers_rewards::(lane_id, message_relayers, received_range); + for (relayer, reward) in &relayers_rewards { let key = (b":relayer-reward:", relayer, reward.reward).encode(); frame_support::storage::unhashed::put(&key, &true); } } } +#[derive(Debug)] +pub struct TestOnMessageAccepted; + +impl TestOnMessageAccepted { + /// Verify that the callback has been called when the message is accepted. 
+ pub fn ensure_called(lane: &LaneId, message: &MessageNonce) { + let key = (b"TestOnMessageAccepted", lane, message).encode(); + assert_eq!(frame_support::storage::unhashed::get(&key), Some(true)); + } + + /// Set consumed weight returned by the callback. + pub fn set_consumed_weight_per_message(weight: Weight) { + frame_support::storage::unhashed::put(b"TestOnMessageAccepted_Weight", &weight); + } + + /// Get consumed weight returned by the callback. + pub fn get_consumed_weight_per_message() -> Option { + frame_support::storage::unhashed::get(b"TestOnMessageAccepted_Weight") + } +} + +impl OnMessageAccepted for TestOnMessageAccepted { + fn on_messages_accepted(lane: &LaneId, message: &MessageNonce) -> Weight { + let key = (b"TestOnMessageAccepted", lane, message).encode(); + frame_support::storage::unhashed::put(&key, &true); + Self::get_consumed_weight_per_message() + .unwrap_or_else(|| DbWeight::get().reads_writes(1, 1)) + } +} + /// First on-messages-delivered callback. #[derive(Debug)] pub struct TestOnDeliveryConfirmed1; @@ -359,16 +409,29 @@ impl TestOnDeliveryConfirmed1 { let key = (b"TestOnDeliveryConfirmed1", lane, messages).encode(); assert_eq!(frame_support::storage::unhashed::get(&key), Some(true)); } + + /// Set consumed weight returned by the callback. + pub fn set_consumed_weight_per_message(weight: Weight) { + frame_support::storage::unhashed::put(b"TestOnDeliveryConfirmed1_Weight", &weight); + } + + /// Get consumed weight returned by the callback. 
+ pub fn get_consumed_weight_per_message() -> Option { + frame_support::storage::unhashed::get(b"TestOnDeliveryConfirmed1_Weight") + } } impl OnDeliveryConfirmed for TestOnDeliveryConfirmed1 { - fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) { + fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) -> Weight { let key = (b"TestOnDeliveryConfirmed1", lane, messages).encode(); frame_support::storage::unhashed::put(&key, &true); + Self::get_consumed_weight_per_message() + .unwrap_or_else(|| DbWeight::get().reads_writes(1, 1)) + .saturating_mul(messages.total_messages()) } } -/// Seconde on-messages-delivered callback. +/// Second on-messages-delivered callback. #[derive(Debug)] pub struct TestOnDeliveryConfirmed2; @@ -381,9 +444,10 @@ impl TestOnDeliveryConfirmed2 { } impl OnDeliveryConfirmed for TestOnDeliveryConfirmed2 { - fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) { + fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) -> Weight { let key = (b"TestOnDeliveryConfirmed2", lane, messages).encode(); frame_support::storage::unhashed::put(&key, &true); + 0 } } @@ -400,10 +464,7 @@ impl SourceHeaderChain for TestSourceHeaderChain { proof: Self::MessagesProof, _messages_count: u32, ) -> Result>, Self::Error> { - proof - .result - .map(|proof| proof.into_iter().collect()) - .map_err(|_| TEST_ERROR) + proof.result.map(|proof| proof.into_iter().collect()).map_err(|_| TEST_ERROR) } } @@ -434,30 +495,17 @@ impl MessageDispatch for TestMessageDispatch { /// Return test lane message with given nonce and payload. pub fn message(nonce: MessageNonce, payload: TestPayload) -> Message { - Message { - key: MessageKey { - lane_id: TEST_LANE_ID, - nonce, - }, - data: message_data(payload), - } + Message { key: MessageKey { lane_id: TEST_LANE_ID, nonce }, data: message_data(payload) } } /// Constructs message payload using given arguments and zero unspent weight. 
pub const fn message_payload(id: u64, declared_weight: Weight) -> TestPayload { - TestPayload { - id, - declared_weight, - dispatch_result: dispatch_result(0), - } + TestPayload { id, declared_weight, dispatch_result: dispatch_result(0), extra: Vec::new() } } /// Return message data with valid fee for given payload. pub fn message_data(payload: TestPayload) -> MessageData { - MessageData { - payload: payload.encode(), - fee: 1, - } + MessageData { payload: payload.encode(), fee: 1 } } /// Returns message dispatch result with given unspent weight. @@ -491,14 +539,10 @@ pub fn unrewarded_relayer( /// Run pallet test. pub fn run_test(test: impl FnOnce() -> T) -> T { - let mut t = frame_system::GenesisConfig::default() - .build_storage::() + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![(ENDOWED_ACCOUNT, 1_000_000)] } + .assimilate_storage(&mut t) .unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![(ENDOWED_ACCOUNT, 1_000_000)], - } - .assimilate_storage(&mut t) - .unwrap(); let mut ext = sp_io::TestExternalities::new(t); ext.execute_with(test) } diff --git a/polkadot/bridges/modules/messages/src/outbound_lane.rs b/polkadot/bridges/modules/messages/src/outbound_lane.rs index 44061d984e1d08f3dbcfcb7e179554878fb01e97..c05437596db82af9f4ca4d3bacae457aa32c58dc 100644 --- a/polkadot/bridges/modules/messages/src/outbound_lane.rs +++ b/polkadot/bridges/modules/messages/src/outbound_lane.rs @@ -18,7 +18,8 @@ use bitvec::prelude::*; use bp_messages::{ - DeliveredMessages, DispatchResultsBitVec, LaneId, MessageData, MessageNonce, OutboundLaneData, UnrewardedRelayer, + DeliveredMessages, DispatchResultsBitVec, LaneId, MessageData, MessageNonce, OutboundLaneData, + UnrewardedRelayer, }; use frame_support::RuntimeDebug; use sp_std::collections::vec_deque::VecDeque; @@ -49,7 +50,7 @@ pub enum ReceivalConfirmationResult { /// New messages have been confirmed by the confirmation 
transaction. ConfirmedMessages(DeliveredMessages), /// Confirmation transaction brings no new confirmation. This may be a result of relayer - /// error or several relayers runnng. + /// error or several relayers running. NoNewConfirmations, /// Bridged chain is trying to confirm more messages than we have generated. May be a result /// of invalid bridged chain storage. @@ -57,12 +58,14 @@ pub enum ReceivalConfirmationResult { /// The unrewarded relayers vec contains an empty entry. May be a result of invalid bridged /// chain storage. EmptyUnrewardedRelayerEntry, - /// The unrewarded relayers vec contains non-consecutive entries. May be a result of invalid bridged - /// chain storage. + /// The unrewarded relayers vec contains non-consecutive entries. May be a result of invalid + /// bridged chain storage. NonConsecutiveUnrewardedRelayerEntries, - /// The unrewarded relayers vec contains entry with mismatched number of dispatch results. May be - /// a result of invalid bridged chain storage. + /// The unrewarded relayers vec contains entry with mismatched number of dispatch results. May + /// be a result of invalid bridged chain storage. InvalidNumberOfDispatchResults, + /// The chain has more messages that need to be confirmed than there is in the proof. + TryingToConfirmMoreMessagesThanExpected(MessageNonce), } /// Outbound messages lane. @@ -98,30 +101,44 @@ impl OutboundLane { /// Confirm messages delivery. 
pub fn confirm_delivery( &mut self, - latest_received_nonce: MessageNonce, + max_allowed_messages: MessageNonce, + latest_delivered_nonce: MessageNonce, relayers: &VecDeque>, ) -> ReceivalConfirmationResult { let mut data = self.storage.data(); - if latest_received_nonce <= data.latest_received_nonce { - return ReceivalConfirmationResult::NoNewConfirmations; + if latest_delivered_nonce <= data.latest_received_nonce { + return ReceivalConfirmationResult::NoNewConfirmations + } + if latest_delivered_nonce > data.latest_generated_nonce { + return ReceivalConfirmationResult::FailedToConfirmFutureMessages } - if latest_received_nonce > data.latest_generated_nonce { - return ReceivalConfirmationResult::FailedToConfirmFutureMessages; + if latest_delivered_nonce - data.latest_received_nonce > max_allowed_messages { + // that the relayer has declared correct number of messages that the proof contains (it + // is checked outside of the function). But it may happen (but only if this/bridged + // chain storage is corrupted, though) that the actual number of confirmed messages if + // larger than declared. This would mean that 'reward loop' will take more time than the + // weight formula accounts, so we can't allow that. 
+ return ReceivalConfirmationResult::TryingToConfirmMoreMessagesThanExpected( + latest_delivered_nonce - data.latest_received_nonce, + ) } - let dispatch_results = - match extract_dispatch_results(data.latest_received_nonce, latest_received_nonce, relayers) { - Ok(dispatch_results) => dispatch_results, - Err(extract_error) => return extract_error, - }; + let dispatch_results = match extract_dispatch_results( + data.latest_received_nonce, + latest_delivered_nonce, + relayers, + ) { + Ok(dispatch_results) => dispatch_results, + Err(extract_error) => return extract_error, + }; let prev_latest_received_nonce = data.latest_received_nonce; - data.latest_received_nonce = latest_received_nonce; + data.latest_received_nonce = latest_delivered_nonce; self.storage.set_data(data); ReceivalConfirmationResult::ConfirmedMessages(DeliveredMessages { begin: prev_latest_received_nonce + 1, - end: latest_received_nonce, + end: latest_delivered_nonce, dispatch_results, }) } @@ -133,7 +150,9 @@ impl OutboundLane { let mut pruned_messages = 0; let mut anything_changed = false; let mut data = self.storage.data(); - while pruned_messages < max_messages_to_prune && data.oldest_unpruned_nonce <= data.latest_received_nonce { + while pruned_messages < max_messages_to_prune && + data.oldest_unpruned_nonce <= data.latest_received_nonce + { self.storage.remove_message(&data.oldest_unpruned_nonce); anything_changed = true; @@ -158,9 +177,10 @@ fn extract_dispatch_results( latest_received_nonce: MessageNonce, relayers: &VecDeque>, ) -> Result { - // the only caller of this functions checks that the prev_latest_received_nonce..=latest_received_nonce - // is valid, so we're ready to accept messages in this range - // => with_capacity call must succeed here or we'll be unable to receive confirmations at all + // the only caller of this functions checks that the + // prev_latest_received_nonce..=latest_received_nonce is valid, so we're ready to accept + // messages in this range => with_capacity call 
must succeed here or we'll be unable to receive + // confirmations at all let mut received_dispatch_result = BitVec::with_capacity((latest_received_nonce - prev_latest_received_nonce + 1) as _); let mut last_entry_end: Option = None; @@ -168,43 +188,48 @@ fn extract_dispatch_results( // unrewarded relayer entry must have at least 1 unconfirmed message // (guaranteed by the `InboundLane::receive_message()`) if entry.messages.end < entry.messages.begin { - return Err(ReceivalConfirmationResult::EmptyUnrewardedRelayerEntry); + return Err(ReceivalConfirmationResult::EmptyUnrewardedRelayerEntry) } // every entry must confirm range of messages that follows previous entry range // (guaranteed by the `InboundLane::receive_message()`) if let Some(last_entry_end) = last_entry_end { let expected_entry_begin = last_entry_end.checked_add(1); if expected_entry_begin != Some(entry.messages.begin) { - return Err(ReceivalConfirmationResult::NonConsecutiveUnrewardedRelayerEntries); + return Err(ReceivalConfirmationResult::NonConsecutiveUnrewardedRelayerEntries) } } last_entry_end = Some(entry.messages.end); // entry can't confirm messages larger than `inbound_lane_data.latest_received_nonce()` // (guaranteed by the `InboundLane::receive_message()`) if entry.messages.end > latest_received_nonce { - // technically this will be detected in the next loop iteration as `InvalidNumberOfDispatchResults` - // but to guarantee safety of loop operations below this is detected now - return Err(ReceivalConfirmationResult::FailedToConfirmFutureMessages); + // technically this will be detected in the next loop iteration as + // `InvalidNumberOfDispatchResults` but to guarantee safety of loop operations below + // this is detected now + return Err(ReceivalConfirmationResult::FailedToConfirmFutureMessages) } // entry must have single dispatch result for every message // (guaranteed by the `InboundLane::receive_message()`) - if entry.messages.dispatch_results.len() as MessageNonce != 
entry.messages.end - entry.messages.begin + 1 { - return Err(ReceivalConfirmationResult::InvalidNumberOfDispatchResults); + if entry.messages.dispatch_results.len() as MessageNonce != + entry.messages.end - entry.messages.begin + 1 + { + return Err(ReceivalConfirmationResult::InvalidNumberOfDispatchResults) } // now we know that the entry is valid // => let's check if it brings new confirmations - let new_messages_begin = sp_std::cmp::max(entry.messages.begin, prev_latest_received_nonce + 1); + let new_messages_begin = + sp_std::cmp::max(entry.messages.begin, prev_latest_received_nonce + 1); let new_messages_end = sp_std::cmp::min(entry.messages.end, latest_received_nonce); let new_messages_range = new_messages_begin..=new_messages_end; if new_messages_range.is_empty() { - continue; + continue } // now we know that entry brings new confirmations // => let's extract dispatch results received_dispatch_result.extend_from_bitslice( - &entry.messages.dispatch_results[(new_messages_begin - entry.messages.begin) as usize..], + &entry.messages.dispatch_results + [(new_messages_begin - entry.messages.begin) as usize..], ); } @@ -215,12 +240,17 @@ fn extract_dispatch_results( mod tests { use super::*; use crate::{ - mock::{message_data, run_test, unrewarded_relayer, TestRelayer, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID}, + mock::{ + message_data, run_test, unrewarded_relayer, TestRelayer, TestRuntime, REGULAR_PAYLOAD, + TEST_LANE_ID, + }, outbound_lane, }; use sp_std::ops::RangeInclusive; - fn unrewarded_relayers(nonces: RangeInclusive) -> VecDeque> { + fn unrewarded_relayers( + nonces: RangeInclusive, + ) -> VecDeque> { vec![unrewarded_relayer(*nonces.start(), *nonces.end(), 0)] .into_iter() .collect() @@ -245,7 +275,7 @@ mod tests { lane.send_message(message_data(REGULAR_PAYLOAD)); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 0); - let result = lane.confirm_delivery(latest_received_nonce, relayers); + 
let result = lane.confirm_delivery(3, latest_received_nonce, relayers); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 0); result @@ -273,7 +303,7 @@ mod tests { assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 0); assert_eq!( - lane.confirm_delivery(3, &unrewarded_relayers(1..=3)), + lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=3)), ); assert_eq!(lane.storage.data().latest_generated_nonce, 3); @@ -291,18 +321,18 @@ mod tests { assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 0); assert_eq!( - lane.confirm_delivery(3, &unrewarded_relayers(1..=3)), + lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=3)), ); assert_eq!( - lane.confirm_delivery(3, &unrewarded_relayers(1..=3)), + lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), ReceivalConfirmationResult::NoNewConfirmations, ); assert_eq!(lane.storage.data().latest_generated_nonce, 3); assert_eq!(lane.storage.data().latest_received_nonce, 3); assert_eq!( - lane.confirm_delivery(2, &unrewarded_relayers(1..=1)), + lane.confirm_delivery(1, 2, &unrewarded_relayers(1..=1)), ReceivalConfirmationResult::NoNewConfirmations, ); assert_eq!(lane.storage.data().latest_generated_nonce, 3); @@ -393,18 +423,40 @@ mod tests { assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); // after confirmation, some messages are received assert_eq!( - lane.confirm_delivery(2, &unrewarded_relayers(1..=2)), + lane.confirm_delivery(2, 2, &unrewarded_relayers(1..=2)), ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=2)), ); assert_eq!(lane.prune_messages(100), 2); assert_eq!(lane.storage.data().oldest_unpruned_nonce, 3); // after last message is confirmed, everything is 
pruned assert_eq!( - lane.confirm_delivery(3, &unrewarded_relayers(3..=3)), + lane.confirm_delivery(1, 3, &unrewarded_relayers(3..=3)), ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(3..=3)), ); assert_eq!(lane.prune_messages(100), 1); assert_eq!(lane.storage.data().oldest_unpruned_nonce, 4); }); } + + #[test] + fn confirm_delivery_detects_when_more_than_expected_messages_are_confirmed() { + run_test(|| { + let mut lane = outbound_lane::(TEST_LANE_ID); + lane.send_message(message_data(REGULAR_PAYLOAD)); + lane.send_message(message_data(REGULAR_PAYLOAD)); + lane.send_message(message_data(REGULAR_PAYLOAD)); + assert_eq!( + lane.confirm_delivery(0, 3, &unrewarded_relayers(1..=3)), + ReceivalConfirmationResult::TryingToConfirmMoreMessagesThanExpected(3), + ); + assert_eq!( + lane.confirm_delivery(2, 3, &unrewarded_relayers(1..=3)), + ReceivalConfirmationResult::TryingToConfirmMoreMessagesThanExpected(3), + ); + assert_eq!( + lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), + ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=3)), + ); + }); + } } diff --git a/polkadot/bridges/modules/messages/src/weights.rs b/polkadot/bridges/modules/messages/src/weights.rs index 9b65c8217ad64c23ccc4e81c4e4aa31158780beb..9dce11168fbbc2cdaa347bac2bae61989cac313f 100644 --- a/polkadot/bridges/modules/messages/src/weights.rs +++ b/polkadot/bridges/modules/messages/src/weights.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Autogenerated weights for pallet_bridge_messages +//! Autogenerated weights for `pallet_bridge_messages` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 //! DATE: 2021-06-18, STEPS: [50, ], REPEAT: 20 @@ -46,12 +46,13 @@ use frame_support::{ }; use sp_std::marker::PhantomData; -/// Weight functions needed for pallet_bridge_messages. +/// Weight functions needed for `pallet_bridge_messages`. 
pub trait WeightInfo { fn send_minimal_message_worst_case() -> Weight; fn send_1_kb_message_worst_case() -> Weight; fn send_16_kb_message_worst_case() -> Weight; - fn increase_message_fee() -> Weight; + fn maximal_increase_message_fee() -> Weight; + fn increase_message_fee(i: u32) -> Weight; fn receive_single_message_proof() -> Weight; fn receive_two_messages_proof() -> Weight; fn receive_single_message_proof_with_outbound_lane_state() -> Weight; @@ -70,7 +71,7 @@ pub trait WeightInfo { fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight; } -/// Weights for pallet_bridge_messages using the Rialto node and recommended hardware. +/// Weights for `pallet_bridge_messages` using the Rialto node and recommended hardware. pub struct RialtoWeight(PhantomData); impl WeightInfo for RialtoWeight { fn send_minimal_message_worst_case() -> Weight { @@ -88,8 +89,14 @@ impl WeightInfo for RialtoWeight { .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(12 as Weight)) } - fn increase_message_fee() -> Weight { - (6_709_925_000 as Weight) + fn maximal_increase_message_fee() -> Weight { + (6_781_470_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn increase_message_fee(i: u32) -> Weight { + (114_963_000 as Weight) + .saturating_add((6_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } @@ -202,8 +209,14 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(12 as Weight)) } - fn increase_message_fee() -> Weight { - (6_709_925_000 as Weight) + fn maximal_increase_message_fee() -> Weight { + (6_781_470_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn 
increase_message_fee(i: u32) -> Weight { + (114_963_000 as Weight) + .saturating_add((6_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } diff --git a/polkadot/bridges/modules/messages/src/weights_ext.rs b/polkadot/bridges/modules/messages/src/weights_ext.rs index be440174b4b9bcb7d89e7ac87a04c611aebce9fa..fef09c6cebe577a1e92a5a36c5dda9549a135a28 100644 --- a/polkadot/bridges/modules/messages/src/weights_ext.rs +++ b/polkadot/bridges/modules/messages/src/weights_ext.rs @@ -20,21 +20,27 @@ use crate::weights::WeightInfo; use bp_messages::{MessageNonce, UnrewardedRelayersState}; use bp_runtime::{PreComputedSize, Size}; -use frame_support::weights::Weight; +use frame_support::weights::{RuntimeDbWeight, Weight}; /// Size of the message being delivered in benchmarks. pub const EXPECTED_DEFAULT_MESSAGE_LENGTH: u32 = 128; -/// We assume that size of signed extensions on all our chains and size of all 'small' arguments of calls -/// we're checking here would fit 1KB. +/// We assume that size of signed extensions on all our chains and size of all 'small' arguments of +/// calls we're checking here would fit 1KB. const SIGNED_EXTENSIONS_SIZE: u32 = 1024; +/// Number of extra bytes (excluding size of storage value itself) of storage proof, built at +/// Rialto chain. This mostly depends on number of entries (and their density) in the storage trie. +/// Some reserve is reserved to account future chain growth. +pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; + /// Ensure that weights from `WeightInfoExt` implementation are looking correct. 
pub fn ensure_weights_are_correct( expected_default_message_delivery_tx_weight: Weight, expected_additional_byte_delivery_weight: Weight, expected_messages_delivery_confirmation_tx_weight: Weight, expected_pay_inbound_dispatch_fee_weight: Weight, + db_weight: RuntimeDbWeight, ) { // verify `send_message` weight components assert_ne!(W::send_message_overhead(), 0); @@ -48,12 +54,15 @@ pub fn ensure_weights_are_correct( // verify that the hardcoded value covers `receive_messages_proof` weight let actual_single_regular_message_delivery_tx_weight = W::receive_messages_proof_weight( - &PreComputedSize((EXPECTED_DEFAULT_MESSAGE_LENGTH + W::expected_extra_storage_proof_size()) as usize), + &PreComputedSize( + (EXPECTED_DEFAULT_MESSAGE_LENGTH + W::expected_extra_storage_proof_size()) as usize, + ), 1, 0, ); assert!( - actual_single_regular_message_delivery_tx_weight <= expected_default_message_delivery_tx_weight, + actual_single_regular_message_delivery_tx_weight <= + expected_default_message_delivery_tx_weight, "Default message delivery transaction weight {} is larger than expected weight {}", actual_single_regular_message_delivery_tx_weight, expected_default_message_delivery_tx_weight, @@ -82,9 +91,11 @@ pub fn ensure_weights_are_correct( total_messages: 1, ..Default::default() }, + db_weight, ); assert!( - actual_messages_delivery_confirmation_tx_weight <= expected_messages_delivery_confirmation_tx_weight, + actual_messages_delivery_confirmation_tx_weight <= + expected_messages_delivery_confirmation_tx_weight, "Messages delivery confirmation transaction weight {} is larger than expected weight {}", actual_messages_delivery_confirmation_tx_weight, expected_messages_delivery_confirmation_tx_weight, @@ -108,7 +119,8 @@ pub fn ensure_able_to_receive_message( max_incoming_message_dispatch_weight: Weight, ) { // verify that we're able to receive proof of maximal-size message - let max_delivery_transaction_size = 
max_incoming_message_proof_size.saturating_add(SIGNED_EXTENSIONS_SIZE); + let max_delivery_transaction_size = + max_incoming_message_proof_size.saturating_add(SIGNED_EXTENSIONS_SIZE); assert!( max_delivery_transaction_size <= max_extrinsic_size, "Size of maximal message delivery transaction {} + {} is larger than maximal possible transaction size {}", @@ -119,7 +131,9 @@ pub fn ensure_able_to_receive_message( // verify that we're able to receive proof of maximal-size message with maximal dispatch weight let max_delivery_transaction_dispatch_weight = W::receive_messages_proof_weight( - &PreComputedSize((max_incoming_message_proof_size + W::expected_extra_storage_proof_size()) as usize), + &PreComputedSize( + (max_incoming_message_proof_size + W::expected_extra_storage_proof_size()) as usize, + ), 1, max_incoming_message_dispatch_weight, ); @@ -138,6 +152,7 @@ pub fn ensure_able_to_receive_confirmation( max_inbound_lane_data_proof_size_from_peer_chain: u32, max_unrewarded_relayer_entries_at_peer_inbound_lane: MessageNonce, max_unconfirmed_messages_at_inbound_lane: MessageNonce, + db_weight: RuntimeDbWeight, ) { // verify that we're able to receive confirmation of maximal-size let max_confirmation_transaction_size = @@ -150,7 +165,8 @@ pub fn ensure_able_to_receive_confirmation( max_extrinsic_size, ); - // verify that we're able to reward maximal number of relayers that have delivered maximal number of messages + // verify that we're able to reward maximal number of relayers that have delivered maximal + // number of messages let max_confirmation_transaction_dispatch_weight = W::receive_messages_delivery_proof_weight( &PreComputedSize(max_inbound_lane_data_proof_size_from_peer_chain as usize), &UnrewardedRelayersState { @@ -158,6 +174,7 @@ pub fn ensure_able_to_receive_confirmation( total_messages: max_unconfirmed_messages_at_inbound_lane, ..Default::default() }, + db_weight, ); assert!( max_confirmation_transaction_dispatch_weight <= max_extrinsic_weight, @@ -180,18 
+197,26 @@ pub trait WeightInfoExt: WeightInfo { // Functions that are directly mapped to extrinsics weights. /// Weight of message send extrinsic. - fn send_message_weight(message: &impl Size) -> Weight { + fn send_message_weight(message: &impl Size, db_weight: RuntimeDbWeight) -> Weight { let transaction_overhead = Self::send_message_overhead(); let message_size_overhead = Self::send_message_size_overhead(message.size_hint()); + let call_back_overhead = Self::single_message_callback_overhead(db_weight); - transaction_overhead.saturating_add(message_size_overhead) + transaction_overhead + .saturating_add(message_size_overhead) + .saturating_add(call_back_overhead) } /// Weight of message delivery extrinsic. - fn receive_messages_proof_weight(proof: &impl Size, messages_count: u32, dispatch_weight: Weight) -> Weight { + fn receive_messages_proof_weight( + proof: &impl Size, + messages_count: u32, + dispatch_weight: Weight, + ) -> Weight { // basic components of extrinsic weight let transaction_overhead = Self::receive_messages_proof_overhead(); - let outbound_state_delivery_weight = Self::receive_messages_proof_outbound_lane_state_overhead(); + let outbound_state_delivery_weight = + Self::receive_messages_proof_outbound_lane_state_overhead(); let messages_delivery_weight = Self::receive_messages_proof_messages_overhead(MessageNonce::from(messages_count)); let messages_dispatch_weight = dispatch_weight; @@ -201,8 +226,9 @@ pub trait WeightInfoExt: WeightInfo { .saturating_mul(messages_count.saturating_sub(1)) .saturating_add(Self::expected_extra_storage_proof_size()); let actual_proof_size = proof.size_hint(); - let proof_size_overhead = - Self::storage_proof_size_overhead(actual_proof_size.saturating_sub(expected_proof_size)); + let proof_size_overhead = Self::storage_proof_size_overhead( + actual_proof_size.saturating_sub(expected_proof_size), + ); transaction_overhead .saturating_add(outbound_state_delivery_weight) @@ -212,23 +238,37 @@ pub trait WeightInfoExt: 
WeightInfo { } /// Weight of confirmation delivery extrinsic. - fn receive_messages_delivery_proof_weight(proof: &impl Size, relayers_state: &UnrewardedRelayersState) -> Weight { + fn receive_messages_delivery_proof_weight( + proof: &impl Size, + relayers_state: &UnrewardedRelayersState, + db_weight: RuntimeDbWeight, + ) -> Weight { // basic components of extrinsic weight let transaction_overhead = Self::receive_messages_delivery_proof_overhead(); - let messages_overhead = Self::receive_messages_delivery_proof_messages_overhead(relayers_state.total_messages); - let relayers_overhead = - Self::receive_messages_delivery_proof_relayers_overhead(relayers_state.unrewarded_relayer_entries); + let messages_overhead = + Self::receive_messages_delivery_proof_messages_overhead(relayers_state.total_messages); + let relayers_overhead = Self::receive_messages_delivery_proof_relayers_overhead( + relayers_state.unrewarded_relayer_entries, + ); // proof size overhead weight let expected_proof_size = Self::expected_extra_storage_proof_size(); let actual_proof_size = proof.size_hint(); - let proof_size_overhead = - Self::storage_proof_size_overhead(actual_proof_size.saturating_sub(expected_proof_size)); + let proof_size_overhead = Self::storage_proof_size_overhead( + actual_proof_size.saturating_sub(expected_proof_size), + ); + + // and cost of calling `OnDeliveryConfirmed::on_messages_delivered()` for every confirmed + // message + let callback_overhead = relayers_state + .total_messages + .saturating_mul(Self::single_message_callback_overhead(db_weight)); transaction_overhead .saturating_add(messages_overhead) .saturating_add(relayers_overhead) .saturating_add(proof_size_overhead) + .saturating_add(callback_overhead) } // Functions that are used by extrinsics weights formulas. @@ -238,22 +278,26 @@ pub trait WeightInfoExt: WeightInfo { Self::send_minimal_message_worst_case() } - /// Returns weight that needs to be accounted when message of given size is sent (`send_message`). 
+ /// Returns weight that needs to be accounted when message of given size is sent + /// (`send_message`). fn send_message_size_overhead(message_size: u32) -> Weight { let message_size_in_kb = (1024u64 + message_size as u64) / 1024; - let single_kb_weight = (Self::send_16_kb_message_worst_case() - Self::send_1_kb_message_worst_case()) / 15; + let single_kb_weight = + (Self::send_16_kb_message_worst_case() - Self::send_1_kb_message_worst_case()) / 15; message_size_in_kb * single_kb_weight } /// Returns weight overhead of message delivery transaction (`receive_messages_proof`). fn receive_messages_proof_overhead() -> Weight { - let weight_of_two_messages_and_two_tx_overheads = Self::receive_single_message_proof().saturating_mul(2); + let weight_of_two_messages_and_two_tx_overheads = + Self::receive_single_message_proof().saturating_mul(2); let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof(); - weight_of_two_messages_and_two_tx_overheads.saturating_sub(weight_of_two_messages_and_single_tx_overhead) + weight_of_two_messages_and_two_tx_overheads + .saturating_sub(weight_of_two_messages_and_single_tx_overhead) } - /// Returns weight that needs to be accounted when receiving given number of messages with message - /// delivery transaction (`receive_messages_proof`). + /// Returns weight that needs to be accounted when receiving given a number of messages with + /// message delivery transaction (`receive_messages_proof`). fn receive_messages_proof_messages_overhead(messages: MessageNonce) -> Weight { let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof(); let weight_of_single_message_and_single_tx_overhead = Self::receive_single_message_proof(); @@ -262,37 +306,42 @@ pub trait WeightInfoExt: WeightInfo { .saturating_mul(messages as Weight) } - /// Returns weight that needs to be accounted when message delivery transaction (`receive_messages_proof`) - /// is carrying outbound lane state proof. 
+ /// Returns weight that needs to be accounted when message delivery transaction + /// (`receive_messages_proof`) is carrying outbound lane state proof. fn receive_messages_proof_outbound_lane_state_overhead() -> Weight { - let weight_of_single_message_and_lane_state = Self::receive_single_message_proof_with_outbound_lane_state(); + let weight_of_single_message_and_lane_state = + Self::receive_single_message_proof_with_outbound_lane_state(); let weight_of_single_message = Self::receive_single_message_proof(); weight_of_single_message_and_lane_state.saturating_sub(weight_of_single_message) } - /// Returns weight overhead of delivery confirmation transaction (`receive_messages_delivery_proof`). + /// Returns weight overhead of delivery confirmation transaction + /// (`receive_messages_delivery_proof`). fn receive_messages_delivery_proof_overhead() -> Weight { let weight_of_two_messages_and_two_tx_overheads = Self::receive_delivery_proof_for_single_message().saturating_mul(2); let weight_of_two_messages_and_single_tx_overhead = Self::receive_delivery_proof_for_two_messages_by_single_relayer(); - weight_of_two_messages_and_two_tx_overheads.saturating_sub(weight_of_two_messages_and_single_tx_overhead) + weight_of_two_messages_and_two_tx_overheads + .saturating_sub(weight_of_two_messages_and_single_tx_overhead) } - /// Returns weight that needs to be accounted when receiving confirmations for given number of + /// Returns weight that needs to be accounted when receiving confirmations for given a number of /// messages with delivery confirmation transaction (`receive_messages_delivery_proof`). 
fn receive_messages_delivery_proof_messages_overhead(messages: MessageNonce) -> Weight { - let weight_of_two_messages = Self::receive_delivery_proof_for_two_messages_by_single_relayer(); + let weight_of_two_messages = + Self::receive_delivery_proof_for_two_messages_by_single_relayer(); let weight_of_single_message = Self::receive_delivery_proof_for_single_message(); weight_of_two_messages .saturating_sub(weight_of_single_message) .saturating_mul(messages as Weight) } - /// Returns weight that needs to be accounted when receiving confirmations for given number of + /// Returns weight that needs to be accounted when receiving confirmations for given a number of /// relayers entries with delivery confirmation transaction (`receive_messages_delivery_proof`). fn receive_messages_delivery_proof_relayers_overhead(relayers: MessageNonce) -> Weight { - let weight_of_two_messages_by_two_relayers = Self::receive_delivery_proof_for_two_messages_by_two_relayers(); + let weight_of_two_messages_by_two_relayers = + Self::receive_delivery_proof_for_two_messages_by_two_relayers(); let weight_of_two_messages_by_single_relayer = Self::receive_delivery_proof_for_two_messages_by_single_relayer(); weight_of_two_messages_by_two_relayers @@ -300,8 +349,8 @@ pub trait WeightInfoExt: WeightInfo { .saturating_mul(relayers as Weight) } - /// Returns weight that needs to be accounted when storage proof of given size is recieved (either in - /// `receive_messages_proof` or `receive_messages_delivery_proof`). + /// Returns weight that needs to be accounted when storage proof of given size is received + /// (either in `receive_messages_proof` or `receive_messages_delivery_proof`). /// /// **IMPORTANT**: this overhead is already included in the 'base' transaction cost - e.g. proof /// size depends on messages count or number of entries in the unrewarded relayers set. So this @@ -310,27 +359,39 @@ pub trait WeightInfoExt: WeightInfo { /// is less than that cost). 
fn storage_proof_size_overhead(proof_size: u32) -> Weight { let proof_size_in_bytes = proof_size as Weight; - let byte_weight = - (Self::receive_single_message_proof_16_kb() - Self::receive_single_message_proof_1_kb()) / (15 * 1024); + let byte_weight = (Self::receive_single_message_proof_16_kb() - + Self::receive_single_message_proof_1_kb()) / + (15 * 1024); proof_size_in_bytes * byte_weight } /// Returns weight of the pay-dispatch-fee operation for inbound messages. /// - /// This function may return zero if runtime doesn't support pay-dispatch-fee-at-target-chain option. + /// This function may return zero if runtime doesn't support pay-dispatch-fee-at-target-chain + /// option. fn pay_inbound_dispatch_fee_overhead() -> Weight { - Self::receive_single_message_proof().saturating_sub(Self::receive_single_prepaid_message_proof()) + Self::receive_single_message_proof() + .saturating_sub(Self::receive_single_prepaid_message_proof()) + } + + /// Returns pre-dispatch weight of single callback call. + /// + /// When benchmarking the weight please take into consideration both the `OnMessageAccepted` and + /// `OnDeliveryConfirmed` callbacks. The method should return the greater of the two, because + /// it's used to estimate the weight in both contexts. 
+ fn single_message_callback_overhead(db_weight: RuntimeDbWeight) -> Weight { + db_weight.reads_writes(1, 1) } } impl WeightInfoExt for () { fn expected_extra_storage_proof_size() -> u32 { - bp_rialto::EXTRA_STORAGE_PROOF_SIZE + EXTRA_STORAGE_PROOF_SIZE } } impl WeightInfoExt for crate::weights::RialtoWeight { fn expected_extra_storage_proof_size() -> u32 { - bp_rialto::EXTRA_STORAGE_PROOF_SIZE + EXTRA_STORAGE_PROOF_SIZE } } diff --git a/polkadot/bridges/modules/shift-session-manager/Cargo.toml b/polkadot/bridges/modules/shift-session-manager/Cargo.toml index 6dac97ddde601eff0ddef5cb64dde0bae9ed5b17..9e3e15fddf897365bcb5c19b4709f01a52b9934f 100644 --- a/polkadot/bridges/modules/shift-session-manager/Cargo.toml +++ b/polkadot/bridges/modules/shift-session-manager/Cargo.toml @@ -7,20 +7,20 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } # Substrate Dependencies -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-session = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [dev-dependencies] sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -serde = "1.0" [features] default = ["std"] @@ -29,6 +29,7 @@ std = [ "frame-support/std", "frame-system/std", "pallet-session/std", + "scale-info/std", "sp-staking/std", "sp-std/std", ] diff --git a/polkadot/bridges/modules/shift-session-manager/src/lib.rs b/polkadot/bridges/modules/shift-session-manager/src/lib.rs index 3635e6223d7f7afd73a44ac6e64af31d1eaac4f8..0278580981375ae692e71f3336695ab3dc609bac 100644 --- a/polkadot/bridges/modules/shift-session-manager/src/lib.rs +++ b/polkadot/bridges/modules/shift-session-manager/src/lib.rs @@ -19,22 +19,33 @@ #![cfg_attr(not(feature = "std"), no_std)] -use frame_support::{decl_module, decl_storage}; use sp_std::prelude::*; -/// The module configuration trait. -pub trait Config: pallet_session::Config {} +pub use pallet::*; -decl_module! { - /// Shift session manager pallet. - pub struct Module for enum Call where origin: T::Origin {} -} +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; -decl_storage! { - trait Store for Pallet as ShiftSessionManager { - /// Validators of first two sessions. 
- InitialValidators: Option>; - } + #[pallet::config] + #[pallet::disable_frame_system_supertrait_check] + pub trait Config: pallet_session::Config {} + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + /// Validators of first two sessions. + #[pallet::storage] + pub(super) type InitialValidators = StorageValue<_, Vec>; } impl pallet_session::SessionManager for Pallet { @@ -43,7 +54,7 @@ impl pallet_session::SessionManager for Pallet { fn new_session(session_index: sp_staking::SessionIndex) -> Option> { // we don't want to add even more fields to genesis config => just return None if session_index == 0 || session_index == 1 { - return None; + return None } // the idea that on first call (i.e. when session 1 ends) we're reading current @@ -90,13 +101,18 @@ mod tests { #![allow(clippy::from_over_into)] use super::*; - use frame_support::sp_io::TestExternalities; - use frame_support::sp_runtime::{ - testing::{Header, UintAuthorityId}, - traits::{BlakeTwo256, ConvertInto, IdentityLookup}, - Perbill, RuntimeAppPublic, + use frame_support::{ + parameter_types, + sp_io::TestExternalities, + sp_runtime::{ + testing::{Header, UintAuthorityId}, + traits::{BlakeTwo256, ConvertInto, IdentityLookup}, + Perbill, RuntimeAppPublic, + }, + traits::GenesisBuild, + weights::Weight, + BasicExternalities, }; - use frame_support::{parameter_types, weights::Weight, BasicExternalities}; use sp_core::H256; type AccountId = u64; @@ -171,17 +187,21 @@ mod tests { impl pallet_session::SessionHandler for TestSessionHandler { const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID]; - fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} + fn on_genesis_session(_validators: &[(AccountId, Ks)]) { + } - fn on_new_session(_: bool, _: &[(AccountId, Ks)], _: &[(AccountId, Ks)]) {} + fn on_new_session( + _: bool, + _: &[(AccountId, 
Ks)], + _: &[(AccountId, Ks)], + ) { + } fn on_disabled(_: u32) {} } fn new_test_ext() -> TestExternalities { - let mut t = frame_system::GenesisConfig::default() - .build_storage::() - .unwrap(); + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); let keys = vec![ (1, 1, UintAuthorityId(1)), diff --git a/polkadot/bridges/modules/token-swap/Cargo.toml b/polkadot/bridges/modules/token-swap/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..a6103f688c424f07017960bd241426c4ba63183c --- /dev/null +++ b/polkadot/bridges/modules/token-swap/Cargo.toml @@ -0,0 +1,59 @@ +[package] +name = "pallet-bridge-token-swap" +description = "An Substrate pallet that allows parties on different chains (bridged using messages pallet) to swap their tokens" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +log = { version = "0.4.14", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +serde = { version = "1.0", optional = true } + +# Bridge dependencies + +bp-message-dispatch = { path = "../../primitives/message-dispatch", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-token-swap = { path = "../../primitives/token-swap", default-features = false } +pallet-bridge-dispatch = { path = "../dispatch", default-features = false } +pallet-bridge-messages = { path = "../messages", default-features = false } + +# Substrate Dependencies + +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", 
default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +[dev-dependencies] +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[features] +default = ["std"] +std = [ + "codec/std", + "bp-message-dispatch/std", + "bp-messages/std", + "bp-runtime/std", + "bp-token-swap/std", + "frame-support/std", + "frame-system/std", + "log/std", + "pallet-bridge-dispatch/std", + "pallet-bridge-messages/std", + "scale-info/std", + "serde", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-benchmarking", +] diff --git a/polkadot/bridges/modules/token-swap/src/benchmarking.rs b/polkadot/bridges/modules/token-swap/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..bbc544a8b91dff1d35db2c3c55a2029d52d1f78f --- /dev/null +++ b/polkadot/bridges/modules/token-swap/src/benchmarking.rs @@ -0,0 +1,195 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Token-swap pallet benchmarking. + +use crate::{ + swap_account_id, target_account_at_this_chain, BridgedAccountIdOf, BridgedAccountPublicOf, + BridgedAccountSignatureOf, BridgedBalanceOf, Call, Pallet, ThisChainBalance, + TokenSwapCreationOf, TokenSwapOf, +}; + +use bp_token_swap::{TokenSwap, TokenSwapCreation, TokenSwapState, TokenSwapType}; +use codec::Encode; +use frame_benchmarking::{account, benchmarks_instance_pallet}; +use frame_support::{traits::Currency, Parameter}; +use frame_system::RawOrigin; +use sp_core::H256; +use sp_io::hashing::blake2_256; +use sp_runtime::traits::Bounded; +use sp_std::vec::Vec; + +const SEED: u32 = 0; + +/// Trait that must be implemented by runtime. +pub trait Config: crate::Config { + /// Initialize environment for token swap. + fn initialize_environment(); +} + +benchmarks_instance_pallet! { + where_clause { + where + BridgedAccountPublicOf: Default + Parameter, + BridgedAccountSignatureOf: Default, + } + + // + // Benchmarks that are used directly by the runtime. + // + + // Benchmark `create_swap` extrinsic. + // + // This benchmark assumes that message is **NOT** actually sent. Instead we're using `send_message_weight` + // from the `WeightInfoExt` trait. + // + // There aren't any factors that affect `create_swap` performance, so everything + // is straightforward here. 
+ create_swap { + T::initialize_environment(); + + let sender = funded_account::("source_account_at_this_chain", 0); + let swap: TokenSwapOf = test_swap::(sender.clone(), true); + let swap_creation: TokenSwapCreationOf = test_swap_creation::(); + }: create_swap( + RawOrigin::Signed(sender.clone()), + swap, + Box::new(swap_creation) + ) + verify { + assert!(crate::PendingSwaps::::contains_key(test_swap_hash::(sender, true))); + } + + // Benchmark `claim_swap` extrinsic with the worst possible conditions: + // + // * swap is locked until some block, so current block number is read. + claim_swap { + T::initialize_environment(); + + let sender: T::AccountId = account("source_account_at_this_chain", 0, SEED); + crate::PendingSwaps::::insert( + test_swap_hash::(sender.clone(), false), + TokenSwapState::Confirmed, + ); + + let swap: TokenSwapOf = test_swap::(sender.clone(), false); + let claimer = target_account_at_this_chain::(&swap); + let token_swap_account = swap_account_id::(&swap); + T::ThisCurrency::make_free_balance_be(&token_swap_account, ThisChainBalance::::max_value()); + }: claim_swap(RawOrigin::Signed(claimer), swap) + verify { + assert!(!crate::PendingSwaps::::contains_key(test_swap_hash::(sender, false))); + } + + // Benchmark `cancel_swap` extrinsic with the worst possible conditions: + // + // * swap is locked until some block, so current block number is read. 
+ cancel_swap { + T::initialize_environment(); + + let sender: T::AccountId = account("source_account_at_this_chain", 0, SEED); + crate::PendingSwaps::::insert( + test_swap_hash::(sender.clone(), false), + TokenSwapState::Failed, + ); + + let swap: TokenSwapOf = test_swap::(sender.clone(), false); + let token_swap_account = swap_account_id::(&swap); + T::ThisCurrency::make_free_balance_be(&token_swap_account, ThisChainBalance::::max_value()); + + }: cancel_swap(RawOrigin::Signed(sender.clone()), swap) + verify { + assert!(!crate::PendingSwaps::::contains_key(test_swap_hash::(sender, false))); + } +} + +/// Returns test token swap. +fn test_swap, I: 'static>(sender: T::AccountId, is_create: bool) -> TokenSwapOf { + TokenSwap { + swap_type: TokenSwapType::LockClaimUntilBlock( + if is_create { 10u32.into() } else { 0u32.into() }, + 0.into(), + ), + source_balance_at_this_chain: source_balance_to_swap::(), + source_account_at_this_chain: sender, + target_balance_at_bridged_chain: target_balance_to_swap::(), + target_account_at_bridged_chain: target_account_at_bridged_chain::(), + } +} + +/// Returns test token swap hash. +fn test_swap_hash, I: 'static>(sender: T::AccountId, is_create: bool) -> H256 { + test_swap::(sender, is_create).using_encoded(blake2_256).into() +} + +/// Returns test token swap creation params. +fn test_swap_creation, I: 'static>() -> TokenSwapCreationOf +where + BridgedAccountPublicOf: Default, + BridgedAccountSignatureOf: Default, +{ + TokenSwapCreation { + target_public_at_bridged_chain: target_public_at_bridged_chain::(), + swap_delivery_and_dispatch_fee: swap_delivery_and_dispatch_fee::(), + bridged_chain_spec_version: 0, + bridged_currency_transfer: Vec::new(), + bridged_currency_transfer_weight: 0, + bridged_currency_transfer_signature: bridged_currency_transfer_signature::(), + } +} + +/// Account that has some balance. 
+fn funded_account, I: 'static>(name: &'static str, index: u32) -> T::AccountId { + let account: T::AccountId = account(name, index, SEED); + T::ThisCurrency::make_free_balance_be(&account, ThisChainBalance::::max_value()); + account +} + +/// Currency transfer message fee. +fn swap_delivery_and_dispatch_fee, I: 'static>() -> ThisChainBalance { + ThisChainBalance::::max_value() / 4u32.into() +} + +/// Balance at the source chain that we're going to swap. +fn source_balance_to_swap, I: 'static>() -> ThisChainBalance { + ThisChainBalance::::max_value() / 2u32.into() +} + +/// Balance at the target chain that we're going to swap. +fn target_balance_to_swap, I: 'static>() -> BridgedBalanceOf { + BridgedBalanceOf::::max_value() / 2u32.into() +} + +/// Public key of `target_account_at_bridged_chain`. +fn target_public_at_bridged_chain, I: 'static>() -> BridgedAccountPublicOf +where + BridgedAccountPublicOf: Default, +{ + Default::default() +} + +/// Signature of `target_account_at_bridged_chain` over message. +fn bridged_currency_transfer_signature, I: 'static>() -> BridgedAccountSignatureOf +where + BridgedAccountSignatureOf: Default, +{ + Default::default() +} + +/// Account at the bridged chain that is participating in the swap. +fn target_account_at_bridged_chain, I: 'static>() -> BridgedAccountIdOf { + Default::default() +} diff --git a/polkadot/bridges/modules/token-swap/src/lib.rs b/polkadot/bridges/modules/token-swap/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..43fa13ba4bdb869bccd82e9447004b7bd670257b --- /dev/null +++ b/polkadot/bridges/modules/token-swap/src/lib.rs @@ -0,0 +1,1133 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Runtime module that allows token swap between two parties acting on different chains. +//! +//! The swap is made using message lanes between This (where `pallet-bridge-token-swap` pallet +//! is deployed) and some other Bridged chain. No other assumptions about the Bridged chain are +//! made, so we don't need it to have an instance of the `pallet-bridge-token-swap` pallet deployed. +//! +//! There are four accounts participating in the swap: +//! +//! 1) account of This chain that has signed the `create_swap` transaction and has balance on This +//! chain. We'll be referring to this account as `source_account_at_this_chain`; +//! +//! 2) account of the Bridged chain that is sending the `claim_swap` message from the Bridged to +//! This chain. This account has balance on Bridged chain and is willing to swap these tokens to +//! This chain tokens of the `source_account_at_this_chain`. We'll be referring to this account +//! as `target_account_at_bridged_chain`; +//! +//! 3) account of the Bridged chain that is indirectly controlled by the +//! `source_account_at_this_chain`. We'll be referring this account as +//! `source_account_at_bridged_chain`; +//! +//! 4) account of This chain that is indirectly controlled by the `target_account_at_bridged_chain`. +//! 
We'll be referring this account as `target_account_at_this_chain`. +//! +//! So the tokens swap is an intention of `source_account_at_this_chain` to swap his +//! `source_balance_at_this_chain` tokens to the `target_balance_at_bridged_chain` tokens owned by +//! `target_account_at_bridged_chain`. The swap process goes as follows: +//! +//! 1) the `source_account_at_this_chain` account submits the `create_swap` transaction on This +//! chain; +//! +//! 2) the tokens transfer message that would transfer `target_balance_at_bridged_chain` +//! tokens from the `target_account_at_bridged_chain` to the `source_account_at_bridged_chain`, +//! is sent over the bridge; +//! +//! 3) when transfer message is delivered and dispatched, the pallet receives notification; +//! +//! 4) if message has been successfully dispatched, the `target_account_at_bridged_chain` sends the +//! message that would transfer `source_balance_at_this_chain` tokens to his +//! `target_account_at_this_chain` account; +//! +//! 5) if message dispatch has failed, the `source_account_at_this_chain` may submit the +//! `cancel_swap` transaction and return his `source_balance_at_this_chain` back to his account. +//! +//! While swap is pending, the `source_balance_at_this_chain` tokens are owned by the special +//! temporary `swap_account_at_this_chain` account. It is destroyed upon swap completion. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +use bp_messages::{ + source_chain::{MessagesBridge, OnDeliveryConfirmed}, + DeliveredMessages, LaneId, MessageNonce, +}; +use bp_runtime::{messages::DispatchFeePayment, ChainId}; +use bp_token_swap::{ + RawBridgedTransferCall, TokenSwap, TokenSwapCreation, TokenSwapState, TokenSwapType, +}; +use codec::Encode; +use frame_support::{ + fail, + traits::{Currency, ExistenceRequirement}, + weights::PostDispatchInfo, +}; +use sp_core::H256; +use sp_io::hashing::blake2_256; +use sp_runtime::traits::{Convert, Saturating}; +use sp_std::boxed::Box; +use weights::WeightInfo; + +pub use weights_ext::WeightInfoExt; + +#[cfg(test)] +mod mock; + +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarking; + +pub mod weights; +pub mod weights_ext; + +pub use pallet::*; + +/// Name of the `PendingSwaps` storage map. +pub const PENDING_SWAPS_MAP_NAME: &str = "PendingSwaps"; + +// comes from #[pallet::event] +#[allow(clippy::unused_unit)] +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; + /// Benchmarks results from runtime we're plugged into. + type WeightInfo: WeightInfoExt; + + /// Id of the bridge with the Bridged chain. + type BridgedChainId: Get; + /// The identifier of outbound message lane on This chain used to send token transfer + /// messages to the Bridged chain. + /// + /// It is highly recommended to use dedicated lane for every instance of token swap + /// pallet. Messages delivery confirmation callback is implemented in the way that + /// for every confirmed message, there is (at least) a storage read. Which mean, + /// that if pallet will see unrelated confirmations, it'll just burn storage-read + /// weight, achieving nothing. 
+ type OutboundMessageLaneId: Get; + /// Messages bridge with Bridged chain. + type MessagesBridge: MessagesBridge< + Self::AccountId, + >::Balance, + MessagePayloadOf, + >; + + /// This chain Currency used in the tokens swap. + type ThisCurrency: Currency; + /// Converter from raw hash (derived from swap) to This chain account. + type FromSwapToThisAccountIdConverter: Convert; + + /// The chain we're bridged to. + type BridgedChain: bp_runtime::Chain; + /// Converter from raw hash (derived from Bridged chain account) to This chain account. + type FromBridgedToThisAccountIdConverter: Convert; + } + + /// Tokens balance at This chain. + pub type ThisChainBalance = <>::ThisCurrency as Currency< + ::AccountId, + >>::Balance; + + /// Type of the Bridged chain. + pub type BridgedChainOf = >::BridgedChain; + /// Tokens balance type at the Bridged chain. + pub type BridgedBalanceOf = bp_runtime::BalanceOf>; + /// Account identifier type at the Bridged chain. + pub type BridgedAccountIdOf = bp_runtime::AccountIdOf>; + /// Account public key type at the Bridged chain. + pub type BridgedAccountPublicOf = bp_runtime::AccountPublicOf>; + /// Account signature type at the Bridged chain. + pub type BridgedAccountSignatureOf = bp_runtime::SignatureOf>; + + /// Bridge message payload used by the pallet. + pub type MessagePayloadOf = bp_message_dispatch::MessagePayload< + ::AccountId, + BridgedAccountPublicOf, + BridgedAccountSignatureOf, + RawBridgedTransferCall, + >; + /// Type of `TokenSwap` used by the pallet. + pub type TokenSwapOf = TokenSwap< + BlockNumberFor, + ThisChainBalance, + ::AccountId, + BridgedBalanceOf, + BridgedAccountIdOf, + >; + /// Type of `TokenSwapCreation` used by the pallet. 
+ pub type TokenSwapCreationOf = TokenSwapCreation< + BridgedAccountPublicOf, + ThisChainBalance, + BridgedAccountSignatureOf, + >; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData<(T, I)>); + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet {} + + #[pallet::call] + impl, I: 'static> Pallet + where + BridgedAccountPublicOf: Parameter, + { + /// Start token swap procedure. + /// + /// The dispatch origin for this call must be exactly the + /// `swap.source_account_at_this_chain` account. + /// + /// Method arguments are: + /// + /// - `swap` - token swap intention; + /// - `swap_creation_params` - additional parameters required to start tokens swap. + /// + /// The `source_account_at_this_chain` MUST have enough balance to cover both token swap and + /// message transfer. Message fee may be estimated using corresponding `OutboundLaneApi` of + /// This runtime. + /// + /// **WARNING**: the submitter of this transaction is responsible for verifying: + /// + /// 1) that the `swap_creation_params.bridged_currency_transfer` represents a valid token + /// transfer call that transfers `swap.target_balance_at_bridged_chain` to his + /// `swap.source_account_at_bridged_chain` account; + /// + /// 2) that either the `swap.source_account_at_bridged_chain` already exists, or the + /// `swap.target_balance_at_bridged_chain` is above existential deposit of the Bridged + /// chain; + /// + /// 3) the `swap_creation_params.target_public_at_bridged_chain` matches the + /// `swap.target_account_at_bridged_chain`; + /// + /// 4) the `bridged_currency_transfer_signature` is valid and generated by the owner of + /// the `swap_creation_params.target_public_at_bridged_chain` account (read more + /// about [`CallOrigin::TargetAccount`]). + /// + /// Violating rule#1 will lead to losing your `source_balance_at_this_chain` tokens. 
+ /// Violating other rules will lead to losing message fees for this and other transactions + + /// losing fees for message transfer. + #[allow(clippy::boxed_local)] + #[pallet::weight( + T::WeightInfo::create_swap() + .saturating_add(T::WeightInfo::send_message_weight( + &&swap_creation_params.bridged_currency_transfer[..], + T::DbWeight::get(), + )) + )] + pub fn create_swap( + origin: OriginFor, + swap: TokenSwapOf, + swap_creation_params: Box>, + ) -> DispatchResultWithPostInfo { + let TokenSwapCreation { + target_public_at_bridged_chain, + swap_delivery_and_dispatch_fee, + bridged_chain_spec_version, + bridged_currency_transfer, + bridged_currency_transfer_weight, + bridged_currency_transfer_signature, + } = *swap_creation_params; + + // ensure that the `origin` is the same account that is mentioned in the `swap` + // intention + let origin_account = ensure_signed(origin)?; + ensure!( + origin_account == swap.source_account_at_this_chain, + Error::::MismatchedSwapSourceOrigin, + ); + + // remember weight components + let base_weight = T::WeightInfo::create_swap(); + + // we can't exchange less than existential deposit (the temporary `swap_account` account + // won't be created then) + // + // the same can also happen with the `swap.bridged_balance`, but we can't check it + // here (without additional knowledge of the Bridged chain). So it is the `origin` + // responsibility to check that the swap is valid. 
+ ensure!( + swap.source_balance_at_this_chain >= T::ThisCurrency::minimum_balance(), + Error::::TooLowBalanceOnThisChain, + ); + + // if the swap is replay-protected, then we need to ensure that we have not yet passed + // the specified block yet + match swap.swap_type { + TokenSwapType::TemporaryTargetAccountAtBridgedChain => (), + TokenSwapType::LockClaimUntilBlock(block_number, _) => ensure!( + block_number >= frame_system::Pallet::::block_number(), + Error::::SwapPeriodIsFinished, + ), + } + + let swap_account = swap_account_id::(&swap); + let actual_send_message_weight = frame_support::storage::with_transaction(|| { + // funds are transferred from This account to the temporary Swap account + let transfer_result = T::ThisCurrency::transfer( + &swap.source_account_at_this_chain, + &swap_account, + // saturating_add is ok, or we have the chain where single holder owns all + // tokens + swap.source_balance_at_this_chain + .saturating_add(swap_delivery_and_dispatch_fee), + // if we'll allow account to die, then he'll be unable to `cancel_claim` + // if something won't work + ExistenceRequirement::KeepAlive, + ); + if let Err(err) = transfer_result { + log::error!( + target: "runtime::bridge-token-swap", + "Failed to transfer This chain tokens for the swap {:?} to Swap account ({:?}): {:?}", + swap, + swap_account, + err, + ); + + return sp_runtime::TransactionOutcome::Rollback(Err( + Error::::FailedToTransferToSwapAccount, + )) + } + + // the transfer message is sent over the bridge. The message is supposed to be a + // `Currency::transfer` call on the bridged chain, but no checks are made - it is + // the transaction submitter to ensure it is valid. 
+ let send_message_result = T::MessagesBridge::send_message( + bp_messages::source_chain::Sender::from(Some(swap_account.clone())), + T::OutboundMessageLaneId::get(), + bp_message_dispatch::MessagePayload { + spec_version: bridged_chain_spec_version, + weight: bridged_currency_transfer_weight, + origin: bp_message_dispatch::CallOrigin::TargetAccount( + swap_account, + target_public_at_bridged_chain, + bridged_currency_transfer_signature, + ), + dispatch_fee_payment: DispatchFeePayment::AtTargetChain, + call: bridged_currency_transfer, + }, + swap_delivery_and_dispatch_fee, + ); + let sent_message = match send_message_result { + Ok(sent_message) => sent_message, + Err(err) => { + log::error!( + target: "runtime::bridge-token-swap", + "Failed to send token transfer message for swap {:?} to the Bridged chain: {:?}", + swap, + err, + ); + + return sp_runtime::TransactionOutcome::Rollback(Err( + Error::::FailedToSendTransferMessage, + )) + }, + }; + + // remember that we have started the swap + let swap_hash = swap.using_encoded(blake2_256).into(); + let insert_swap_result = + PendingSwaps::::try_mutate(swap_hash, |maybe_state| { + if maybe_state.is_some() { + return Err(()) + } + + *maybe_state = Some(TokenSwapState::Started); + Ok(()) + }); + if insert_swap_result.is_err() { + log::error!( + target: "runtime::bridge-token-swap", + "Failed to start token swap {:?}: the swap is already started", + swap, + ); + + return sp_runtime::TransactionOutcome::Rollback(Err( + Error::::SwapAlreadyStarted, + )) + } + + log::trace!( + target: "runtime::bridge-token-swap", + "The swap {:?} (hash {:?}) has been started", + swap, + swap_hash, + ); + + // remember that we're waiting for the transfer message delivery confirmation + PendingMessages::::insert(sent_message.nonce, swap_hash); + + // finally - emit the event + Self::deposit_event(Event::SwapStarted(swap_hash, sent_message.nonce)); + + sp_runtime::TransactionOutcome::Commit(Ok(sent_message.weight)) + })?; + + 
Ok(PostDispatchInfo { + actual_weight: Some(base_weight.saturating_add(actual_send_message_weight)), + pays_fee: Pays::Yes, + }) + } + + /// Claim previously reserved `source_balance_at_this_chain` by + /// `target_account_at_this_chain`. + /// + /// **WARNING**: the correct way to call this function is to call it over the messages + /// bridge with dispatch origin set to + /// `pallet_bridge_dispatch::CallOrigin::SourceAccount(target_account_at_bridged_chain)`. + /// + /// This should be called only when successful transfer confirmation has been received. + #[pallet::weight(T::WeightInfo::claim_swap())] + pub fn claim_swap( + origin: OriginFor, + swap: TokenSwapOf, + ) -> DispatchResultWithPostInfo { + // ensure that the `origin` is controlled by the `swap.target_account_at_bridged_chain` + let origin_account = ensure_signed(origin)?; + let target_account_at_this_chain = target_account_at_this_chain::(&swap); + ensure!(origin_account == target_account_at_this_chain, Error::::InvalidClaimant,); + + // ensure that the swap is confirmed + let swap_hash = swap.using_encoded(blake2_256).into(); + let swap_state = PendingSwaps::::get(swap_hash); + match swap_state { + Some(TokenSwapState::Started) => fail!(Error::::SwapIsPending), + Some(TokenSwapState::Confirmed) => { + let is_claim_allowed = match swap.swap_type { + TokenSwapType::TemporaryTargetAccountAtBridgedChain => true, + TokenSwapType::LockClaimUntilBlock(block_number, _) => + block_number < frame_system::Pallet::::block_number(), + }; + + ensure!(is_claim_allowed, Error::::SwapIsTemporaryLocked); + }, + Some(TokenSwapState::Failed) => fail!(Error::::SwapIsFailed), + None => fail!(Error::::SwapIsInactive), + } + + complete_claim::(swap, swap_hash, origin_account, Event::SwapClaimed(swap_hash)) + } + + /// Return previously reserved `source_balance_at_this_chain` back to the + /// `source_account_at_this_chain`. 
+ /// + /// This should be called only when transfer has failed at Bridged chain and we have + /// received notification about that. + #[pallet::weight(T::WeightInfo::cancel_swap())] + pub fn cancel_swap( + origin: OriginFor, + swap: TokenSwapOf, + ) -> DispatchResultWithPostInfo { + // ensure that the `origin` is the same account that is mentioned in the `swap` + // intention + let origin_account = ensure_signed(origin)?; + ensure!( + origin_account == swap.source_account_at_this_chain, + Error::::MismatchedSwapSourceOrigin, + ); + + // ensure that the swap has failed + let swap_hash = swap.using_encoded(blake2_256).into(); + let swap_state = PendingSwaps::::get(swap_hash); + match swap_state { + Some(TokenSwapState::Started) => fail!(Error::::SwapIsPending), + Some(TokenSwapState::Confirmed) => fail!(Error::::SwapIsConfirmed), + Some(TokenSwapState::Failed) => { + // we allow canceling swap even before lock period is over - the + // `source_account_at_this_chain` has already paid for nothing and it is up to + // him to decide whether he want to try again + }, + None => fail!(Error::::SwapIsInactive), + } + + complete_claim::(swap, swap_hash, origin_account, Event::SwapCanceled(swap_hash)) + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event, I: 'static = ()> { + /// Tokens swap has been started and message has been sent to the bridged message. + /// + /// The payload is the swap hash and the transfer message nonce. + SwapStarted(H256, MessageNonce), + /// Token swap has been claimed. + SwapClaimed(H256), + /// Token swap has been canceled. + SwapCanceled(H256), + } + + #[pallet::error] + pub enum Error { + /// The account that has submitted the `start_claim` doesn't match the + /// `TokenSwap::source_account_at_this_chain`. + MismatchedSwapSourceOrigin, + /// The swap balance in This chain tokens is below existential deposit and can't be made. 
+ TooLowBalanceOnThisChain, + /// Transfer from This chain account to temporary Swap account has failed. + FailedToTransferToSwapAccount, + /// Transfer from the temporary Swap account to the derived account of Bridged account has + /// failed. + FailedToTransferFromSwapAccount, + /// The message to transfer tokens on Target chain can't be sent. + FailedToSendTransferMessage, + /// The same swap is already started. + SwapAlreadyStarted, + /// Swap outcome is not yet received. + SwapIsPending, + /// Someone is trying to claim swap that has failed. + SwapIsFailed, + /// Claiming swap is not allowed. + /// + /// Now the only possible case when you may get this error, is when you're trying to claim + /// swap with `TokenSwapType::LockClaimUntilBlock` before lock period is over. + SwapIsTemporaryLocked, + /// Swap period is finished and you can not restart it. + /// + /// Now the only possible case when you may get this error, is when you're trying to start + /// swap with `TokenSwapType::LockClaimUntilBlock` after lock period is over. + SwapPeriodIsFinished, + /// Someone is trying to cancel swap that has been confirmed. + SwapIsConfirmed, + /// Someone is trying to claim/cancel swap that is either not started or already + /// claimed/canceled. + SwapIsInactive, + /// The swap claimant is invalid. + InvalidClaimant, + } + + /// Pending token swaps states. + #[pallet::storage] + pub type PendingSwaps, I: 'static = ()> = + StorageMap<_, Identity, H256, TokenSwapState>; + + /// Pending transfer messages. + #[pallet::storage] + pub type PendingMessages, I: 'static = ()> = + StorageMap<_, Identity, MessageNonce, H256>; + + impl, I: 'static> OnDeliveryConfirmed for Pallet { + fn on_messages_delivered(lane: &LaneId, delivered_messages: &DeliveredMessages) -> Weight { + // we're only interested in our lane messages + if *lane != T::OutboundMessageLaneId::get() { + return 0 + } + + // so now we're dealing with our lane messages. 
Ideally we'll have dedicated lane + // and every message from `delivered_messages` is actually our transfer message. + // But it may be some shared lane (which is not recommended). + let mut reads = 0; + let mut writes = 0; + for message_nonce in delivered_messages.begin..=delivered_messages.end { + reads += 1; + if let Some(swap_hash) = PendingMessages::::take(message_nonce) { + writes += 1; + + let token_swap_state = + if delivered_messages.message_dispatch_result(message_nonce) { + TokenSwapState::Confirmed + } else { + TokenSwapState::Failed + }; + + log::trace!( + target: "runtime::bridge-token-swap", + "The dispatch of swap {:?} has been completed with {:?} status", + swap_hash, + token_swap_state, + ); + + PendingSwaps::::insert(swap_hash, token_swap_state); + } + } + + ::DbWeight::get().reads_writes(reads, writes) + } + } + + /// Returns temporary account id used to lock funds during swap on This chain. + pub(crate) fn swap_account_id, I: 'static>( + swap: &TokenSwapOf, + ) -> T::AccountId { + T::FromSwapToThisAccountIdConverter::convert(swap.using_encoded(blake2_256).into()) + } + + /// Expected target account representation on This chain (aka `target_account_at_this_chain`). + pub(crate) fn target_account_at_this_chain, I: 'static>( + swap: &TokenSwapOf, + ) -> T::AccountId { + T::FromBridgedToThisAccountIdConverter::convert(bp_runtime::derive_account_id( + T::BridgedChainId::get(), + bp_runtime::SourceAccount::Account(swap.target_account_at_bridged_chain.clone()), + )) + } + + /// Complete claim with given outcome. 
+ pub(crate) fn complete_claim, I: 'static>( + swap: TokenSwapOf, + swap_hash: H256, + destination_account: T::AccountId, + event: Event, + ) -> DispatchResultWithPostInfo { + let swap_account = swap_account_id::(&swap); + frame_support::storage::with_transaction(|| { + // funds are transferred from the temporary Swap account to the destination account + let transfer_result = T::ThisCurrency::transfer( + &swap_account, + &destination_account, + swap.source_balance_at_this_chain, + ExistenceRequirement::AllowDeath, + ); + if let Err(err) = transfer_result { + log::error!( + target: "runtime::bridge-token-swap", + "Failed to transfer This chain tokens for the swap {:?} from the Swap account {:?} to {:?}: {:?}", + swap, + swap_account, + destination_account, + err, + ); + + return sp_runtime::TransactionOutcome::Rollback(Err( + Error::::FailedToTransferFromSwapAccount.into(), + )) + } + + log::trace!( + target: "runtime::bridge-token-swap", + "The swap {:?} (hash {:?}) has been completed with {} status", + swap, + swap_hash, + match event { + Event::SwapClaimed(_) => "claimed", + Event::SwapCanceled(_) => "canceled", + _ => "", + }, + ); + + // forget about swap + PendingSwaps::::remove(swap_hash); + + // finally - emit the event + Pallet::::deposit_event(event); + + sp_runtime::TransactionOutcome::Commit(Ok(().into())) + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::*; + use frame_support::{assert_noop, assert_ok}; + + const CAN_START_BLOCK_NUMBER: u64 = 10; + const CAN_CLAIM_BLOCK_NUMBER: u64 = CAN_START_BLOCK_NUMBER + 1; + + const BRIDGED_CHAIN_ACCOUNT: BridgedAccountId = 3; + const BRIDGED_CHAIN_SPEC_VERSION: u32 = 4; + const BRIDGED_CHAIN_CALL_WEIGHT: Balance = 5; + + fn bridged_chain_account_public() -> BridgedAccountPublic { + 1.into() + } + + fn bridged_chain_account_signature() -> BridgedAccountSignature { + sp_runtime::testing::TestSignature(2, Vec::new()) + } + + fn test_swap() -> TokenSwapOf { + bp_token_swap::TokenSwap { + 
swap_type: TokenSwapType::LockClaimUntilBlock(CAN_START_BLOCK_NUMBER, 0.into()), + source_balance_at_this_chain: 100, + source_account_at_this_chain: THIS_CHAIN_ACCOUNT, + target_balance_at_bridged_chain: 200, + target_account_at_bridged_chain: BRIDGED_CHAIN_ACCOUNT, + } + } + + fn test_swap_creation() -> TokenSwapCreationOf { + TokenSwapCreation { + target_public_at_bridged_chain: bridged_chain_account_public(), + swap_delivery_and_dispatch_fee: SWAP_DELIVERY_AND_DISPATCH_FEE, + bridged_chain_spec_version: BRIDGED_CHAIN_SPEC_VERSION, + bridged_currency_transfer: test_transfer(), + bridged_currency_transfer_weight: BRIDGED_CHAIN_CALL_WEIGHT, + bridged_currency_transfer_signature: bridged_chain_account_signature(), + } + } + + fn test_swap_hash() -> H256 { + test_swap().using_encoded(blake2_256).into() + } + + fn test_transfer() -> RawBridgedTransferCall { + vec![OK_TRANSFER_CALL] + } + + fn start_test_swap() { + assert_ok!(Pallet::::create_swap( + Origin::signed(THIS_CHAIN_ACCOUNT), + test_swap(), + Box::new(TokenSwapCreation { + target_public_at_bridged_chain: bridged_chain_account_public(), + swap_delivery_and_dispatch_fee: SWAP_DELIVERY_AND_DISPATCH_FEE, + bridged_chain_spec_version: BRIDGED_CHAIN_SPEC_VERSION, + bridged_currency_transfer: test_transfer(), + bridged_currency_transfer_weight: BRIDGED_CHAIN_CALL_WEIGHT, + bridged_currency_transfer_signature: bridged_chain_account_signature(), + }), + )); + } + + fn receive_test_swap_confirmation(success: bool) { + Pallet::::on_messages_delivered( + &OutboundMessageLaneId::get(), + &DeliveredMessages::new(MESSAGE_NONCE, success), + ); + } + + #[test] + fn create_swap_fails_if_origin_is_incorrect() { + run_test(|| { + assert_noop!( + Pallet::::create_swap( + Origin::signed(THIS_CHAIN_ACCOUNT + 1), + test_swap(), + Box::new(test_swap_creation()), + ), + Error::::MismatchedSwapSourceOrigin + ); + }); + } + + #[test] + fn create_swap_fails_if_this_chain_balance_is_below_existential_deposit() { + run_test(|| { + let mut 
swap = test_swap(); + swap.source_balance_at_this_chain = ExistentialDeposit::get() - 1; + assert_noop!( + Pallet::::create_swap( + Origin::signed(THIS_CHAIN_ACCOUNT), + swap, + Box::new(test_swap_creation()), + ), + Error::::TooLowBalanceOnThisChain + ); + }); + } + + #[test] + fn create_swap_fails_if_currency_transfer_to_swap_account_fails() { + run_test(|| { + let mut swap = test_swap(); + swap.source_balance_at_this_chain = THIS_CHAIN_ACCOUNT_BALANCE + 1; + assert_noop!( + Pallet::::create_swap( + Origin::signed(THIS_CHAIN_ACCOUNT), + swap, + Box::new(test_swap_creation()), + ), + Error::::FailedToTransferToSwapAccount + ); + }); + } + + #[test] + fn create_swap_fails_if_send_message_fails() { + run_test(|| { + let mut transfer = test_transfer(); + transfer[0] = BAD_TRANSFER_CALL; + let mut swap_creation = test_swap_creation(); + swap_creation.bridged_currency_transfer = transfer; + assert_noop!( + Pallet::::create_swap( + Origin::signed(THIS_CHAIN_ACCOUNT), + test_swap(), + Box::new(swap_creation), + ), + Error::::FailedToSendTransferMessage + ); + }); + } + + #[test] + fn create_swap_fails_if_swap_is_active() { + run_test(|| { + assert_ok!(Pallet::::create_swap( + Origin::signed(THIS_CHAIN_ACCOUNT), + test_swap(), + Box::new(test_swap_creation()), + )); + + assert_noop!( + Pallet::::create_swap( + Origin::signed(THIS_CHAIN_ACCOUNT), + test_swap(), + Box::new(test_swap_creation()), + ), + Error::::SwapAlreadyStarted + ); + }); + } + + #[test] + fn create_swap_fails_if_trying_to_start_swap_after_lock_period_is_finished() { + run_test(|| { + frame_system::Pallet::::set_block_number(CAN_START_BLOCK_NUMBER + 1); + assert_noop!( + Pallet::::create_swap( + Origin::signed(THIS_CHAIN_ACCOUNT), + test_swap(), + Box::new(test_swap_creation()), + ), + Error::::SwapPeriodIsFinished + ); + }); + } + + #[test] + fn create_swap_succeeds_if_trying_to_start_swap_at_lock_period_end() { + run_test(|| { + frame_system::Pallet::::set_block_number(CAN_START_BLOCK_NUMBER); + 
assert_ok!(Pallet::::create_swap( + Origin::signed(THIS_CHAIN_ACCOUNT), + test_swap(), + Box::new(test_swap_creation()), + )); + }); + } + + #[test] + fn create_swap_succeeds() { + run_test(|| { + frame_system::Pallet::::set_block_number(1); + frame_system::Pallet::::reset_events(); + + assert_ok!(Pallet::::create_swap( + Origin::signed(THIS_CHAIN_ACCOUNT), + test_swap(), + Box::new(test_swap_creation()), + )); + + let swap_hash = test_swap_hash(); + assert_eq!(PendingSwaps::::get(swap_hash), Some(TokenSwapState::Started)); + assert_eq!(PendingMessages::::get(MESSAGE_NONCE), Some(swap_hash)); + assert_eq!( + pallet_balances::Pallet::::free_balance(&swap_account_id::< + TestRuntime, + (), + >(&test_swap())), + test_swap().source_balance_at_this_chain + SWAP_DELIVERY_AND_DISPATCH_FEE, + ); + assert!( + frame_system::Pallet::::events().iter().any(|e| e.event == + crate::mock::Event::TokenSwap(crate::Event::SwapStarted( + swap_hash, + MESSAGE_NONCE, + ))), + "Missing SwapStarted event: {:?}", + frame_system::Pallet::::events(), + ); + }); + } + + #[test] + fn claim_swap_fails_if_origin_is_incorrect() { + run_test(|| { + assert_noop!( + Pallet::::claim_swap( + Origin::signed( + 1 + target_account_at_this_chain::(&test_swap()) + ), + test_swap(), + ), + Error::::InvalidClaimant + ); + }); + } + + #[test] + fn claim_swap_fails_if_swap_is_pending() { + run_test(|| { + PendingSwaps::::insert(test_swap_hash(), TokenSwapState::Started); + + assert_noop!( + Pallet::::claim_swap( + Origin::signed(target_account_at_this_chain::(&test_swap())), + test_swap(), + ), + Error::::SwapIsPending + ); + }); + } + + #[test] + fn claim_swap_fails_if_swap_is_failed() { + run_test(|| { + PendingSwaps::::insert(test_swap_hash(), TokenSwapState::Failed); + + assert_noop!( + Pallet::::claim_swap( + Origin::signed(target_account_at_this_chain::(&test_swap())), + test_swap(), + ), + Error::::SwapIsFailed + ); + }); + } + + #[test] + fn claim_swap_fails_if_swap_is_inactive() { + run_test(|| { + 
assert_noop!( + Pallet::::claim_swap( + Origin::signed(target_account_at_this_chain::(&test_swap())), + test_swap(), + ), + Error::::SwapIsInactive + ); + }); + } + + #[test] + fn claim_swap_fails_if_currency_transfer_from_swap_account_fails() { + run_test(|| { + frame_system::Pallet::::set_block_number(CAN_CLAIM_BLOCK_NUMBER); + PendingSwaps::::insert(test_swap_hash(), TokenSwapState::Confirmed); + + assert_noop!( + Pallet::::claim_swap( + Origin::signed(target_account_at_this_chain::(&test_swap())), + test_swap(), + ), + Error::::FailedToTransferFromSwapAccount + ); + }); + } + + #[test] + fn claim_swap_fails_before_lock_period_is_completed() { + run_test(|| { + start_test_swap(); + receive_test_swap_confirmation(true); + + frame_system::Pallet::::set_block_number(CAN_CLAIM_BLOCK_NUMBER - 1); + + assert_noop!( + Pallet::::claim_swap( + Origin::signed(target_account_at_this_chain::(&test_swap())), + test_swap(), + ), + Error::::SwapIsTemporaryLocked + ); + }); + } + + #[test] + fn claim_swap_succeeds() { + run_test(|| { + start_test_swap(); + receive_test_swap_confirmation(true); + + frame_system::Pallet::::set_block_number(CAN_CLAIM_BLOCK_NUMBER); + frame_system::Pallet::::reset_events(); + + assert_ok!(Pallet::::claim_swap( + Origin::signed(target_account_at_this_chain::(&test_swap())), + test_swap(), + )); + + let swap_hash = test_swap_hash(); + assert_eq!(PendingSwaps::::get(swap_hash), None); + assert_eq!( + pallet_balances::Pallet::::free_balance(&swap_account_id::< + TestRuntime, + (), + >(&test_swap())), + 0, + ); + assert_eq!( + pallet_balances::Pallet::::free_balance( + &target_account_at_this_chain::(&test_swap()), + ), + test_swap().source_balance_at_this_chain, + ); + assert!( + frame_system::Pallet::::events().iter().any(|e| e.event == + crate::mock::Event::TokenSwap(crate::Event::SwapClaimed(swap_hash,))), + "Missing SwapClaimed event: {:?}", + frame_system::Pallet::::events(), + ); + }); + } + + #[test] + fn 
cancel_swap_fails_if_origin_is_incorrect() { + run_test(|| { + start_test_swap(); + receive_test_swap_confirmation(false); + + assert_noop!( + Pallet::::cancel_swap( + Origin::signed(THIS_CHAIN_ACCOUNT + 1), + test_swap() + ), + Error::::MismatchedSwapSourceOrigin + ); + }); + } + + #[test] + fn cancel_swap_fails_if_swap_is_pending() { + run_test(|| { + start_test_swap(); + + assert_noop!( + Pallet::::cancel_swap(Origin::signed(THIS_CHAIN_ACCOUNT), test_swap()), + Error::::SwapIsPending + ); + }); + } + + #[test] + fn cancel_swap_fails_if_swap_is_confirmed() { + run_test(|| { + start_test_swap(); + receive_test_swap_confirmation(true); + + assert_noop!( + Pallet::::cancel_swap(Origin::signed(THIS_CHAIN_ACCOUNT), test_swap()), + Error::::SwapIsConfirmed + ); + }); + } + + #[test] + fn cancel_swap_fails_if_swap_is_inactive() { + run_test(|| { + assert_noop!( + Pallet::::cancel_swap(Origin::signed(THIS_CHAIN_ACCOUNT), test_swap()), + Error::::SwapIsInactive + ); + }); + } + + #[test] + fn cancel_swap_fails_if_currency_transfer_from_swap_account_fails() { + run_test(|| { + start_test_swap(); + receive_test_swap_confirmation(false); + let _ = pallet_balances::Pallet::::slash( + &swap_account_id::(&test_swap()), + test_swap().source_balance_at_this_chain, + ); + + assert_noop!( + Pallet::::cancel_swap(Origin::signed(THIS_CHAIN_ACCOUNT), test_swap()), + Error::::FailedToTransferFromSwapAccount + ); + }); + } + + #[test] + fn cancel_swap_succeeds() { + run_test(|| { + start_test_swap(); + receive_test_swap_confirmation(false); + + frame_system::Pallet::::set_block_number(1); + frame_system::Pallet::::reset_events(); + + assert_ok!(Pallet::::cancel_swap( + Origin::signed(THIS_CHAIN_ACCOUNT), + test_swap() + )); + + let swap_hash = test_swap_hash(); + assert_eq!(PendingSwaps::::get(swap_hash), None); + assert_eq!( + pallet_balances::Pallet::::free_balance(&swap_account_id::< + TestRuntime, + (), + >(&test_swap())), + 0, + ); + assert_eq!( + 
pallet_balances::Pallet::::free_balance(&THIS_CHAIN_ACCOUNT), + THIS_CHAIN_ACCOUNT_BALANCE - SWAP_DELIVERY_AND_DISPATCH_FEE, + ); + assert!( + frame_system::Pallet::::events().iter().any(|e| e.event == + crate::mock::Event::TokenSwap(crate::Event::SwapCanceled(swap_hash,))), + "Missing SwapCanceled event: {:?}", + frame_system::Pallet::::events(), + ); + }); + } + + #[test] + fn messages_delivery_confirmations_are_accepted() { + run_test(|| { + start_test_swap(); + assert_eq!( + PendingMessages::::get(MESSAGE_NONCE), + Some(test_swap_hash()) + ); + assert_eq!( + PendingSwaps::::get(test_swap_hash()), + Some(TokenSwapState::Started) + ); + + // when unrelated messages are delivered + let mut messages = DeliveredMessages::new(MESSAGE_NONCE - 2, true); + messages.note_dispatched_message(false); + Pallet::::on_messages_delivered( + &OutboundMessageLaneId::get(), + &messages, + ); + assert_eq!( + PendingMessages::::get(MESSAGE_NONCE), + Some(test_swap_hash()) + ); + assert_eq!( + PendingSwaps::::get(test_swap_hash()), + Some(TokenSwapState::Started) + ); + + // when message we're interested in is accompanied by a bunch of other messages + let mut messages = DeliveredMessages::new(MESSAGE_NONCE - 1, false); + messages.note_dispatched_message(true); + messages.note_dispatched_message(false); + Pallet::::on_messages_delivered( + &OutboundMessageLaneId::get(), + &messages, + ); + assert_eq!(PendingMessages::::get(MESSAGE_NONCE), None); + assert_eq!( + PendingSwaps::::get(test_swap_hash()), + Some(TokenSwapState::Confirmed) + ); + }); + } +} diff --git a/polkadot/bridges/modules/token-swap/src/mock.rs b/polkadot/bridges/modules/token-swap/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..63edb323e1a4c85f350ad6cbf8d625ec6a19d38d --- /dev/null +++ b/polkadot/bridges/modules/token-swap/src/mock.rs @@ -0,0 +1,187 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate as pallet_bridge_token_swap; +use crate::MessagePayloadOf; + +use bp_messages::{ + source_chain::{MessagesBridge, SendMessageArtifacts}, + LaneId, MessageNonce, +}; +use bp_runtime::ChainId; +use frame_support::weights::Weight; +use sp_core::H256; +use sp_runtime::{ + testing::Header as SubstrateHeader, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, +}; + +pub type AccountId = u64; +pub type Balance = u64; +pub type Block = frame_system::mocking::MockBlock; +pub type BridgedAccountId = u64; +pub type BridgedAccountPublic = sp_runtime::testing::UintAuthorityId; +pub type BridgedAccountSignature = sp_runtime::testing::TestSignature; +pub type BridgedBalance = u64; +pub type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + +pub const OK_TRANSFER_CALL: u8 = 1; +pub const BAD_TRANSFER_CALL: u8 = 2; +pub const MESSAGE_NONCE: MessageNonce = 3; + +pub const THIS_CHAIN_ACCOUNT: AccountId = 1; +pub const THIS_CHAIN_ACCOUNT_BALANCE: Balance = 100_000; + +pub const SWAP_DELIVERY_AND_DISPATCH_FEE: Balance = 1; + +frame_support::construct_runtime! 
{ + pub enum TestRuntime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Event}, + TokenSwap: pallet_bridge_token_swap::{Pallet, Call, Event}, + } +} + +frame_support::parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} + +impl frame_system::Config for TestRuntime { + type Origin = Origin; + type Index = u64; + type Call = Call; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = SubstrateHeader; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type BaseCallFilter = frame_support::traits::Everything; + type SystemWeightInfo = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type SS58Prefix = (); + type OnSetCode = (); +} + +frame_support::parameter_types! { + pub const ExistentialDeposit: u64 = 10; + pub const MaxReserves: u32 = 50; +} + +impl pallet_balances::Config for TestRuntime { + type MaxLocks = (); + type Balance = Balance; + type DustRemoval = (); + type Event = Event; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = frame_system::Pallet; + type WeightInfo = (); + type MaxReserves = MaxReserves; + type ReserveIdentifier = [u8; 8]; +} + +frame_support::parameter_types! 
{ + pub const BridgedChainId: ChainId = *b"inst"; + pub const OutboundMessageLaneId: LaneId = *b"lane"; +} + +impl pallet_bridge_token_swap::Config for TestRuntime { + type Event = Event; + type WeightInfo = (); + + type BridgedChainId = BridgedChainId; + type OutboundMessageLaneId = OutboundMessageLaneId; + type MessagesBridge = TestMessagesBridge; + + type ThisCurrency = pallet_balances::Pallet; + type FromSwapToThisAccountIdConverter = TestAccountConverter; + + type BridgedChain = BridgedChain; + type FromBridgedToThisAccountIdConverter = TestAccountConverter; +} + +pub struct BridgedChain; + +impl bp_runtime::Chain for BridgedChain { + type BlockNumber = u64; + type Hash = H256; + type Hasher = BlakeTwo256; + type Header = sp_runtime::generic::Header; + + type AccountId = BridgedAccountId; + type Balance = BridgedBalance; + type Index = u64; + type Signature = BridgedAccountSignature; +} + +pub struct TestMessagesBridge; + +impl MessagesBridge> for TestMessagesBridge { + type Error = (); + + fn send_message( + sender: frame_system::RawOrigin, + lane: LaneId, + message: MessagePayloadOf, + delivery_and_dispatch_fee: Balance, + ) -> Result { + assert_ne!(sender, frame_system::RawOrigin::Signed(THIS_CHAIN_ACCOUNT)); + assert_eq!(lane, OutboundMessageLaneId::get()); + assert_eq!(delivery_and_dispatch_fee, SWAP_DELIVERY_AND_DISPATCH_FEE); + match message.call[0] { + OK_TRANSFER_CALL => Ok(SendMessageArtifacts { nonce: MESSAGE_NONCE, weight: 0 }), + BAD_TRANSFER_CALL => Err(()), + _ => unreachable!(), + } + } +} + +pub struct TestAccountConverter; + +impl sp_runtime::traits::Convert for TestAccountConverter { + fn convert(hash: H256) -> AccountId { + hash.to_low_u64_ne() + } +} + +/// Run pallet test. 
+pub fn run_test(test: impl FnOnce() -> T) -> T { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(THIS_CHAIN_ACCOUNT, THIS_CHAIN_ACCOUNT_BALANCE)], + } + .assimilate_storage(&mut t) + .unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(test) +} diff --git a/polkadot/bridges/modules/token-swap/src/weights.rs b/polkadot/bridges/modules/token-swap/src/weights.rs new file mode 100644 index 0000000000000000000000000000000000000000..06cb6b85cf336d4d308309c4aac45f6f2712b495 --- /dev/null +++ b/polkadot/bridges/modules/token-swap/src/weights.rs @@ -0,0 +1,93 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Autogenerated weights for `pallet_bridge_token_swap` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-10-06, STEPS: 50, REPEAT: 20 +//! LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled +//! 
CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/millau-bridge-node +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_bridge_token_swap +// --extrinsic=* +// --execution=wasm +// --wasm-execution=Compiled +// --heap-pages=4096 +// --output=./modules/token-swap/src/weights.rs +// --template=./.maintain/millau-weight-template.hbs + +#![allow(clippy::all)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{ + traits::Get, + weights::{constants::RocksDbWeight, Weight}, +}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for `pallet_bridge_token_swap`. +pub trait WeightInfo { + fn create_swap() -> Weight; + fn claim_swap() -> Weight; + fn cancel_swap() -> Weight; +} + +/// Weights for `pallet_bridge_token_swap` using the Millau node and recommended hardware. +pub struct MillauWeight(PhantomData); +impl WeightInfo for MillauWeight { + fn create_swap() -> Weight { + (116_040_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + fn claim_swap() -> Weight { + (102_882_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn cancel_swap() -> Weight { + (99_434_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn create_swap() -> Weight { + (116_040_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + fn claim_swap() -> Weight { + (102_882_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn cancel_swap() -> Weight { + (99_434_000 as Weight) + 
.saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } +} diff --git a/polkadot/bridges/modules/token-swap/src/weights_ext.rs b/polkadot/bridges/modules/token-swap/src/weights_ext.rs new file mode 100644 index 0000000000000000000000000000000000000000..2d27c76cbe68564660b5b547ed2eb8aa2a882c7c --- /dev/null +++ b/polkadot/bridges/modules/token-swap/src/weights_ext.rs @@ -0,0 +1,42 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Weight-related utilities. + +use crate::weights::WeightInfo; + +use bp_runtime::Size; +use frame_support::weights::{RuntimeDbWeight, Weight}; + +/// Extended weight info. +pub trait WeightInfoExt: WeightInfo { + // Functions that are directly mapped to extrinsics weights. + + /// Weight of message send extrinsic. 
+ fn send_message_weight(message: &impl Size, db_weight: RuntimeDbWeight) -> Weight; +} + +impl WeightInfoExt for () { + fn send_message_weight(message: &impl Size, db_weight: RuntimeDbWeight) -> Weight { + <() as pallet_bridge_messages::WeightInfoExt>::send_message_weight(message, db_weight) + } +} + +impl WeightInfoExt for crate::weights::MillauWeight { + fn send_message_weight(message: &impl Size, db_weight: RuntimeDbWeight) -> Weight { + <() as pallet_bridge_messages::WeightInfoExt>::send_message_weight(message, db_weight) + } +} diff --git a/polkadot/bridges/primitives/chain-kusama/Cargo.toml b/polkadot/bridges/primitives/chain-kusama/Cargo.toml index 70ff3b844df07a295c098541f933c82d226cf542..6ff860357c7c451524b106be643d0bbe6e38ebb1 100644 --- a/polkadot/bridges/primitives/chain-kusama/Cargo.toml +++ b/polkadot/bridges/primitives/chain-kusama/Cargo.toml @@ -7,15 +7,20 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] +smallvec = "1.7" # Bridge Dependencies + bp-messages = { path = "../messages", default-features = false } bp-polkadot-core = { path = "../polkadot-core", default-features = false } bp-runtime = { path = "../runtime", default-features = false } # Substrate Based Dependencies -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [features] default = ["std"] @@ -23,6 +28,8 @@ std = [ "bp-messages/std", "bp-polkadot-core/std", "bp-runtime/std", + "frame-support/std", "sp-api/std", "sp-std/std", + "sp-version/std", ] diff 
--git a/polkadot/bridges/primitives/chain-kusama/src/lib.rs b/polkadot/bridges/primitives/chain-kusama/src/lib.rs index e5ab47259e54c2fe31d6d0441fabf49460037f1a..9a6eb66d22865be745cf65c15e85a7ebd2252667 100644 --- a/polkadot/bridges/primitives/chain-kusama/src/lib.rs +++ b/polkadot/bridges/primitives/chain-kusama/src/lib.rs @@ -21,13 +21,46 @@ #![allow(clippy::unnecessary_mut_passed)] use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState}; +use frame_support::weights::{ + WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, +}; use sp_std::prelude::*; +use sp_version::RuntimeVersion; pub use bp_polkadot_core::*; /// Kusama Chain pub type Kusama = PolkadotLike; +// NOTE: This needs to be kept up to date with the Kusama runtime found in the Polkadot repo. +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: sp_version::create_runtime_str!("kusama"), + impl_name: sp_version::create_runtime_str!("parity-kusama"), + authoring_version: 2, + spec_version: 9100, + impl_version: 0, + apis: sp_version::create_apis_vec![[]], + transaction_version: 5, +}; + +// NOTE: This needs to be kept up to date with the Kusama runtime found in the Polkadot repo. +pub struct WeightToFee; +impl WeightToFeePolynomial for WeightToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients { + const CENTS: Balance = 1_000_000_000_000 / 30_000; + // in Kusama, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: + let p = CENTS; + let q = 10 * Balance::from(ExtrinsicBaseWeight::get()); + smallvec::smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } +} + // We use this to get the account on Kusama (target) which is derived from Polkadot's (source) // account. 
pub fn derive_account_from_polkadot_id(id: bp_runtime::SourceAccount) -> AccountId { @@ -35,27 +68,53 @@ pub fn derive_account_from_polkadot_id(id: bp_runtime::SourceAccount) AccountIdConverter::convert(encoded_id) } +/// Per-byte fee for Kusama transactions. +pub const TRANSACTION_BYTE_FEE: Balance = 10 * 1_000_000_000_000 / 30_000 / 1_000; + +/// Existential deposit on Kusama. +pub const EXISTENTIAL_DEPOSIT: Balance = 1_000_000_000_000 / 30_000; + +/// The target length of a session (how often authorities change) on Kusama measured in of number of +/// blocks. +/// +/// Note that since this is a target sessions may change before/after this time depending on network +/// conditions. +pub const SESSION_LENGTH: BlockNumber = time_units::HOURS; + +/// Name of the With-Polkadot messages pallet instance in the Kusama runtime. +pub const WITH_POLKADOT_MESSAGES_PALLET_NAME: &str = "BridgePolkadotMessages"; + +/// Name of the DOT->KSM conversion rate stored in the Kusama runtime. +pub const POLKADOT_TO_KUSAMA_CONVERSION_RATE_PARAMETER_NAME: &str = + "PolkadotToKusamaConversionRate"; + /// Name of the `KusamaFinalityApi::best_finalized` runtime method. pub const BEST_FINALIZED_KUSAMA_HEADER_METHOD: &str = "KusamaFinalityApi_best_finalized"; /// Name of the `KusamaFinalityApi::is_known_header` runtime method. pub const IS_KNOWN_KUSAMA_HEADER_METHOD: &str = "KusamaFinalityApi_is_known_header"; -/// Name of the `ToKusamaOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. +/// Name of the `ToKusamaOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime +/// method. pub const TO_KUSAMA_ESTIMATE_MESSAGE_FEE_METHOD: &str = "ToKusamaOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; /// Name of the `ToKusamaOutboundLaneApi::message_details` runtime method. pub const TO_KUSAMA_MESSAGE_DETAILS_METHOD: &str = "ToKusamaOutboundLaneApi_message_details"; /// Name of the `ToKusamaOutboundLaneApi::latest_generated_nonce` runtime method. 
-pub const TO_KUSAMA_LATEST_GENERATED_NONCE_METHOD: &str = "ToKusamaOutboundLaneApi_latest_generated_nonce"; +pub const TO_KUSAMA_LATEST_GENERATED_NONCE_METHOD: &str = + "ToKusamaOutboundLaneApi_latest_generated_nonce"; /// Name of the `ToKusamaOutboundLaneApi::latest_received_nonce` runtime method. -pub const TO_KUSAMA_LATEST_RECEIVED_NONCE_METHOD: &str = "ToKusamaOutboundLaneApi_latest_received_nonce"; +pub const TO_KUSAMA_LATEST_RECEIVED_NONCE_METHOD: &str = + "ToKusamaOutboundLaneApi_latest_received_nonce"; /// Name of the `FromKusamaInboundLaneApi::latest_received_nonce` runtime method. -pub const FROM_KUSAMA_LATEST_RECEIVED_NONCE_METHOD: &str = "FromKusamaInboundLaneApi_latest_received_nonce"; +pub const FROM_KUSAMA_LATEST_RECEIVED_NONCE_METHOD: &str = + "FromKusamaInboundLaneApi_latest_received_nonce"; /// Name of the `FromKusamaInboundLaneApi::latest_onfirmed_nonce` runtime method. -pub const FROM_KUSAMA_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromKusamaInboundLaneApi_latest_confirmed_nonce"; +pub const FROM_KUSAMA_LATEST_CONFIRMED_NONCE_METHOD: &str = + "FromKusamaInboundLaneApi_latest_confirmed_nonce"; /// Name of the `FromKusamaInboundLaneApi::unrewarded_relayers_state` runtime method. -pub const FROM_KUSAMA_UNREWARDED_RELAYERS_STATE: &str = "FromKusamaInboundLaneApi_unrewarded_relayers_state"; +pub const FROM_KUSAMA_UNREWARDED_RELAYERS_STATE: &str = + "FromKusamaInboundLaneApi_unrewarded_relayers_state"; sp_api::decl_runtime_apis! { /// API for querying information about the finalized Kusama headers. 
diff --git a/polkadot/bridges/primitives/chain-millau/Cargo.toml b/polkadot/bridges/primitives/chain-millau/Cargo.toml index 3628f9092091ab163e8b88f1612422f857068332..f1e17fe96f5ac713214c6d730aae24de87c9c907 100644 --- a/polkadot/bridges/primitives/chain-millau/Cargo.toml +++ b/polkadot/bridges/primitives/chain-millau/Cargo.toml @@ -16,19 +16,20 @@ fixed-hash = { version = "0.7.0", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } impl-codec = { version = "0.5.1", default-features = false } impl-serde = { version = "0.3.1", optional = true } -parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] } -serde = { version = "1.0.101", optional = true, features = ["derive"] } +parity-util-mem = { version = "0.10", default-features = false, features = ["primitive-types"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +serde = { version = "1.0", optional = true, features = ["derive"] } # Substrate Based Dependencies -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", 
branch = "master", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [features] default = ["std"] @@ -42,6 +43,7 @@ std = [ "impl-codec/std", "impl-serde", "parity-util-mem/std", + "scale-info/std", "serde", "sp-api/std", "sp-core/std", diff --git a/polkadot/bridges/primitives/chain-millau/src/lib.rs b/polkadot/bridges/primitives/chain-millau/src/lib.rs index 0efc54e96e6ad8532f99e067e75f3ed45bb5bef4..0092f7092bc0d309c2d2af0cf425633338f586e6 100644 --- a/polkadot/bridges/primitives/chain-millau/src/lib.rs +++ b/polkadot/bridges/primitives/chain-millau/src/lib.rs @@ -25,14 +25,14 @@ mod millau_hash; use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState}; use bp_runtime::Chain; use frame_support::{ - weights::{constants::WEIGHT_PER_SECOND, DispatchClass, Weight}, + weights::{constants::WEIGHT_PER_SECOND, DispatchClass, IdentityFee, Weight}, Parameter, RuntimeDebug, }; use frame_system::limits; +use scale_info::TypeInfo; use sp_core::Hasher as HasherT; -use sp_runtime::traits::Convert; use sp_runtime::{ - traits::{IdentifyAccount, Verify}, + traits::{Convert, IdentifyAccount, Verify}, MultiSignature, MultiSigner, Perbill, }; use sp_std::prelude::*; @@ -77,29 +77,32 @@ pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: 
MessageNonce = 1024; /// Weight of single regular message delivery transaction on Millau chain. /// /// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call -/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered. -/// The message must have dispatch weight set to zero. The result then must be rounded up to account -/// possible future runtime upgrades. +/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` +/// bytes is delivered. The message must have dispatch weight set to zero. The result then must be +/// rounded up to account possible future runtime upgrades. pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000; /// Increase of delivery transaction weight on Millau chain with every additional message byte. /// -/// This value is a result of `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The -/// result then must be rounded up to account possible future runtime upgrades. +/// This value is a result of +/// `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The result then +/// must be rounded up to account possible future runtime upgrades. pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000; /// Maximal weight of single message delivery confirmation transaction on Millau chain. /// -/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` weight formula computation -/// for the case when single message is confirmed. The result then must be rounded up to account possible future -/// runtime upgrades. +/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` +/// weight formula computation for the case when single message is confirmed. The result then must +/// be rounded up to account possible future runtime upgrades. 
pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000; /// Weight of pay-dispatch-fee operation for inbound messages at Millau chain. /// -/// This value corresponds to the result of `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` -/// call for your chain. Don't put too much reserve there, because it is used to **decrease** -/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery transactions cheaper. +/// This value corresponds to the result of +/// `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` call for your +/// chain. Don't put too much reserve there, because it is used to **decrease** +/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery +/// transactions cheaper. pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000; /// The target length of a session (how often authorities change) on Millau measured in of number of @@ -130,7 +133,7 @@ pub type BlockNumber = u64; /// Hash type used in Millau. pub type Hash = ::Out; -/// The type of an object that can produce hashes on Millau. +/// Type of object that can produce hashes on Millau. pub type Hasher = BlakeTwoAndKeccak256; /// The header type used by Millau. @@ -149,6 +152,12 @@ pub type AccountSigner = MultiSigner; /// Balance of an account. pub type Balance = u64; +/// Index of a transaction in the chain. +pub type Index = u32; + +/// Weight-to-Fee type used by Millau. +pub type WeightToFee = IdentityFee; + /// Millau chain. #[derive(RuntimeDebug)] pub struct Millau; @@ -158,10 +167,15 @@ impl Chain for Millau { type Hash = Hash; type Hasher = Hasher; type Header = Header; + + type AccountId = AccountId; + type Balance = Balance; + type Index = Index; + type Signature = Signature; } /// Millau Hasher (Blake2-256 ++ Keccak-256) implementation. 
-#[derive(PartialEq, Eq, Clone, Copy, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, Copy, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct BlakeTwoAndKeccak256; @@ -245,25 +259,36 @@ pub fn max_extrinsic_size() -> u32 { *BlockLength::get().max.get(DispatchClass::Normal) } +/// Name of the With-Rialto messages pallet instance in the Millau runtime. +pub const WITH_RIALTO_MESSAGES_PALLET_NAME: &str = "BridgeRialtoMessages"; +/// Name of the With-Rialto token swap pallet instance in the Millau runtime. +pub const WITH_RIALTO_TOKEN_SWAP_PALLET_NAME: &str = "BridgeRialtoTokenSwap"; + /// Name of the `MillauFinalityApi::best_finalized` runtime method. pub const BEST_FINALIZED_MILLAU_HEADER_METHOD: &str = "MillauFinalityApi_best_finalized"; -/// Name of the `ToMillauOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. +/// Name of the `ToMillauOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime +/// method. pub const TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD: &str = "ToMillauOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; /// Name of the `ToMillauOutboundLaneApi::message_details` runtime method. pub const TO_MILLAU_MESSAGE_DETAILS_METHOD: &str = "ToMillauOutboundLaneApi_message_details"; /// Name of the `ToMillauOutboundLaneApi::latest_received_nonce` runtime method. -pub const TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str = "ToMillauOutboundLaneApi_latest_received_nonce"; +pub const TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str = + "ToMillauOutboundLaneApi_latest_received_nonce"; /// Name of the `ToMillauOutboundLaneApi::latest_generated_nonce` runtime method. -pub const TO_MILLAU_LATEST_GENERATED_NONCE_METHOD: &str = "ToMillauOutboundLaneApi_latest_generated_nonce"; +pub const TO_MILLAU_LATEST_GENERATED_NONCE_METHOD: &str = + "ToMillauOutboundLaneApi_latest_generated_nonce"; /// Name of the `FromMillauInboundLaneApi::latest_received_nonce` runtime method. 
-pub const FROM_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str = "FromMillauInboundLaneApi_latest_received_nonce"; +pub const FROM_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str = + "FromMillauInboundLaneApi_latest_received_nonce"; /// Name of the `FromMillauInboundLaneApi::latest_onfirmed_nonce` runtime method. -pub const FROM_MILLAU_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromMillauInboundLaneApi_latest_confirmed_nonce"; +pub const FROM_MILLAU_LATEST_CONFIRMED_NONCE_METHOD: &str = + "FromMillauInboundLaneApi_latest_confirmed_nonce"; /// Name of the `FromMillauInboundLaneApi::unrewarded_relayers_state` runtime method. -pub const FROM_MILLAU_UNREWARDED_RELAYERS_STATE: &str = "FromMillauInboundLaneApi_unrewarded_relayers_state"; +pub const FROM_MILLAU_UNREWARDED_RELAYERS_STATE: &str = + "FromMillauInboundLaneApi_unrewarded_relayers_state"; sp_api::decl_runtime_apis! { /// API for querying information about the finalized Millau headers. @@ -287,7 +312,7 @@ sp_api::decl_runtime_apis! { /// /// Returns `None` if message is too expensive to be sent to Millau from this chain. /// - /// Please keep in mind that this method returns lowest message fee required for message + /// Please keep in mind that this method returns the lowest message fee required for message /// to be accepted to the lane. It may be good idea to pay a bit over this price to account /// future exchange rate changes and guarantee that relayer would deliver your message /// to the target chain. @@ -318,7 +343,7 @@ sp_api::decl_runtime_apis! { pub trait FromMillauInboundLaneApi { /// Returns nonce of the latest message, received by given lane. fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Nonce of latest message that has been confirmed to the bridged chain. + /// Nonce of the latest message that has been confirmed to the bridged chain. fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; /// State of the unrewarded relayers set at given lane. 
fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; diff --git a/polkadot/bridges/primitives/chain-millau/src/millau_hash.rs b/polkadot/bridges/primitives/chain-millau/src/millau_hash.rs index 936791217af1829cccb42a0a2a2661ea3fe759ea..11968b2f2826701ad0bf46f0fd90870df55e997b 100644 --- a/polkadot/bridges/primitives/chain-millau/src/millau_hash.rs +++ b/polkadot/bridges/primitives/chain-millau/src/millau_hash.rs @@ -15,6 +15,7 @@ // along with Parity Bridges Common. If not, see . use parity_util_mem::MallocSizeOf; +use scale_info::TypeInfo; use sp_runtime::traits::CheckEqual; // `sp_core::H512` can't be used, because it doesn't implement `CheckEqual`, which is required @@ -22,7 +23,7 @@ use sp_runtime::traits::CheckEqual; fixed_hash::construct_fixed_hash! { /// Hash type used in Millau chain. - #[derive(MallocSizeOf)] + #[derive(MallocSizeOf, TypeInfo)] pub struct MillauHash(64); } diff --git a/polkadot/bridges/primitives/chain-polkadot/Cargo.toml b/polkadot/bridges/primitives/chain-polkadot/Cargo.toml index 22ded41b9145ca690f423939d7bdc611ecc48c55..917c7f97478390864791e907d9d5bcc8cceb8321 100644 --- a/polkadot/bridges/primitives/chain-polkadot/Cargo.toml +++ b/polkadot/bridges/primitives/chain-polkadot/Cargo.toml @@ -7,16 +7,20 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] +smallvec = "1.7" # Bridge Dependencies + bp-messages = { path = "../messages", default-features = false } bp-polkadot-core = { path = "../polkadot-core", default-features = false } bp-runtime = { path = "../runtime", default-features = false } # Substrate Based Dependencies -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-std = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [features] default = ["std"] @@ -24,6 +28,8 @@ std = [ "bp-messages/std", "bp-polkadot-core/std", "bp-runtime/std", + "frame-support/std", "sp-api/std", "sp-std/std", + "sp-version/std", ] diff --git a/polkadot/bridges/primitives/chain-polkadot/src/lib.rs b/polkadot/bridges/primitives/chain-polkadot/src/lib.rs index b0ba77c66ffc34cc7dbb9fd5534832e74cce5c23..26bad1ea8656d1e441b18a6712ca99c55e8a3e97 100644 --- a/polkadot/bridges/primitives/chain-polkadot/src/lib.rs +++ b/polkadot/bridges/primitives/chain-polkadot/src/lib.rs @@ -21,13 +21,46 @@ #![allow(clippy::unnecessary_mut_passed)] use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState}; +use frame_support::weights::{ + WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, +}; use sp_std::prelude::*; +use sp_version::RuntimeVersion; pub use bp_polkadot_core::*; /// Polkadot Chain pub type Polkadot = PolkadotLike; +// NOTE: This needs to be kept up to date with the Polkadot runtime found in the Polkadot repo. +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: sp_version::create_runtime_str!("polkadot"), + impl_name: sp_version::create_runtime_str!("parity-polkadot"), + authoring_version: 0, + spec_version: 9100, + impl_version: 0, + apis: sp_version::create_apis_vec![[]], + transaction_version: 7, +}; + +// NOTE: This needs to be kept up to date with the Polkadot runtime found in the Polkadot repo. 
+pub struct WeightToFee; +impl WeightToFeePolynomial for WeightToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients { + const CENTS: Balance = 10_000_000_000 / 100; + // in Polkadot, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: + let p = CENTS; + let q = 10 * Balance::from(ExtrinsicBaseWeight::get()); + smallvec::smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } +} + // We use this to get the account on Polkadot (target) which is derived from Kusama's (source) // account. pub fn derive_account_from_kusama_id(id: bp_runtime::SourceAccount) -> AccountId { @@ -35,27 +68,53 @@ pub fn derive_account_from_kusama_id(id: bp_runtime::SourceAccount) - AccountIdConverter::convert(encoded_id) } +/// Per-byte fee for Polkadot transactions. +pub const TRANSACTION_BYTE_FEE: Balance = 10 * 10_000_000_000 / 100 / 1_000; + +/// Existential deposit on Polkadot. +pub const EXISTENTIAL_DEPOSIT: Balance = 10_000_000_000; + +/// The target length of a session (how often authorities change) on Polkadot measured in number +/// of blocks. +/// +/// Note that since this is a target sessions may change before/after this time depending on network +/// conditions. +pub const SESSION_LENGTH: BlockNumber = 4 * time_units::HOURS; + +/// Name of the With-Kusama messages pallet instance in the Polkadot runtime. +pub const WITH_KUSAMA_MESSAGES_PALLET_NAME: &str = "BridgeKusamaMessages"; + +/// Name of the KSM->DOT conversion rate stored in the Polkadot runtime. +pub const KUSAMA_TO_POLKADOT_CONVERSION_RATE_PARAMETER_NAME: &str = + "KusamaToPolkadotConversionRate"; + /// Name of the `PolkadotFinalityApi::best_finalized` runtime method. pub const BEST_FINALIZED_POLKADOT_HEADER_METHOD: &str = "PolkadotFinalityApi_best_finalized"; /// Name of the `PolkadotFinalityApi::is_known_header` runtime method. 
pub const IS_KNOWN_POLKADOT_HEADER_METHOD: &str = "PolkadotFinalityApi_is_known_header"; -/// Name of the `ToPolkadotOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. +/// Name of the `ToPolkadotOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime +/// method. pub const TO_POLKADOT_ESTIMATE_MESSAGE_FEE_METHOD: &str = "ToPolkadotOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; /// Name of the `ToPolkadotOutboundLaneApi::message_details` runtime method. pub const TO_POLKADOT_MESSAGE_DETAILS_METHOD: &str = "ToPolkadotOutboundLaneApi_message_details"; /// Name of the `ToPolkadotOutboundLaneApi::latest_generated_nonce` runtime method. -pub const TO_POLKADOT_LATEST_GENERATED_NONCE_METHOD: &str = "ToPolkadotOutboundLaneApi_latest_generated_nonce"; +pub const TO_POLKADOT_LATEST_GENERATED_NONCE_METHOD: &str = + "ToPolkadotOutboundLaneApi_latest_generated_nonce"; /// Name of the `ToPolkadotOutboundLaneApi::latest_received_nonce` runtime method. -pub const TO_POLKADOT_LATEST_RECEIVED_NONCE_METHOD: &str = "ToPolkadotOutboundLaneApi_latest_received_nonce"; +pub const TO_POLKADOT_LATEST_RECEIVED_NONCE_METHOD: &str = + "ToPolkadotOutboundLaneApi_latest_received_nonce"; /// Name of the `FromPolkadotInboundLaneApi::latest_received_nonce` runtime method. -pub const FROM_POLKADOT_LATEST_RECEIVED_NONCE_METHOD: &str = "FromPolkadotInboundLaneApi_latest_received_nonce"; +pub const FROM_POLKADOT_LATEST_RECEIVED_NONCE_METHOD: &str = + "FromPolkadotInboundLaneApi_latest_received_nonce"; /// Name of the `FromPolkadotInboundLaneApi::latest_onfirmed_nonce` runtime method. -pub const FROM_POLKADOT_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromPolkadotInboundLaneApi_latest_confirmed_nonce"; +pub const FROM_POLKADOT_LATEST_CONFIRMED_NONCE_METHOD: &str = + "FromPolkadotInboundLaneApi_latest_confirmed_nonce"; /// Name of the `FromPolkadotInboundLaneApi::unrewarded_relayers_state` runtime method. 
-pub const FROM_POLKADOT_UNREWARDED_RELAYERS_STATE: &str = "FromPolkadotInboundLaneApi_unrewarded_relayers_state"; +pub const FROM_POLKADOT_UNREWARDED_RELAYERS_STATE: &str = + "FromPolkadotInboundLaneApi_unrewarded_relayers_state"; sp_api::decl_runtime_apis! { /// API for querying information about the finalized Polkadot headers. diff --git a/polkadot/bridges/primitives/chain-rialto-parachain/Cargo.toml b/polkadot/bridges/primitives/chain-rialto-parachain/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..034188631b8cde608025ee64baa5b6de1b9be698 --- /dev/null +++ b/polkadot/bridges/primitives/chain-rialto-parachain/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "bp-rialto-parachain" +description = "Primitives of Rialto parachain runtime." +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] + +# Bridge Dependencies + +bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = "../runtime", default-features = false } + +# Substrate Based Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +[features] +default = ["std"] +std = [ + "bp-messages/std", + "bp-runtime/std", + "frame-support/std", + "frame-system/std", + "sp-api/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] diff --git 
a/polkadot/bridges/primitives/chain-rialto-parachain/src/lib.rs b/polkadot/bridges/primitives/chain-rialto-parachain/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..826f6d39bd7f73b9d04578d34715c25e5cc530af --- /dev/null +++ b/polkadot/bridges/primitives/chain-rialto-parachain/src/lib.rs @@ -0,0 +1,128 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +// RuntimeApi generated functions +#![allow(clippy::too_many_arguments)] +// Runtime-generated DecodeLimit::decode_all_With_depth_limit +#![allow(clippy::unnecessary_mut_passed)] + +use bp_runtime::Chain; +use frame_support::{ + weights::{constants::WEIGHT_PER_SECOND, DispatchClass, IdentityFee, Weight}, + RuntimeDebug, +}; +use frame_system::limits; +use sp_core::Hasher as HasherT; +use sp_runtime::{ + traits::{BlakeTwo256, IdentifyAccount, Verify}, + MultiSignature, MultiSigner, Perbill, +}; + +/// Maximal weight of single Rialto parachain block. +/// +/// This represents two seconds of compute assuming a target block time of six seconds. +pub const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; + +/// Represents the average portion of a block's weight that will be used by an +/// `on_initialize()` runtime call. 
+pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); + +/// Represents the portion of a block that will be used by Normal extrinsics. +pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); + +/// Block number type used in Rialto. +pub type BlockNumber = u32; + +/// Hash type used in Rialto. +pub type Hash = ::Out; + +/// The type of object that can produce hashes on Rialto. +pub type Hasher = BlakeTwo256; + +/// The header type used by Rialto. +pub type Header = sp_runtime::generic::Header; + +/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. +pub type Signature = MultiSignature; + +/// Some way of identifying an account on the chain. We intentionally make it equivalent +/// to the public key of our transaction signing scheme. +pub type AccountId = <::Signer as IdentifyAccount>::AccountId; + +/// Public key of the chain account that may be used to verify signatures. +pub type AccountSigner = MultiSigner; + +/// Balance of an account. +pub type Balance = u128; + +/// An instant or duration in time. +pub type Moment = u64; + +/// Index of a transaction in the parachain. +pub type Index = u32; + +/// Weight-to-Fee type used by Rialto parachain. +pub type WeightToFee = IdentityFee; + +/// Rialto parachain. +#[derive(RuntimeDebug)] +pub struct RialtoParachain; + +impl Chain for RialtoParachain { + type BlockNumber = BlockNumber; + type Hash = Hash; + type Hasher = Hasher; + type Header = Header; + + type AccountId = AccountId; + type Balance = Balance; + type Index = Index; + type Signature = Signature; +} + +frame_support::parameter_types! 
{ + pub BlockLength: limits::BlockLength = + limits::BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() + // Allowance for Normal class + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + // Allowance for Operational class + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Extra reserved space for Operational class + weights.reserved = Some(MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + // By default Mandatory class is not limited at all. + // This parameter is used to derive maximal size of a single extrinsic. + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); +} + +/// Get the maximum weight (compute time) that a Normal extrinsic on the Rialto parachain can use. +pub fn max_extrinsic_weight() -> Weight { + BlockWeights::get() + .get(DispatchClass::Normal) + .max_extrinsic + .unwrap_or(Weight::MAX) +} + +/// Get the maximum length in bytes that a Normal extrinsic on the Rialto parachain requires. 
+pub fn max_extrinsic_size() -> u32 { + *BlockLength::get().max.get(DispatchClass::Normal) +} diff --git a/polkadot/bridges/primitives/chain-rialto/Cargo.toml b/polkadot/bridges/primitives/chain-rialto/Cargo.toml index 7e039a40acd964c9036c8fd3dd81e40baeb431de..d16ac59484fb5da33c5f0d23d6eeadbe5a04bee0 100644 --- a/polkadot/bridges/primitives/chain-rialto/Cargo.toml +++ b/polkadot/bridges/primitives/chain-rialto/Cargo.toml @@ -15,12 +15,12 @@ bp-runtime = { path = "../runtime", default-features = false } # Substrate Based Dependencies -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [features] default = ["std"] diff --git a/polkadot/bridges/primitives/chain-rialto/src/lib.rs 
b/polkadot/bridges/primitives/chain-rialto/src/lib.rs index 8139372959e357c2b4df5232f606aad88e903acf..6c4e48301e3bbd6abbadffb0985245d2d605e84d 100644 --- a/polkadot/bridges/primitives/chain-rialto/src/lib.rs +++ b/polkadot/bridges/primitives/chain-rialto/src/lib.rs @@ -23,7 +23,7 @@ use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState}; use bp_runtime::Chain; use frame_support::{ - weights::{constants::WEIGHT_PER_SECOND, DispatchClass, Weight}, + weights::{constants::WEIGHT_PER_SECOND, DispatchClass, IdentityFee, Weight}, Parameter, RuntimeDebug, }; use frame_system::limits; @@ -42,7 +42,7 @@ pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; /// Number of bytes, included in the signed Rialto transaction apart from the encoded call itself. /// /// Can be computed by subtracting encoded call size from raw transaction size. -pub const TX_EXTRA_BYTES: u32 = 103; +pub const TX_EXTRA_BYTES: u32 = 104; /// Maximal size (in bytes) of encoded (using `Encode::encode()`) account id. pub const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 32; @@ -68,29 +68,32 @@ pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 128; /// Weight of single regular message delivery transaction on Rialto chain. /// /// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call -/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered. -/// The message must have dispatch weight set to zero. The result then must be rounded up to account -/// possible future runtime upgrades. +/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` +/// bytes is delivered. The message must have dispatch weight set to zero. The result then must be +/// rounded up to account possible future runtime upgrades. 
pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000; /// Increase of delivery transaction weight on Rialto chain with every additional message byte. /// -/// This value is a result of `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The -/// result then must be rounded up to account possible future runtime upgrades. +/// This value is a result of +/// `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The result then +/// must be rounded up to account possible future runtime upgrades. pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000; /// Maximal weight of single message delivery confirmation transaction on Rialto chain. /// -/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` weight formula computation -/// for the case when single message is confirmed. The result then must be rounded up to account possible future -/// runtime upgrades. +/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` +/// weight formula computation for the case when single message is confirmed. The result then must +/// be rounded up to account possible future runtime upgrades. pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000; /// Weight of pay-dispatch-fee operation for inbound messages at Rialto chain. /// -/// This value corresponds to the result of `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` -/// call for your chain. Don't put too much reserve there, because it is used to **decrease** -/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery transactions cheaper. +/// This value corresponds to the result of +/// `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` call for your +/// chain. 
Don't put too much reserve there, because it is used to **decrease** +/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery +/// transactions cheaper. pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000; /// The target length of a session (how often authorities change) on Rialto measured in of number of @@ -105,7 +108,7 @@ pub use time_units::*; /// Human readable time units defined in terms of number of blocks. pub mod time_units { - use super::BlockNumber; + use super::{BlockNumber, SESSION_LENGTH}; pub const MILLISECS_PER_BLOCK: u64 = 6000; pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; @@ -113,6 +116,11 @@ pub mod time_units { pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); pub const HOURS: BlockNumber = MINUTES * 60; pub const DAYS: BlockNumber = HOURS * 24; + + pub const EPOCH_DURATION_IN_SLOTS: BlockNumber = SESSION_LENGTH; + + // 1 in 4 blocks (on average, not counting collisions) will be primary babe blocks. + pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); } /// Block number type used in Rialto. @@ -121,7 +129,7 @@ pub type BlockNumber = u32; /// Hash type used in Rialto. pub type Hash = ::Out; -/// The type of an object that can produce hashes on Rialto. +/// The type of object that can produce hashes on Rialto. pub type Hasher = BlakeTwo256; /// The header type used by Rialto. @@ -140,6 +148,15 @@ pub type AccountSigner = MultiSigner; /// Balance of an account. pub type Balance = u128; +/// An instant or duration in time. +pub type Moment = u64; + +/// Index of a transaction in the chain. +pub type Index = u32; + +/// Weight-to-Fee type used by Rialto. +pub type WeightToFee = IdentityFee; + /// Rialto chain. 
#[derive(RuntimeDebug)] pub struct Rialto; @@ -149,6 +166,11 @@ impl Chain for Rialto { type Hash = Hash; type Hasher = Hasher; type Header = Header; + + type AccountId = AccountId; + type Balance = Balance; + type Index = Index; + type Signature = Signature; } /// Convert a 256-bit hash into an AccountId. @@ -206,25 +228,40 @@ pub fn max_extrinsic_size() -> u32 { *BlockLength::get().max.get(DispatchClass::Normal) } +/// Name of the With-Millau messages pallet instance in the Rialto runtime. +pub const WITH_MILLAU_MESSAGES_PALLET_NAME: &str = "BridgeMillauMessages"; + +/// Name of the parachain registrar pallet in the Rialto runtime. +pub const PARAS_REGISTRAR_PALLET_NAME: &str = "Registrar"; + +/// Name of the parachains pallet in the Rialto runtime. +pub const PARAS_PALLET_NAME: &str = "Paras"; + /// Name of the `RialtoFinalityApi::best_finalized` runtime method. pub const BEST_FINALIZED_RIALTO_HEADER_METHOD: &str = "RialtoFinalityApi_best_finalized"; -/// Name of the `ToRialtoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. +/// Name of the `ToRialtoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime +/// method. pub const TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD: &str = "ToRialtoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; /// Name of the `ToRialtoOutboundLaneApi::message_details` runtime method. pub const TO_RIALTO_MESSAGE_DETAILS_METHOD: &str = "ToRialtoOutboundLaneApi_message_details"; /// Name of the `ToRialtoOutboundLaneApi::latest_generated_nonce` runtime method. -pub const TO_RIALTO_LATEST_GENERATED_NONCE_METHOD: &str = "ToRialtoOutboundLaneApi_latest_generated_nonce"; +pub const TO_RIALTO_LATEST_GENERATED_NONCE_METHOD: &str = + "ToRialtoOutboundLaneApi_latest_generated_nonce"; /// Name of the `ToRialtoOutboundLaneApi::latest_received_nonce` runtime method. 
-pub const TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD: &str = "ToRialtoOutboundLaneApi_latest_received_nonce"; +pub const TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD: &str = + "ToRialtoOutboundLaneApi_latest_received_nonce"; /// Name of the `FromRialtoInboundLaneApi::latest_received_nonce` runtime method. -pub const FROM_RIALTO_LATEST_RECEIVED_NONCE_METHOD: &str = "FromRialtoInboundLaneApi_latest_received_nonce"; +pub const FROM_RIALTO_LATEST_RECEIVED_NONCE_METHOD: &str = + "FromRialtoInboundLaneApi_latest_received_nonce"; /// Name of the `FromRialtoInboundLaneApi::latest_onfirmed_nonce` runtime method. -pub const FROM_RIALTO_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromRialtoInboundLaneApi_latest_confirmed_nonce"; +pub const FROM_RIALTO_LATEST_CONFIRMED_NONCE_METHOD: &str = + "FromRialtoInboundLaneApi_latest_confirmed_nonce"; /// Name of the `FromRialtoInboundLaneApi::unrewarded_relayers_state` runtime method. -pub const FROM_RIALTO_UNREWARDED_RELAYERS_STATE: &str = "FromRialtoInboundLaneApi_unrewarded_relayers_state"; +pub const FROM_RIALTO_UNREWARDED_RELAYERS_STATE: &str = + "FromRialtoInboundLaneApi_unrewarded_relayers_state"; sp_api::decl_runtime_apis! { /// API for querying information about the finalized Rialto headers. @@ -248,7 +285,7 @@ sp_api::decl_runtime_apis! { /// /// Returns `None` if message is too expensive to be sent to Rialto from this chain. /// - /// Please keep in mind that this method returns lowest message fee required for message + /// Please keep in mind that this method returns the lowest message fee required for message /// to be accepted to the lane. It may be good idea to pay a bit over this price to account /// future exchange rate changes and guarantee that relayer would deliver your message /// to the target chain. @@ -279,7 +316,7 @@ sp_api::decl_runtime_apis! { pub trait FromRialtoInboundLaneApi { /// Returns nonce of the latest message, received by given lane. 
fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Nonce of latest message that has been confirmed to the bridged chain. + /// Nonce of the latest message that has been confirmed to the bridged chain. fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; /// State of the unrewarded relayers set at given lane. fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; diff --git a/polkadot/bridges/primitives/chain-rococo/Cargo.toml b/polkadot/bridges/primitives/chain-rococo/Cargo.toml index 33772c7890a07c495296ff683d6a1b8baf7f7fcc..6e1189b05f3635d9b4fbd8c0e965ea35b2fda3f8 100644 --- a/polkadot/bridges/primitives/chain-rococo/Cargo.toml +++ b/polkadot/bridges/primitives/chain-rococo/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] } -smallvec = "1.6" +parity-scale-codec = { version = "2.2.0", default-features = false, features = ["derive"] } +smallvec = "1.7" # Bridge Dependencies bp-messages = { path = "../messages", default-features = false } @@ -16,8 +16,8 @@ bp-polkadot-core = { path = "../polkadot-core", default-features = false } bp-runtime = { path = "../runtime", default-features = false } # Substrate Based Dependencies -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-version 
= { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/polkadot/bridges/primitives/chain-rococo/src/lib.rs b/polkadot/bridges/primitives/chain-rococo/src/lib.rs index b4faae00eeb31c84d17602088688402a55708b37..b3bbc91976dac1c796be3fe91c9d8f2a36e9b3dd 100644 --- a/polkadot/bridges/primitives/chain-rococo/src/lib.rs +++ b/polkadot/bridges/primitives/chain-rococo/src/lib.rs @@ -21,7 +21,9 @@ #![allow(clippy::unnecessary_mut_passed)] use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState}; -use frame_support::weights::{WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial}; +use frame_support::weights::{ + Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, +}; use sp_std::prelude::*; use sp_version::RuntimeVersion; @@ -30,8 +32,8 @@ pub use bp_polkadot_core::*; /// Rococo Chain pub type Rococo = PolkadotLike; -/// The target length of a session (how often authorities change) on Westend measured in of number of -/// blocks. +/// The target length of a session (how often authorities change) on Westend measured in of number +/// of blocks. /// /// Note that since this is a target sessions may change before/after this time depending on network /// conditions. @@ -72,27 +74,45 @@ pub fn derive_account_from_wococo_id(id: bp_runtime::SourceAccount) - AccountIdConverter::convert(encoded_id) } +/// Name of the With-Wococo messages pallet instance in the Rococo runtime. +pub const WITH_WOCOCO_MESSAGES_PALLET_NAME: &str = "BridgeWococoMessages"; + /// Name of the `RococoFinalityApi::best_finalized` runtime method. pub const BEST_FINALIZED_ROCOCO_HEADER_METHOD: &str = "RococoFinalityApi_best_finalized"; /// Name of the `RococoFinalityApi::is_known_header` runtime method. pub const IS_KNOWN_ROCOCO_HEADER_METHOD: &str = "RococoFinalityApi_is_known_header"; -/// Name of the `ToRococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. 
+/// Name of the `ToRococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime +/// method. pub const TO_ROCOCO_ESTIMATE_MESSAGE_FEE_METHOD: &str = "ToRococoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; /// Name of the `ToRococoOutboundLaneApi::message_details` runtime method. pub const TO_ROCOCO_MESSAGE_DETAILS_METHOD: &str = "ToRococoOutboundLaneApi_message_details"; /// Name of the `ToRococoOutboundLaneApi::latest_generated_nonce` runtime method. -pub const TO_ROCOCO_LATEST_GENERATED_NONCE_METHOD: &str = "ToRococoOutboundLaneApi_latest_generated_nonce"; +pub const TO_ROCOCO_LATEST_GENERATED_NONCE_METHOD: &str = + "ToRococoOutboundLaneApi_latest_generated_nonce"; /// Name of the `ToRococoOutboundLaneApi::latest_received_nonce` runtime method. -pub const TO_ROCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = "ToRococoOutboundLaneApi_latest_received_nonce"; +pub const TO_ROCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = + "ToRococoOutboundLaneApi_latest_received_nonce"; /// Name of the `FromRococoInboundLaneApi::latest_received_nonce` runtime method. -pub const FROM_ROCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = "FromRococoInboundLaneApi_latest_received_nonce"; +pub const FROM_ROCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = + "FromRococoInboundLaneApi_latest_received_nonce"; /// Name of the `FromRococoInboundLaneApi::latest_onfirmed_nonce` runtime method. -pub const FROM_ROCOCO_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromRococoInboundLaneApi_latest_confirmed_nonce"; +pub const FROM_ROCOCO_LATEST_CONFIRMED_NONCE_METHOD: &str = + "FromRococoInboundLaneApi_latest_confirmed_nonce"; /// Name of the `FromRococoInboundLaneApi::unrewarded_relayers_state` runtime method. 
-pub const FROM_ROCOCO_UNREWARDED_RELAYERS_STATE: &str = "FromRococoInboundLaneApi_unrewarded_relayers_state"; +pub const FROM_ROCOCO_UNREWARDED_RELAYERS_STATE: &str = + "FromRococoInboundLaneApi_unrewarded_relayers_state"; + +/// Weight of pay-dispatch-fee operation for inbound messages at Rococo chain. +/// +/// This value corresponds to the result of +/// `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` call for your +/// chain. Don't put too much reserve there, because it is used to **decrease** +/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery +/// transactions cheaper. +pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000; sp_api::decl_runtime_apis! { /// API for querying information about the finalized Rococo headers. diff --git a/polkadot/bridges/primitives/chain-westend/Cargo.toml b/polkadot/bridges/primitives/chain-westend/Cargo.toml index d5fda1ccef05a1c5ee8b9ffa2cf32cd107b43d98..4fd1652744ed6473690016bcf9b812ae29505c85 100644 --- a/polkadot/bridges/primitives/chain-westend/Cargo.toml +++ b/polkadot/bridges/primitives/chain-westend/Cargo.toml @@ -7,16 +7,21 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "2.2.0", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +smallvec = "1.7" # Bridge Dependencies + bp-header-chain = { path = "../header-chain", default-features = false } bp-messages = { path = "../messages", default-features = false } bp-polkadot-core = { path = "../polkadot-core", default-features = false } bp-runtime = { path = "../runtime", default-features = false } # Substrate Based Dependencies -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + 
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } @@ -28,7 +33,9 @@ std = [ "bp-messages/std", "bp-polkadot-core/std", "bp-runtime/std", + "frame-support/std", "parity-scale-codec/std", + "scale-info/std", "sp-api/std", "sp-runtime/std", "sp-std/std", diff --git a/polkadot/bridges/primitives/chain-westend/src/lib.rs b/polkadot/bridges/primitives/chain-westend/src/lib.rs index e3c4d733def9db8fedd4b824170c899e5b7867d0..8beb897f59a15b8b22deeb88fd6099147aab98a9 100644 --- a/polkadot/bridges/primitives/chain-westend/src/lib.rs +++ b/polkadot/bridges/primitives/chain-westend/src/lib.rs @@ -21,7 +21,10 @@ #![allow(clippy::unnecessary_mut_passed)] use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState}; -use bp_runtime::Chain; +use frame_support::weights::{ + WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, +}; +use scale_info::TypeInfo; use sp_std::prelude::*; use sp_version::RuntimeVersion; @@ -30,7 +33,23 @@ pub use bp_polkadot_core::*; /// Westend Chain pub type Westend = PolkadotLike; -pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; +// NOTE: This needs to be kept up to date with the Westend runtime found in the Polkadot repo. 
+pub struct WeightToFee; +impl WeightToFeePolynomial for WeightToFee { + type Balance = Balance; + fn polynomial() -> WeightToFeeCoefficients { + const CENTS: Balance = 1_000_000_000_000 / 1_000; + // in Westend, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: + let p = CENTS; + let q = 10 * Balance::from(ExtrinsicBaseWeight::get()); + smallvec::smallvec![WeightToFeeCoefficient { + degree: 1, + negative: false, + coeff_frac: Perbill::from_rational(p % q, q), + coeff_integer: p / q, + }] + } +} // NOTE: This needs to be kept up to date with the Westend runtime found in the Polkadot repo. pub const VERSION: RuntimeVersion = RuntimeVersion { @@ -45,32 +64,11 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { /// Westend Runtime `Call` enum. /// -/// The enum represents a subset of possible `Call`s we can send to Westend chain. -/// Ideally this code would be auto-generated from Metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with Westend -/// `construct_runtime`, so that we maintain SCALE-compatibility. -/// -/// See: https://github.com/paritytech/polkadot/blob/master/runtime/westend/src/lib.rs -#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)] -pub enum Call { - /// Rococo bridge pallet. - #[codec(index = 40)] - BridgeGrandpaRococo(BridgeGrandpaRococoCall), -} - -#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)] -#[allow(non_camel_case_types)] -pub enum BridgeGrandpaRococoCall { - #[codec(index = 0)] - submit_finality_proof( - ::Header, - bp_header_chain::justification::GrandpaJustification<::Header>, - ), - #[codec(index = 1)] - initialize(bp_header_chain::InitializationData<::Header>), -} +/// We are not currently submitting any Westend transactions => it is empty. 
+#[derive( + parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone, TypeInfo, +)] +pub enum Call {} impl sp_runtime::traits::Dispatchable for Call { type Origin = (); @@ -95,25 +93,31 @@ pub const BEST_FINALIZED_WESTEND_HEADER_METHOD: &str = "WestendFinalityApi_best_ /// Name of the `WestendFinalityApi::is_known_header` runtime method. pub const IS_KNOWN_WESTEND_HEADER_METHOD: &str = "WestendFinalityApi_is_known_header"; -/// Name of the `ToWestendOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. +/// Name of the `ToWestendOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime +/// method. pub const TO_WESTEND_ESTIMATE_MESSAGE_FEE_METHOD: &str = "ToWestendOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; /// Name of the `ToWestendOutboundLaneApi::message_details` runtime method. pub const TO_WESTEND_MESSAGE_DETAILS_METHOD: &str = "ToWestendOutboundLaneApi_message_details"; /// Name of the `ToWestendOutboundLaneApi::latest_generated_nonce` runtime method. -pub const TO_WESTEND_LATEST_GENERATED_NONCE_METHOD: &str = "ToWestendOutboundLaneApi_latest_generated_nonce"; +pub const TO_WESTEND_LATEST_GENERATED_NONCE_METHOD: &str = + "ToWestendOutboundLaneApi_latest_generated_nonce"; /// Name of the `ToWestendOutboundLaneApi::latest_received_nonce` runtime method. -pub const TO_WESTEND_LATEST_RECEIVED_NONCE_METHOD: &str = "ToWestendOutboundLaneApi_latest_received_nonce"; +pub const TO_WESTEND_LATEST_RECEIVED_NONCE_METHOD: &str = + "ToWestendOutboundLaneApi_latest_received_nonce"; /// Name of the `FromWestendInboundLaneApi::latest_received_nonce` runtime method. -pub const FROM_WESTEND_LATEST_RECEIVED_NONCE_METHOD: &str = "FromWestendInboundLaneApi_latest_received_nonce"; +pub const FROM_WESTEND_LATEST_RECEIVED_NONCE_METHOD: &str = + "FromWestendInboundLaneApi_latest_received_nonce"; /// Name of the `FromWestendInboundLaneApi::latest_onfirmed_nonce` runtime method. 
-pub const FROM_WESTEND_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromWestendInboundLaneApi_latest_confirmed_nonce"; +pub const FROM_WESTEND_LATEST_CONFIRMED_NONCE_METHOD: &str = + "FromWestendInboundLaneApi_latest_confirmed_nonce"; /// Name of the `FromWestendInboundLaneApi::unrewarded_relayers_state` runtime method. -pub const FROM_WESTEND_UNREWARDED_RELAYERS_STATE: &str = "FromWestendInboundLaneApi_unrewarded_relayers_state"; +pub const FROM_WESTEND_UNREWARDED_RELAYERS_STATE: &str = + "FromWestendInboundLaneApi_unrewarded_relayers_state"; -/// The target length of a session (how often authorities change) on Westend measured in of number of -/// blocks. +/// The target length of a session (how often authorities change) on Westend measured in of number +/// of blocks. /// /// Note that since this is a target sessions may change before/after this time depending on network /// conditions. diff --git a/polkadot/bridges/primitives/chain-wococo/Cargo.toml b/polkadot/bridges/primitives/chain-wococo/Cargo.toml index 88201dde9ac191f392613de93ec999935568d3ac..d99783695ad313828e2dd4c4f259233c39b99ed5 100644 --- a/polkadot/bridges/primitives/chain-wococo/Cargo.toml +++ b/polkadot/bridges/primitives/chain-wococo/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "2.2.0", default-features = false, features = ["derive"] } # Bridge Dependencies bp-messages = { path = "../messages", default-features = false } @@ -16,7 +16,7 @@ bp-rococo = { path = "../chain-rococo", default-features = false } bp-runtime = { path = "../runtime", default-features = false } # Substrate Based Dependencies -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false 
} sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/polkadot/bridges/primitives/chain-wococo/src/lib.rs b/polkadot/bridges/primitives/chain-wococo/src/lib.rs index 24572e141b20fb001179d676131cf1eb5089fb7d..fe2ce3a309a6bf6ab6ab16bf2390980472a8511b 100644 --- a/polkadot/bridges/primitives/chain-wococo/src/lib.rs +++ b/polkadot/bridges/primitives/chain-wococo/src/lib.rs @@ -25,7 +25,7 @@ use sp_std::prelude::*; pub use bp_polkadot_core::*; // Rococo runtime = Wococo runtime -pub use bp_rococo::{WeightToFee, SESSION_LENGTH, VERSION}; +pub use bp_rococo::{WeightToFee, PAY_INBOUND_DISPATCH_FEE_WEIGHT, SESSION_LENGTH, VERSION}; /// Wococo Chain pub type Wococo = PolkadotLike; @@ -37,27 +37,36 @@ pub fn derive_account_from_rococo_id(id: bp_runtime::SourceAccount) - AccountIdConverter::convert(encoded_id) } +/// Name of the With-Rococo messages pallet instance in the Wococo runtime. +pub const WITH_ROCOCO_MESSAGES_PALLET_NAME: &str = "BridgeRococoMessages"; + /// Name of the `WococoFinalityApi::best_finalized` runtime method. pub const BEST_FINALIZED_WOCOCO_HEADER_METHOD: &str = "WococoFinalityApi_best_finalized"; /// Name of the `WococoFinalityApi::is_known_header` runtime method. pub const IS_KNOWN_WOCOCO_HEADER_METHOD: &str = "WococoFinalityApi_is_known_header"; -/// Name of the `ToWococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. +/// Name of the `ToWococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime +/// method. pub const TO_WOCOCO_ESTIMATE_MESSAGE_FEE_METHOD: &str = "ToWococoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; /// Name of the `ToWococoOutboundLaneApi::message_details` runtime method. 
pub const TO_WOCOCO_MESSAGE_DETAILS_METHOD: &str = "ToWococoOutboundLaneApi_message_details"; /// Name of the `ToWococoOutboundLaneApi::latest_generated_nonce` runtime method. -pub const TO_WOCOCO_LATEST_GENERATED_NONCE_METHOD: &str = "ToWococoOutboundLaneApi_latest_generated_nonce"; +pub const TO_WOCOCO_LATEST_GENERATED_NONCE_METHOD: &str = + "ToWococoOutboundLaneApi_latest_generated_nonce"; /// Name of the `ToWococoOutboundLaneApi::latest_received_nonce` runtime method. -pub const TO_WOCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = "ToWococoOutboundLaneApi_latest_received_nonce"; +pub const TO_WOCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = + "ToWococoOutboundLaneApi_latest_received_nonce"; /// Name of the `FromWococoInboundLaneApi::latest_received_nonce` runtime method. -pub const FROM_WOCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = "FromWococoInboundLaneApi_latest_received_nonce"; +pub const FROM_WOCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = + "FromWococoInboundLaneApi_latest_received_nonce"; /// Name of the `FromWococoInboundLaneApi::latest_onfirmed_nonce` runtime method. -pub const FROM_WOCOCO_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromWococoInboundLaneApi_latest_confirmed_nonce"; +pub const FROM_WOCOCO_LATEST_CONFIRMED_NONCE_METHOD: &str = + "FromWococoInboundLaneApi_latest_confirmed_nonce"; /// Name of the `FromWococoInboundLaneApi::unrewarded_relayers_state` runtime method. -pub const FROM_WOCOCO_UNREWARDED_RELAYERS_STATE: &str = "FromWococoInboundLaneApi_unrewarded_relayers_state"; +pub const FROM_WOCOCO_UNREWARDED_RELAYERS_STATE: &str = + "FromWococoInboundLaneApi_unrewarded_relayers_state"; sp_api::decl_runtime_apis! { /// API for querying information about the finalized Wococo headers. @@ -81,7 +90,7 @@ sp_api::decl_runtime_apis! { /// /// Returns `None` if message is too expensive to be sent to Wococo from this chain. 
/// - /// Please keep in mind that this method returns lowest message fee required for message + /// Please keep in mind that this method returns the lowest message fee required for message /// to be accepted to the lane. It may be good idea to pay a bit over this price to account /// future exchange rate changes and guarantee that relayer would deliver your message /// to the target chain. @@ -112,7 +121,7 @@ sp_api::decl_runtime_apis! { pub trait FromWococoInboundLaneApi { /// Returns nonce of the latest message, received by given lane. fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Nonce of latest message that has been confirmed to the bridged chain. + /// Nonce of the latest message that has been confirmed to the bridged chain. fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; /// State of the unrewarded relayers set at given lane. fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; diff --git a/polkadot/bridges/primitives/currency-exchange/src/lib.rs b/polkadot/bridges/primitives/currency-exchange/src/lib.rs deleted file mode 100644 index 88695dbb5ef406cedd997fe434aa8f5e39befb35..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/currency-exchange/src/lib.rs +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] -// Generated by `DecodeLimit::decode_with_depth_limit` -#![allow(clippy::unnecessary_mut_passed)] - -use codec::{Decode, Encode, EncodeLike}; -use frame_support::{Parameter, RuntimeDebug}; -use sp_api::decl_runtime_apis; -use sp_std::marker::PhantomData; - -/// All errors that may happen during exchange. -#[derive(RuntimeDebug, PartialEq)] -pub enum Error { - /// Invalid peer blockchain transaction provided. - InvalidTransaction, - /// Peer transaction has invalid amount. - InvalidAmount, - /// Peer transaction has invalid recipient. - InvalidRecipient, - /// Cannot map from peer recipient to this blockchain recipient. - FailedToMapRecipients, - /// Failed to convert from peer blockchain currency to this blockhain currency. - FailedToConvertCurrency, - /// Deposit has failed. - DepositFailed, - /// Deposit has partially failed (changes to recipient account were made). - DepositPartiallyFailed, -} - -/// Result of all exchange operations. -pub type Result = sp_std::result::Result; - -/// Peer blockchain lock funds transaction. -#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)] -pub struct LockFundsTransaction { - /// Something that uniquely identifies this transfer. - pub id: TransferId, - /// Funds recipient on the peer chain. - pub recipient: Recipient, - /// Amount of the locked funds. - pub amount: Amount, -} - -/// Peer blockchain transaction that may represent lock funds transaction. -pub trait MaybeLockFundsTransaction { - /// Transaction type. - type Transaction; - /// Identifier that uniquely identifies this transfer. - type Id: Decode + Encode + EncodeLike + sp_std::fmt::Debug; - /// Peer recipient type. - type Recipient; - /// Peer currency amount type. 
- type Amount; - - /// Parse lock funds transaction of the peer blockchain. Returns None if - /// transaction format is unknown, or it isn't a lock funds transaction. - fn parse(tx: &Self::Transaction) -> Result>; -} - -/// Map that maps recipients from peer blockchain to this blockchain recipients. -pub trait RecipientsMap { - /// Peer blockchain recipient type. - type PeerRecipient; - /// Current blockchain recipient type. - type Recipient; - - /// Lookup current blockchain recipient by peer blockchain recipient. - fn map(peer_recipient: Self::PeerRecipient) -> Result; -} - -/// Conversion between two currencies. -pub trait CurrencyConverter { - /// Type of the source currency amount. - type SourceAmount; - /// Type of the target currency amount. - type TargetAmount; - - /// Covert from source to target currency. - fn convert(amount: Self::SourceAmount) -> Result; -} - -/// Currency deposit. -pub trait DepositInto { - /// Recipient type. - type Recipient; - /// Currency amount type. - type Amount; - - /// Grant some money to given account. - fn deposit_into(recipient: Self::Recipient, amount: Self::Amount) -> Result<()>; -} - -/// Recipients map which is used when accounts ids are the same on both chains. -#[derive(Debug)] -pub struct IdentityRecipients(PhantomData); - -impl RecipientsMap for IdentityRecipients { - type PeerRecipient = AccountId; - type Recipient = AccountId; - - fn map(peer_recipient: Self::PeerRecipient) -> Result { - Ok(peer_recipient) - } -} - -/// Currency converter which is used when currency is the same on both chains. -#[derive(Debug)] -pub struct IdentityCurrencyConverter(PhantomData); - -impl CurrencyConverter for IdentityCurrencyConverter { - type SourceAmount = Amount; - type TargetAmount = Amount; - - fn convert(currency: Self::SourceAmount) -> Result { - Ok(currency) - } -} - -decl_runtime_apis! { - /// API for Rialto exchange transactions submitters. 
- pub trait RialtoCurrencyExchangeApi { - /// Returns true if currency exchange module is able to import transaction proof in - /// its current state. - fn filter_transaction_proof(proof: Proof) -> bool; - } - - /// API for Kovan exchange transactions submitters. - pub trait KovanCurrencyExchangeApi { - /// Returns true if currency exchange module is able to import transaction proof in - /// its current state. - fn filter_transaction_proof(proof: Proof) -> bool; - } -} diff --git a/polkadot/bridges/primitives/ethereum-poa/Cargo.toml b/polkadot/bridges/primitives/ethereum-poa/Cargo.toml deleted file mode 100644 index cd2c3a97a0f32095dd8812246b663235fc8f3099..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/ethereum-poa/Cargo.toml +++ /dev/null @@ -1,57 +0,0 @@ -[package] -name = "bp-eth-poa" -description = "Primitives of Ethereum PoA Bridge module." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -ethbloom = { version = "0.10.0", default-features = false, features = ["rlp"] } -fixed-hash = { version = "0.7", default-features = false } -hash-db = { version = "0.15.2", default-features = false } -impl-rlp = { version = "0.3", default-features = false } -impl-serde = { version = "0.3.1", optional = true } -libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"] } -parity-bytes = { version = "0.1", default-features = false } -plain_hasher = { version = "0.2.2", default-features = false } -primitive-types = { version = "0.9", default-features = false, features = ["codec", "rlp"] } -rlp = { version = "0.5", default-features = false } -serde = { version = "1.0", optional = true } -serde-big-array = { version = "0.2", optional = true } -triehash = { version = "0.8.2", default-features = false } - -# Substrate Dependencies - -sp-api = { 
git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[dev-dependencies] -hex-literal = "0.2" - -[features] -default = ["std"] -std = [ - "codec/std", - "ethbloom/std", - "fixed-hash/std", - "hash-db/std", - "impl-rlp/std", - "impl-serde", - "libsecp256k1/std", - "parity-bytes/std", - "plain_hasher/std", - "primitive-types/std", - "primitive-types/serde", - "rlp/std", - "serde/std", - "serde-big-array", - "sp-api/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", - "triehash/std", -] diff --git a/polkadot/bridges/primitives/ethereum-poa/src/lib.rs b/polkadot/bridges/primitives/ethereum-poa/src/lib.rs deleted file mode 100644 index b912262992d2ad4495ff2a954af7c9321c4bc697..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/ethereum-poa/src/lib.rs +++ /dev/null @@ -1,734 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] -// Generated by `DecodeLimit::decode_with_depth_limit` -#![allow(clippy::unnecessary_mut_passed)] - -pub use parity_bytes::Bytes; -pub use primitive_types::{H160, H256, H512, U128, U256}; -pub use rlp::encode as rlp_encode; - -use codec::{Decode, Encode}; -use ethbloom::{Bloom as EthBloom, Input as BloomInput}; -use fixed_hash::construct_fixed_hash; -use rlp::{Decodable, DecoderError, Rlp, RlpStream}; -use sp_io::hashing::keccak_256; -use sp_runtime::RuntimeDebug; -use sp_std::prelude::*; - -use impl_rlp::impl_fixed_hash_rlp; -#[cfg(feature = "std")] -use impl_serde::impl_fixed_hash_serde; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; -#[cfg(feature = "std")] -use serde_big_array::big_array; - -construct_fixed_hash! { pub struct H520(65); } -impl_fixed_hash_rlp!(H520, 65); -#[cfg(feature = "std")] -impl_fixed_hash_serde!(H520, 65); - -/// Raw (RLP-encoded) ethereum transaction. -pub type RawTransaction = Vec; - -/// Raw (RLP-encoded) ethereum transaction receipt. -pub type RawTransactionReceipt = Vec; - -/// An ethereum address. -pub type Address = H160; - -pub mod signatures; - -/// Complete header id. -#[derive(Encode, Decode, Default, RuntimeDebug, PartialEq, Clone, Copy)] -pub struct HeaderId { - /// Header number. - pub number: u64, - /// Header hash. - pub hash: H256, -} - -/// An Aura header. -#[derive(Clone, Default, Encode, Decode, PartialEq, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct AuraHeader { - /// Parent block hash. - pub parent_hash: H256, - /// Block timestamp. - pub timestamp: u64, - /// Block number. - pub number: u64, - /// Block author. - pub author: Address, - - /// Transactions root. - pub transactions_root: H256, - /// Block uncles hash. - pub uncles_hash: H256, - /// Block extra data. - pub extra_data: Bytes, - - /// State root. 
- pub state_root: H256, - /// Block receipts root. - pub receipts_root: H256, - /// Block bloom. - pub log_bloom: Bloom, - /// Gas used for contracts execution. - pub gas_used: U256, - /// Block gas limit. - pub gas_limit: U256, - - /// Block difficulty. - pub difficulty: U256, - /// Vector of post-RLP-encoded fields. - pub seal: Vec, -} - -/// Parsed ethereum transaction. -#[derive(PartialEq, RuntimeDebug)] -pub struct Transaction { - /// Sender address. - pub sender: Address, - /// Unsigned portion of ethereum transaction. - pub unsigned: UnsignedTransaction, -} - -/// Unsigned portion of ethereum transaction. -#[derive(Clone, PartialEq, RuntimeDebug)] -pub struct UnsignedTransaction { - /// Sender nonce. - pub nonce: U256, - /// Gas price. - pub gas_price: U256, - /// Gas limit. - pub gas: U256, - /// Transaction destination address. None if it is contract creation transaction. - pub to: Option
, - /// Value. - pub value: U256, - /// Associated data. - pub payload: Bytes, -} - -/// Information describing execution of a transaction. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] -pub struct Receipt { - /// The total gas used in the block following execution of the transaction. - pub gas_used: U256, - /// The OR-wide combination of all logs' blooms for this transaction. - pub log_bloom: Bloom, - /// The logs stemming from this transaction. - pub logs: Vec, - /// Transaction outcome. - pub outcome: TransactionOutcome, -} - -/// Transaction outcome store in the receipt. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] -pub enum TransactionOutcome { - /// Status and state root are unknown under EIP-98 rules. - Unknown, - /// State root is known. Pre EIP-98 and EIP-658 rules. - StateRoot(H256), - /// Status code is known. EIP-658 rules. - StatusCode(u8), -} - -/// A record of execution for a `LOG` operation. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] -pub struct LogEntry { - /// The address of the contract executing at the point of the `LOG` operation. - pub address: Address, - /// The topics associated with the `LOG` operation. - pub topics: Vec, - /// The data associated with the `LOG` operation. - pub data: Bytes, -} - -/// Logs bloom. -#[derive(Clone, Encode, Decode)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct Bloom(#[cfg_attr(feature = "std", serde(with = "BigArray"))] [u8; 256]); - -#[cfg(feature = "std")] -big_array! { BigArray; } - -/// An empty step message that is included in a seal, the only difference is that it doesn't include -/// the `parent_hash` in order to save space. The included signature is of the original empty step -/// message, which can be reconstructed by using the parent hash of the block in which this sealed -/// empty message is included. -pub struct SealedEmptyStep { - /// Signature of the original message author. 
- pub signature: H520, - /// The step this message is generated for. - pub step: u64, -} - -impl AuraHeader { - /// Compute id of this header. - pub fn compute_id(&self) -> HeaderId { - HeaderId { - number: self.number, - hash: self.compute_hash(), - } - } - - /// Compute hash of this header (keccak of the RLP with seal). - pub fn compute_hash(&self) -> H256 { - keccak_256(&self.rlp(true)).into() - } - - /// Get id of this header' parent. Returns None if this is genesis header. - pub fn parent_id(&self) -> Option { - self.number.checked_sub(1).map(|parent_number| HeaderId { - number: parent_number, - hash: self.parent_hash, - }) - } - - /// Check if passed transactions receipts are matching receipts root in this header. - /// Returns Ok(computed-root) if check succeeds. - /// Returns Err(computed-root) if check fails. - pub fn check_receipts_root(&self, receipts: &[Receipt]) -> Result { - check_merkle_proof(self.receipts_root, receipts.iter().map(|r| r.rlp())) - } - - /// Check if passed raw transactions receipts are matching receipts root in this header. - /// Returns Ok(computed-root) if check succeeds. - /// Returns Err(computed-root) if check fails. - pub fn check_raw_receipts_root<'a>( - &self, - receipts: impl IntoIterator, - ) -> Result { - check_merkle_proof(self.receipts_root, receipts.into_iter()) - } - - /// Check if passed transactions are matching transactions root in this header. - /// Returns Ok(computed-root) if check succeeds. - /// Returns Err(computed-root) if check fails. - pub fn check_transactions_root<'a>( - &self, - transactions: impl IntoIterator, - ) -> Result { - check_merkle_proof(self.transactions_root, transactions.into_iter()) - } - - /// Gets the seal hash of this header. 
- pub fn seal_hash(&self, include_empty_steps: bool) -> Option { - Some(match include_empty_steps { - true => { - let mut message = self.compute_hash().as_bytes().to_vec(); - message.extend_from_slice(self.seal.get(2)?); - keccak_256(&message).into() - } - false => keccak_256(&self.rlp(false)).into(), - }) - } - - /// Get step this header is generated for. - pub fn step(&self) -> Option { - self.seal.get(0).map(|x| Rlp::new(x)).and_then(|x| x.as_val().ok()) - } - - /// Get header author' signature. - pub fn signature(&self) -> Option { - self.seal.get(1).and_then(|x| Rlp::new(x).as_val().ok()) - } - - /// Extracts the empty steps from the header seal. - pub fn empty_steps(&self) -> Option> { - self.seal - .get(2) - .and_then(|x| Rlp::new(x).as_list::().ok()) - } - - /// Returns header RLP with or without seals. - fn rlp(&self, with_seal: bool) -> Bytes { - let mut s = RlpStream::new(); - if with_seal { - s.begin_list(13 + self.seal.len()); - } else { - s.begin_list(13); - } - - s.append(&self.parent_hash); - s.append(&self.uncles_hash); - s.append(&self.author); - s.append(&self.state_root); - s.append(&self.transactions_root); - s.append(&self.receipts_root); - s.append(&EthBloom::from(self.log_bloom.0)); - s.append(&self.difficulty); - s.append(&self.number); - s.append(&self.gas_limit); - s.append(&self.gas_used); - s.append(&self.timestamp); - s.append(&self.extra_data); - - if with_seal { - for b in &self.seal { - s.append_raw(b, 1); - } - } - - s.out().to_vec() - } -} - -impl UnsignedTransaction { - /// Decode unsigned portion of raw transaction RLP. 
- pub fn decode_rlp(raw_tx: &[u8]) -> Result { - let tx_rlp = Rlp::new(raw_tx); - let to = tx_rlp.at(3)?; - Ok(UnsignedTransaction { - nonce: tx_rlp.val_at(0)?, - gas_price: tx_rlp.val_at(1)?, - gas: tx_rlp.val_at(2)?, - to: match to.is_empty() { - false => Some(to.as_val()?), - true => None, - }, - value: tx_rlp.val_at(4)?, - payload: tx_rlp.val_at(5)?, - }) - } - - /// Returns message that has to be signed to sign this transaction. - pub fn message(&self, chain_id: Option) -> H256 { - keccak_256(&self.rlp(chain_id)).into() - } - - /// Returns unsigned transaction RLP. - pub fn rlp(&self, chain_id: Option) -> Bytes { - let mut stream = RlpStream::new_list(if chain_id.is_some() { 9 } else { 6 }); - self.rlp_to(chain_id, &mut stream); - stream.out().to_vec() - } - - /// Encode to given rlp stream. - pub fn rlp_to(&self, chain_id: Option, stream: &mut RlpStream) { - stream.append(&self.nonce); - stream.append(&self.gas_price); - stream.append(&self.gas); - match self.to { - Some(to) => stream.append(&to), - None => stream.append(&""), - }; - stream.append(&self.value); - stream.append(&self.payload); - if let Some(chain_id) = chain_id { - stream.append(&chain_id); - stream.append(&0u8); - stream.append(&0u8); - } - } -} - -impl Receipt { - /// Decode status from raw transaction receipt RLP. - pub fn is_successful_raw_receipt(raw_receipt: &[u8]) -> Result { - let rlp = Rlp::new(raw_receipt); - if rlp.item_count()? == 3 { - // no outcome - invalid tx? - Ok(false) - } else { - let first = rlp.at(0)?; - if first.is_data() && first.data()?.len() <= 1 { - // EIP-658 transaction - status of successful transaction is 1 - let status: u8 = first.as_val()?; - Ok(status == 1) - } else { - // pre-EIP-658 transaction - we do not support this kind of transactions - Ok(false) - } - } - } - - /// Returns receipt RLP. 
- pub fn rlp(&self) -> Bytes { - let mut s = RlpStream::new(); - match self.outcome { - TransactionOutcome::Unknown => { - s.begin_list(3); - } - TransactionOutcome::StateRoot(ref root) => { - s.begin_list(4); - s.append(root); - } - TransactionOutcome::StatusCode(ref status_code) => { - s.begin_list(4); - s.append(status_code); - } - } - s.append(&self.gas_used); - s.append(&EthBloom::from(self.log_bloom.0)); - - s.begin_list(self.logs.len()); - for log in &self.logs { - s.begin_list(3); - s.append(&log.address); - s.begin_list(log.topics.len()); - for topic in &log.topics { - s.append(topic); - } - s.append(&log.data); - } - - s.out().to_vec() - } -} - -impl SealedEmptyStep { - /// Returns message that has to be signed by the validator. - pub fn message(&self, parent_hash: &H256) -> H256 { - let mut message = RlpStream::new_list(2); - message.append(&self.step); - message.append(parent_hash); - keccak_256(&message.out()).into() - } - - /// Returns rlp for the vector of empty steps (we only do encoding in tests). - pub fn rlp_of(empty_steps: &[SealedEmptyStep]) -> Bytes { - let mut s = RlpStream::new(); - s.begin_list(empty_steps.len()); - for empty_step in empty_steps { - s.begin_list(2).append(&empty_step.signature).append(&empty_step.step); - } - s.out().to_vec() - } -} - -impl Decodable for SealedEmptyStep { - fn decode(rlp: &Rlp) -> Result { - let signature: H520 = rlp.val_at(0)?; - let step = rlp.val_at(1)?; - - Ok(SealedEmptyStep { signature, step }) - } -} - -impl LogEntry { - /// Calculates the bloom of this log entry. - pub fn bloom(&self) -> Bloom { - let eth_bloom = - self.topics - .iter() - .fold(EthBloom::from(BloomInput::Raw(self.address.as_bytes())), |mut b, t| { - b.accrue(BloomInput::Raw(t.as_bytes())); - b - }); - Bloom(*eth_bloom.data()) - } -} - -impl Bloom { - /// Returns true if this bloom has all bits from the other set. 
- pub fn contains(&self, other: &Bloom) -> bool { - self.0.iter().zip(other.0.iter()).all(|(l, r)| (l & r) == *r) - } -} - -impl<'a> From<&'a [u8; 256]> for Bloom { - fn from(buffer: &'a [u8; 256]) -> Bloom { - Bloom(*buffer) - } -} - -impl PartialEq for Bloom { - fn eq(&self, other: &Bloom) -> bool { - self.0.iter().zip(other.0.iter()).all(|(l, r)| l == r) - } -} - -impl Default for Bloom { - fn default() -> Self { - Bloom([0; 256]) - } -} - -#[cfg(feature = "std")] -impl std::fmt::Debug for Bloom { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("Bloom").finish() - } -} - -/// Decode Ethereum transaction. -pub fn transaction_decode_rlp(raw_tx: &[u8]) -> Result { - // parse transaction fields - let unsigned = UnsignedTransaction::decode_rlp(raw_tx)?; - let tx_rlp = Rlp::new(raw_tx); - let v: u64 = tx_rlp.val_at(6)?; - let r: U256 = tx_rlp.val_at(7)?; - let s: U256 = tx_rlp.val_at(8)?; - - // reconstruct signature - let mut signature = [0u8; 65]; - let (chain_id, v) = match v { - v if v == 27u64 => (None, 0), - v if v == 28u64 => (None, 1), - v if v >= 35u64 => (Some((v - 35) / 2), ((v - 1) % 2) as u8), - _ => (None, 4), - }; - r.to_big_endian(&mut signature[0..32]); - s.to_big_endian(&mut signature[32..64]); - signature[64] = v; - - // reconstruct message that has been signed - let message = unsigned.message(chain_id); - - // recover tx sender - let sender_public = sp_io::crypto::secp256k1_ecdsa_recover(&signature, message.as_fixed_bytes()) - .map_err(|_| rlp::DecoderError::Custom("Failed to recover transaction sender"))?; - let sender_address = public_to_address(&sender_public); - - Ok(Transaction { - sender: sender_address, - unsigned, - }) -} - -/// Convert public key into corresponding ethereum address. 
-pub fn public_to_address(public: &[u8; 64]) -> Address { - let hash = keccak_256(public); - let mut result = Address::zero(); - result.as_bytes_mut().copy_from_slice(&hash[12..]); - result -} - -/// Check ethereum merkle proof. -/// Returns Ok(computed-root) if check succeeds. -/// Returns Err(computed-root) if check fails. -fn check_merkle_proof>(expected_root: H256, items: impl Iterator) -> Result { - let computed_root = compute_merkle_root(items); - if computed_root == expected_root { - Ok(computed_root) - } else { - Err(computed_root) - } -} - -/// Compute ethereum merkle root. -pub fn compute_merkle_root>(items: impl Iterator) -> H256 { - struct Keccak256Hasher; - - impl hash_db::Hasher for Keccak256Hasher { - type Out = H256; - type StdHasher = plain_hasher::PlainHasher; - const LENGTH: usize = 32; - fn hash(x: &[u8]) -> Self::Out { - keccak_256(x).into() - } - } - - triehash::ordered_trie_root::(items) -} - -/// Get validator that should author the block at given step. -pub fn step_validator(header_validators: &[T], header_step: u64) -> &T { - &header_validators[(header_step % header_validators.len() as u64) as usize] -} - -sp_api::decl_runtime_apis! { - /// API for querying information about headers from the Rialto Bridge Pallet - pub trait RialtoPoAHeaderApi { - /// Returns number and hash of the best block known to the bridge module. - /// - /// The caller should only submit an `import_header` transaction that makes - /// (or leads to making) other header the best one. - fn best_block() -> (u64, H256); - /// Returns number and hash of the best finalized block known to the bridge module. - fn finalized_block() -> (u64, H256); - /// Returns true if the import of given block requires transactions receipts. - fn is_import_requires_receipts(header: AuraHeader) -> bool; - /// Returns true if header is known to the runtime. 
- fn is_known_block(hash: H256) -> bool; - } - - /// API for querying information about headers from the Kovan Bridge Pallet - pub trait KovanHeaderApi { - /// Returns number and hash of the best block known to the bridge module. - /// - /// The caller should only submit an `import_header` transaction that makes - /// (or leads to making) other header the best one. - fn best_block() -> (u64, H256); - /// Returns number and hash of the best finalized block known to the bridge module. - fn finalized_block() -> (u64, H256); - /// Returns true if the import of given block requires transactions receipts. - fn is_import_requires_receipts(header: AuraHeader) -> bool; - /// Returns true if header is known to the runtime. - fn is_known_block(hash: H256) -> bool; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use hex_literal::hex; - - #[test] - fn transfer_transaction_decode_works() { - // value transfer transaction - // https://etherscan.io/tx/0xb9d4ad5408f53eac8627f9ccd840ba8fb3469d55cd9cc2a11c6e049f1eef4edd - // https://etherscan.io/getRawTx?tx=0xb9d4ad5408f53eac8627f9ccd840ba8fb3469d55cd9cc2a11c6e049f1eef4edd - let raw_tx = hex!("f86c0a85046c7cfe0083016dea94d1310c1e038bc12865d3d3997275b3e4737c6302880b503be34d9fe80080269fc7eaaa9c21f59adf8ad43ed66cf5ef9ee1c317bd4d32cd65401e7aaca47cfaa0387d79c65b90be6260d09dcfb780f29dd8133b9b1ceb20b83b7e442b4bfc30cb"); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: hex!("67835910d32600471f388a137bbff3eb07993c04").into(), - unsigned: UnsignedTransaction { - nonce: 10.into(), - gas_price: 19000000000u64.into(), - gas: 93674.into(), - to: Some(hex!("d1310c1e038bc12865d3d3997275b3e4737c6302").into()), - value: 815217380000000000_u64.into(), - payload: Default::default(), - } - }), - ); - - // Kovan value transfer transaction - // https://kovan.etherscan.io/tx/0x3b4b7bd41c1178045ccb4753aa84c1ef9864b4d712fa308b228917cd837915da - // 
https://kovan.etherscan.io/getRawTx?tx=0x3b4b7bd41c1178045ccb4753aa84c1ef9864b4d712fa308b228917cd837915da - let raw_tx = hex!("f86a822816808252089470c1ccde719d6f477084f07e4137ab0e55f8369f8930cf46e92063afd8008078a00e4d1f4d8aa992bda3c105ff3d6e9b9acbfd99facea00985e2131029290adbdca028ea29a46a4b66ec65b454f0706228e3768cb0ecf755f67c50ddd472f11d5994"); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: hex!("faadface3fbd81ce37b0e19c0b65ff4234148132").into(), - unsigned: UnsignedTransaction { - nonce: 10262.into(), - gas_price: 0.into(), - gas: 21000.into(), - to: Some(hex!("70c1ccde719d6f477084f07e4137ab0e55f8369f").into()), - value: 900379597077600000000_u128.into(), - payload: Default::default(), - }, - }), - ); - } - - #[test] - fn payload_transaction_decode_works() { - // contract call transaction - // https://etherscan.io/tx/0xdc2b996b4d1d6922bf6dba063bfd70913279cb6170967c9bb80252aeb061cf65 - // https://etherscan.io/getRawTx?tx=0xdc2b996b4d1d6922bf6dba063bfd70913279cb6170967c9bb80252aeb061cf65 - let raw_tx = hex!("f8aa76850430e234008301500094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000e08f35f66867a454835b25118f1e490e7f9e9a7400000000000000000000000000000000000000000000000000000000004c4b4025a0964e023999621dc3d4d831c43c71f7555beb6d1192dee81a3674b3f57e310f21a00f229edd86f841d1ee4dc48cc16667e2283817b1d39bae16ced10cd206ae4fd4"); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: hex!("2b9a4d37bdeecdf994c4c9ad7f3cf8dc632f7d70").into(), - unsigned: UnsignedTransaction { - nonce: 118.into(), - gas_price: 18000000000u64.into(), - gas: 86016.into(), - to: Some(hex!("dac17f958d2ee523a2206206994597c13d831ec7").into()), - value: 0.into(), - payload: hex!("a9059cbb000000000000000000000000e08f35f66867a454835b25118f1e490e7f9e9a7400000000000000000000000000000000000000000000000000000000004c4b40").to_vec(), - }, - }), - ); - - // Kovan contract call transaction - // 
https://kovan.etherscan.io/tx/0x2904b4451d23665492239016b78da052d40d55fdebc7304b38e53cf6a37322cf - // https://kovan.etherscan.io/getRawTx?tx=0x2904b4451d23665492239016b78da052d40d55fdebc7304b38e53cf6a37322cf - let raw_tx = hex!("f8ac8302200b843b9aca00830271009484dd11eb2a29615303d18149c0dbfa24167f896680b844a9059cbb00000000000000000000000001503dfc5ad81bf630d83697e98601871bb211b600000000000000000000000000000000000000000000000000000000000027101ba0ce126d2cca81f5e245f292ff84a0d915c0a4ac52af5c51219db1e5d36aa8da35a0045298b79dac631907403888f9b04c2ab5509fe0cc31785276d30a40b915fcf9"); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: hex!("617da121abf03d4c1af572f5a4e313e26bef7bdc").into(), - unsigned: UnsignedTransaction { - nonce: 139275.into(), - gas_price: 1000000000.into(), - gas: 160000.into(), - to: Some(hex!("84dd11eb2a29615303d18149c0dbfa24167f8966").into()), - value: 0.into(), - payload: hex!("a9059cbb00000000000000000000000001503dfc5ad81bf630d83697e98601871bb211b60000000000000000000000000000000000000000000000000000000000002710").to_vec(), - }, - }), - ); - } - - #[test] - fn is_successful_raw_receipt_works() { - assert!(Receipt::is_successful_raw_receipt(&[]).is_err()); - - assert_eq!( - Receipt::is_successful_raw_receipt( - &Receipt { - outcome: TransactionOutcome::Unknown, - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp() - ), - Ok(false), - ); - assert_eq!( - Receipt::is_successful_raw_receipt( - &Receipt { - outcome: TransactionOutcome::StateRoot(Default::default()), - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp() - ), - Ok(false), - ); - assert_eq!( - Receipt::is_successful_raw_receipt( - &Receipt { - outcome: TransactionOutcome::StatusCode(0), - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp() - ), - Ok(false), - ); - assert_eq!( - Receipt::is_successful_raw_receipt( - &Receipt { - outcome: 
TransactionOutcome::StatusCode(1), - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp() - ), - Ok(true), - ); - } - - #[test] - fn is_successful_raw_receipt_with_empty_data() { - let mut stream = RlpStream::new(); - stream.begin_list(4); - stream.append_empty_data(); - stream.append(&1u64); - stream.append(&2u64); - stream.append(&3u64); - - assert_eq!(Receipt::is_successful_raw_receipt(&stream.out()), Ok(false)); - } -} diff --git a/polkadot/bridges/primitives/ethereum-poa/src/signatures.rs b/polkadot/bridges/primitives/ethereum-poa/src/signatures.rs deleted file mode 100644 index a4e076f2200c6217b6148a212d16d4820465f171..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/ethereum-poa/src/signatures.rs +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . -// - -//! Helpers related to signatures. -//! -//! Used for testing and benchmarking. - -// reexport to avoid direct secp256k1 deps by other crates -pub use secp256k1::SecretKey; - -use crate::{ - public_to_address, rlp_encode, step_validator, Address, AuraHeader, RawTransaction, UnsignedTransaction, H256, - H520, U256, -}; - -use secp256k1::{Message, PublicKey}; - -/// Utilities for signing headers. 
-pub trait SignHeader { - /// Signs header by given author. - fn sign_by(self, author: &SecretKey) -> AuraHeader; - /// Signs header by given authors set. - fn sign_by_set(self, authors: &[SecretKey]) -> AuraHeader; -} - -/// Utilities for signing transactions. -pub trait SignTransaction { - /// Sign transaction by given author. - fn sign_by(self, author: &SecretKey, chain_id: Option) -> RawTransaction; -} - -impl SignHeader for AuraHeader { - fn sign_by(mut self, author: &SecretKey) -> Self { - self.author = secret_to_address(author); - - let message = self.seal_hash(false).unwrap(); - let signature = sign(author, message); - self.seal[1] = rlp_encode(&signature).to_vec(); - self - } - - fn sign_by_set(self, authors: &[SecretKey]) -> Self { - let step = self.step().unwrap(); - let author = step_validator(authors, step); - self.sign_by(author) - } -} - -impl SignTransaction for UnsignedTransaction { - fn sign_by(self, author: &SecretKey, chain_id: Option) -> RawTransaction { - let message = self.message(chain_id); - let signature = sign(author, message); - let signature_r = U256::from_big_endian(&signature.as_fixed_bytes()[..32][..]); - let signature_s = U256::from_big_endian(&signature.as_fixed_bytes()[32..64][..]); - let signature_v = signature.as_fixed_bytes()[64] as u64; - let signature_v = signature_v + if let Some(n) = chain_id { 35 + n * 2 } else { 27 }; - - let mut stream = rlp::RlpStream::new_list(9); - self.rlp_to(None, &mut stream); - stream.append(&signature_v); - stream.append(&signature_r); - stream.append(&signature_s); - stream.out().to_vec() - } -} - -/// Return author's signature over given message. 
-pub fn sign(author: &SecretKey, message: H256) -> H520 { - let (signature, recovery_id) = secp256k1::sign(&Message::parse(message.as_fixed_bytes()), author); - let mut raw_signature = [0u8; 65]; - raw_signature[..64].copy_from_slice(&signature.serialize()); - raw_signature[64] = recovery_id.serialize(); - raw_signature.into() -} - -/// Returns address corresponding to given secret key. -pub fn secret_to_address(secret: &SecretKey) -> Address { - let public = PublicKey::from_secret_key(secret); - let mut raw_public = [0u8; 64]; - raw_public.copy_from_slice(&public.serialize()[1..]); - public_to_address(&raw_public) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{transaction_decode_rlp, Transaction}; - - #[test] - fn transaction_signed_properly() { - // case1: with chain_id replay protection + to - let signer = SecretKey::parse(&[1u8; 32]).unwrap(); - let signer_address = secret_to_address(&signer); - let unsigned = UnsignedTransaction { - nonce: 100.into(), - gas_price: 200.into(), - gas: 300.into(), - to: Some([42u8; 20].into()), - value: 400.into(), - payload: vec![1, 2, 3], - }; - let raw_tx = unsigned.clone().sign_by(&signer, Some(42)); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: signer_address, - unsigned, - }), - ); - - // case2: without chain_id replay protection + contract creation - let unsigned = UnsignedTransaction { - nonce: 100.into(), - gas_price: 200.into(), - gas: 300.into(), - to: None, - value: 400.into(), - payload: vec![1, 2, 3], - }; - let raw_tx = unsigned.clone().sign_by(&signer, None); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: signer_address, - unsigned, - }), - ); - } -} diff --git a/polkadot/bridges/primitives/header-chain/Cargo.toml b/polkadot/bridges/primitives/header-chain/Cargo.toml index e64a54a1ad20fed03dfb402243509499052851fe..76b710247f746c0358791a9e7f714dd18eec7bb9 100644 --- a/polkadot/bridges/primitives/header-chain/Cargo.toml +++ 
b/polkadot/bridges/primitives/header-chain/Cargo.toml @@ -7,18 +7,18 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -finality-grandpa = { version = "0.14.4", default-features = false } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false } +finality-grandpa = { version = "0.14.0", default-features = false } scale-info = { version = "1.0", default-features = false, features = ["derive"] } serde = { version = "1.0", optional = true } # Substrate Dependencies -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [dev-dependencies] assert_matches = "1.5" @@ -32,6 +32,7 @@ std = [ "scale-info/std", "serde/std", "frame-support/std", + "scale-info/std", "sp-core/std", "sp-finality-grandpa/std", "sp-runtime/std", diff --git 
a/polkadot/bridges/primitives/header-chain/src/justification.rs b/polkadot/bridges/primitives/header-chain/src/justification.rs index fc72564810685a942c3191e0d93679ed14cfa9ad..5f3b72517894842b37d6a8d6117543f05e002111 100644 --- a/polkadot/bridges/primitives/header-chain/src/justification.rs +++ b/polkadot/bridges/primitives/header-chain/src/justification.rs @@ -25,8 +25,10 @@ use frame_support::RuntimeDebug; use scale_info::TypeInfo; use sp_finality_grandpa::{AuthorityId, AuthoritySignature, SetId}; use sp_runtime::traits::Header as HeaderT; -use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; -use sp_std::prelude::*; +use sp_std::{ + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + prelude::*, +}; /// A GRANDPA Justification is a proof that a given header was finalized /// at a certain height and with a certain set of authorities. @@ -38,7 +40,8 @@ pub struct GrandpaJustification { /// The round (voting period) this justification is valid for. pub round: u64, /// The set of votes for the chain which is to be finalized. - pub commit: finality_grandpa::Commit, + pub commit: + finality_grandpa::Commit, /// A proof that the chain of blocks in the commit are related to each other. pub votes_ancestries: Vec
, } @@ -58,7 +61,8 @@ pub enum Error { InvalidJustificationTarget, /// The authority has provided an invalid signature. InvalidAuthoritySignature, - /// The justification contains pre-commit for header that is not a descendant of the commit header. + /// The justification contains precommit for header that is not a descendant of the commit + /// header. PrecommitIsNotCommitDescendant, /// The cumulative weight of all votes in the justification is not enough to justify commit /// header finalization. @@ -88,7 +92,7 @@ where { // ensure that it is justification for the expected header if (justification.commit.target_hash, justification.commit.target_number) != finalized_target { - return Err(Error::InvalidJustificationTarget); + return Err(Error::InvalidJustificationTarget) } let mut chain = AncestryChain::new(&justification.votes_ancestries); @@ -100,30 +104,32 @@ where let authority_info = match authorities_set.get(&signed.id) { Some(authority_info) => authority_info, None => { - // just ignore precommit from unknown authority as `finality_grandpa::import_precommit` does - continue; - } + // just ignore precommit from unknown authority as + // `finality_grandpa::import_precommit` does + continue + }, }; // check if authority has already voted in the same round. // // there's a lot of code in `validate_commit` and `import_precommit` functions inside - // `finality-grandpa` crate (mostly related to reporing equivocations). But the only thing that we - // care about is that only first vote from the authority is accepted + // `finality-grandpa` crate (mostly related to reporing equivocations). 
But the only thing + // that we care about is that only first vote from the authority is accepted if !votes.insert(signed.id.clone()) { - continue; + continue } // everything below this line can't just `continue`, because state is already altered // all precommits must be for block higher than the target if signed.precommit.target_number < justification.commit.target_number { - return Err(Error::PrecommitIsNotCommitDescendant); + return Err(Error::PrecommitIsNotCommitDescendant) } - // all precommits must be for target block descendants - chain = chain.ensure_descendant(&justification.commit.target_hash, &signed.precommit.target_hash)?; - // since we know now that the precommit target is the descendant of the justification target, - // we may increase 'weight' of the justification target + // all precommits must be for target block descendents + chain = chain + .ensure_descendant(&justification.commit.target_hash, &signed.precommit.target_hash)?; + // since we know now that the precommit target is the descendant of the justification + // target, we may increase 'weight' of the justification target // // there's a lot of code in the `VoteGraph::insert` method inside `finality-grandpa` crate, // but in the end it is only used to find GHOST, which we don't care about. The only thing @@ -145,13 +151,13 @@ where authorities_set_id, &mut signature_buffer, ) { - return Err(Error::InvalidAuthoritySignature); + return Err(Error::InvalidAuthoritySignature) } } // check that there are no extra headers in the justification if !chain.unvisited.is_empty() { - return Err(Error::ExtraHeadersInVotesAncestries); + return Err(Error::ExtraHeadersInVotesAncestries) } // check that the cumulative weight of validators voted for the justification target (or one @@ -169,7 +175,7 @@ where pub struct AncestryChain { /// Header hash => parent header hash mapping. pub parents: BTreeMap, - /// Hashes of headers that weren't visited by `is_ancestor` method. 
+ /// Hashes of headers that were not visited by `is_ancestor` method. pub unvisited: BTreeSet, } @@ -187,7 +193,8 @@ impl AncestryChain
{ AncestryChain { parents, unvisited } } - /// Returns `Err(_)` if `precommit_target` is a descendant of the `commit_target` block and `Ok(_)` otherwise. + /// Returns `Err(_)` if `precommit_target` is a descendant of the `commit_target` block and + /// `Ok(_)` otherwise. pub fn ensure_descendant( mut self, commit_target: &Header::Hash, @@ -196,22 +203,22 @@ impl AncestryChain
{ let mut current_hash = *precommit_target; loop { if current_hash == *commit_target { - break; + break } let is_visited_before = !self.unvisited.remove(¤t_hash); current_hash = match self.parents.get(¤t_hash) { Some(parent_hash) => { if is_visited_before { - // `Some(parent_hash)` means that the `current_hash` is in the `parents` container - // `is_visited_before` means that it has been visited before in some of previous calls - // => since we assume that previous call has finished with `true`, this also will - // be finished with `true` - return Ok(self); + // `Some(parent_hash)` means that the `current_hash` is in the `parents` + // container `is_visited_before` means that it has been visited before in + // some of previous calls => since we assume that previous call has finished + // with `true`, this also will be finished with `true` + return Ok(self) } *parent_hash - } + }, None => return Err(Error::PrecommitIsNotCommitDescendant), }; } diff --git a/polkadot/bridges/primitives/header-chain/src/lib.rs b/polkadot/bridges/primitives/header-chain/src/lib.rs index 16511e99f79e706dee6e0561d2f3f852aba0b406..5feb30aec3eeeb136b9558bf0efe8e5cf4e8cf85 100644 --- a/polkadot/bridges/primitives/header-chain/src/lib.rs +++ b/polkadot/bridges/primitives/header-chain/src/lib.rs @@ -20,24 +20,21 @@ #![cfg_attr(not(feature = "std"), no_std)] use codec::{Codec, Decode, Encode, EncodeLike}; -use core::clone::Clone; -use core::cmp::Eq; -use core::default::Default; -use core::fmt::Debug; +use core::{clone::Clone, cmp::Eq, default::Default, fmt::Debug}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; use sp_finality_grandpa::{AuthorityList, ConsensusLog, SetId, GRANDPA_ENGINE_ID}; -use sp_runtime::RuntimeDebug; -use sp_runtime::{generic::OpaqueDigestItemId, traits::Header as HeaderT}; +use sp_runtime::{generic::OpaqueDigestItemId, traits::Header as HeaderT, RuntimeDebug}; +use sp_std::boxed::Box; pub mod justification; /// A type that can be 
used as a parameter in a dispatchable function. /// /// When using `decl_module` all arguments for call functions must implement this trait. -pub trait Parameter: Codec + EncodeLike + Clone + Eq + Debug {} -impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + Debug {} +pub trait Parameter: Codec + EncodeLike + Clone + Eq + Debug + TypeInfo {} +impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + Debug + TypeInfo {} /// A GRANDPA Authority List and ID. #[derive(Default, Encode, Decode, RuntimeDebug, PartialEq, Clone, TypeInfo)] @@ -63,7 +60,7 @@ impl AuthoritySet { #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct InitializationData { /// The header from which we should start syncing. - pub header: H, + pub header: Box, /// The initial authorities of the pallet. pub authority_list: AuthorityList, /// The ID of the initial authority set. @@ -82,7 +79,9 @@ pub trait InclusionProofVerifier { /// Verify that transaction is a part of given block. /// /// Returns Some(transaction) if proof is valid and None otherwise. - fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option; + fn verify_transaction_inclusion_proof( + proof: &Self::TransactionInclusionProof, + ) -> Option; } /// A trait for pallets which want to keep track of finalized headers from a bridged chain. 
diff --git a/polkadot/bridges/primitives/header-chain/tests/implementation_match.rs b/polkadot/bridges/primitives/header-chain/tests/implementation_match.rs index 0b55c19035287bd72932d66c8c8b6e5b6ba416cd..51275bbd645e50d45759df82672b1002eea161a2 100644 --- a/polkadot/bridges/primitives/header-chain/tests/implementation_match.rs +++ b/polkadot/bridges/primitives/header-chain/tests/implementation_match.rs @@ -23,8 +23,8 @@ use assert_matches::assert_matches; use bp_header_chain::justification::{verify_justification, Error, GrandpaJustification}; use bp_test_utils::{ - header_id, make_justification_for_header, signed_precommit, test_header, Account, JustificationGeneratorParams, - ALICE, BOB, CHARLIE, DAVE, EVE, TEST_GRANDPA_SET_ID, + header_id, make_justification_for_header, signed_precommit, test_header, Account, + JustificationGeneratorParams, ALICE, BOB, CHARLIE, DAVE, EVE, TEST_GRANDPA_SET_ID, }; use finality_grandpa::voter_set::VoterSet; use sp_finality_grandpa::{AuthorityId, AuthorityWeight}; @@ -44,18 +44,22 @@ impl AncestryChain { } impl finality_grandpa::Chain for AncestryChain { - fn ancestry(&self, base: TestHash, block: TestHash) -> Result, finality_grandpa::Error> { + fn ancestry( + &self, + base: TestHash, + block: TestHash, + ) -> Result, finality_grandpa::Error> { let mut route = Vec::new(); let mut current_hash = block; loop { if current_hash == base { - break; + break } match self.0.parents.get(¤t_hash).cloned() { Some(parent_hash) => { current_hash = parent_hash; route.push(current_hash); - } + }, _ => return Err(finality_grandpa::Error::NotDescendent), } } @@ -81,14 +85,11 @@ fn minimal_accounts_set() -> Vec<(Account, AuthorityWeight)> { vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1)] } -/// Get a minimal subset of GRANDPA authorities that have enough cumulative vote weight to justify a header finality. +/// Get a minimal subset of GRANDPA authorities that have enough cumulative vote weight to justify a +/// header finality. 
pub fn minimal_voter_set() -> VoterSet { - VoterSet::new( - minimal_accounts_set() - .iter() - .map(|(id, w)| (AuthorityId::from(*id), *w)), - ) - .unwrap() + VoterSet::new(minimal_accounts_set().iter().map(|(id, w)| (AuthorityId::from(*id), *w))) + .unwrap() } /// Make a valid GRANDPA justification with sensible defaults. @@ -174,14 +175,8 @@ fn same_result_when_justification_contains_duplicate_vote() { let mut justification = make_default_justification(&test_header(1)); // the justification may contain exactly the same vote (i.e. same precommit and same signature) // multiple times && it isn't treated as an error by original implementation - justification - .commit - .precommits - .push(justification.commit.precommits[0].clone()); - justification - .commit - .precommits - .push(justification.commit.precommits[0].clone()); + justification.commit.precommits.push(justification.commit.precommits[0].clone()); + justification.commit.precommits.push(justification.commit.precommits[0].clone()); // our implementation succeeds assert_eq!( diff --git a/polkadot/bridges/primitives/message-dispatch/Cargo.toml b/polkadot/bridges/primitives/message-dispatch/Cargo.toml index 9b24ae86a067858d8b748ba216116f438255edc9..9897b3199781db61b2e3dd98698b272cd92a4f8e 100644 --- a/polkadot/bridges/primitives/message-dispatch/Cargo.toml +++ b/polkadot/bridges/primitives/message-dispatch/Cargo.toml @@ -8,13 +8,13 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] bp-runtime = { path = "../runtime", default-features = false } -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false } scale-info = { version = "1.0", default-features = false, features = ["derive"] } # Substrate Dependencies -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = 
"https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [features] default = ["std"] diff --git a/polkadot/bridges/primitives/message-dispatch/src/lib.rs b/polkadot/bridges/primitives/message-dispatch/src/lib.rs index b2683674b776946920b2e28da87cf014746b0dc2..07e448ee7ae62604399c693e9195c7b43b796d09 100644 --- a/polkadot/bridges/primitives/message-dispatch/src/lib.rs +++ b/polkadot/bridges/primitives/message-dispatch/src/lib.rs @@ -35,7 +35,7 @@ pub type Weight = u64; pub type SpecVersion = u32; /// A generic trait to dispatch arbitrary messages delivered over the bridge. -pub trait MessageDispatch { +pub trait MessageDispatch { /// A type of the message to be dispatched. type Message: codec::Decode; @@ -61,7 +61,7 @@ pub trait MessageDispatch { fn dispatch Result<(), ()>>( source_chain: ChainId, target_chain: ChainId, - id: MessageId, + id: BridgeMessageId, message: Result, pay_dispatch_fee: P, ) -> MessageDispatchResult; @@ -78,7 +78,7 @@ pub enum CallOrigin { +pub struct MessagePayload< + SourceChainAccountId, + TargetChainAccountPublic, + TargetChainSignature, + Call, +> { /// Runtime specification version. We only dispatch messages that have the same /// runtime version. Otherwise we risk to misinterpret encoded calls. 
pub spec_version: SpecVersion, diff --git a/polkadot/bridges/primitives/messages/Cargo.toml b/polkadot/bridges/primitives/messages/Cargo.toml index 191742005f443efc78a330d08774d41252dabd70..31ec46222cd890aa2a3e13a3724937c3e2e1cfc9 100644 --- a/polkadot/bridges/primitives/messages/Cargo.toml +++ b/polkadot/bridges/primitives/messages/Cargo.toml @@ -8,10 +8,10 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] bitvec = { version = "0.20", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive", "bit-vec"] } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["derive", "bit-vec"] } impl-trait-for-tuples = "0.2" -scale-info = { version = "1.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.101", optional = true, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["bit-vec", "derive"] } +serde = { version = "1.0", optional = true, features = ["derive"] } # Bridge dependencies @@ -19,9 +19,9 @@ bp-runtime = { path = "../runtime", default-features = false } # Substrate Dependencies -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [features] default = ["std"] diff --git a/polkadot/bridges/primitives/messages/src/lib.rs 
b/polkadot/bridges/primitives/messages/src/lib.rs index 344735b53c91647640a619610f7ffb23f95af293..abefe8d789b8754d91edfac6aafdeb4c07d0c49c 100644 --- a/polkadot/bridges/primitives/messages/src/lib.rs +++ b/polkadot/bridges/primitives/messages/src/lib.rs @@ -76,7 +76,7 @@ pub type LaneId = [u8; 4]; pub type MessageNonce = u64; /// Message id as a tuple. -pub type MessageId = (LaneId, MessageNonce); +pub type BridgeMessageId = (LaneId, MessageNonce); /// Opaque message payload. We only decode this payload when it is dispatched. pub type MessagePayload = Vec; @@ -111,22 +111,23 @@ pub struct Message { /// Inbound lane data. #[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] pub struct InboundLaneData { - /// Identifiers of relayers and messages that they have delivered to this lane (ordered by message nonce). + /// Identifiers of relayers and messages that they have delivered to this lane (ordered by + /// message nonce). /// /// This serves as a helper storage item, to allow the source chain to easily pay rewards - /// to the relayers who succesfuly delivered messages to the target chain (inbound lane). + /// to the relayers who successfully delivered messages to the target chain (inbound lane). /// /// It is guaranteed to have at most N entries, where N is configured at the module level. /// If there are N entries in this vec, then: - /// 1) all incoming messages are rejected if they're missing corresponding `proof-of(outbound-lane.state)`; - /// 2) all incoming messages are rejected if `proof-of(outbound-lane.state).last_delivered_nonce` is - /// equal to `self.last_confirmed_nonce`. - /// Given what is said above, all nonces in this queue are in range: - /// `(self.last_confirmed_nonce; self.last_delivered_nonce()]`. 
+ /// 1) all incoming messages are rejected if they're missing corresponding + /// `proof-of(outbound-lane.state)`; 2) all incoming messages are rejected if + /// `proof-of(outbound-lane.state).last_delivered_nonce` is equal to + /// `self.last_confirmed_nonce`. Given what is said above, all nonces in this queue are in + /// range: `(self.last_confirmed_nonce; self.last_delivered_nonce()]`. /// /// When a relayer sends a single message, both of MessageNonces are the same. - /// When relayer sends messages in a batch, the first arg is the lowest nonce, second arg the highest nonce. - /// Multiple dispatches from the same relayer are allowed. + /// When relayer sends messages in a batch, the first arg is the lowest nonce, second arg the + /// highest nonce. Multiple dispatches from the same relayer are allowed. pub relayers: VecDeque>, /// Nonce of the last message that @@ -142,24 +143,26 @@ pub struct InboundLaneData { impl Default for InboundLaneData { fn default() -> Self { - InboundLaneData { - relayers: VecDeque::new(), - last_confirmed_nonce: 0, - } + InboundLaneData { relayers: VecDeque::new(), last_confirmed_nonce: 0 } } } impl InboundLaneData { - /// Returns approximate size of the struct, given number of entries in the `relayers` set and + /// Returns approximate size of the struct, given a number of entries in the `relayers` set and /// size of each entry. /// /// Returns `None` if size overflows `u32` limits. 
- pub fn encoded_size_hint(relayer_id_encoded_size: u32, relayers_entries: u32, messages_count: u32) -> Option { + pub fn encoded_size_hint( + relayer_id_encoded_size: u32, + relayers_entries: u32, + messages_count: u32, + ) -> Option { let message_nonce_size = 8; let relayers_entry_size = relayer_id_encoded_size.checked_add(2 * message_nonce_size)?; let relayers_size = relayers_entries.checked_mul(relayers_entry_size)?; let dispatch_results_per_byte = 8; - let dispatch_result_size = sp_std::cmp::max(relayers_entries, messages_count / dispatch_results_per_byte); + let dispatch_result_size = + sp_std::cmp::max(relayers_entries, messages_count / dispatch_results_per_byte); relayers_size .checked_add(message_nonce_size) .and_then(|result| result.checked_add(dispatch_result_size)) @@ -194,8 +197,8 @@ pub type DispatchResultsBitVec = BitVec; /// Unrewarded relayer entry stored in the inbound lane data. /// -/// This struct represents a continuous range of messages that have been delivered by the same relayer -/// and whose confirmations are still pending. +/// This struct represents a continuous range of messages that have been delivered by the same +/// relayer and whose confirmations are still pending. #[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] pub struct UnrewardedRelayer { /// Identifier of the relayer. @@ -218,7 +221,8 @@ pub struct DeliveredMessages { } impl DeliveredMessages { - /// Create new `DeliveredMessages` struct that confirms delivery of single nonce with given dispatch result. + /// Create new `DeliveredMessages` struct that confirms delivery of single nonce with given + /// dispatch result. pub fn new(nonce: MessageNonce, dispatch_result: bool) -> Self { DeliveredMessages { begin: nonce, @@ -227,6 +231,15 @@ impl DeliveredMessages { } } + /// Return total count of delivered messages. 
+ pub fn total_messages(&self) -> MessageNonce { + if self.end >= self.begin { + self.end - self.begin + 1 + } else { + 0 + } + } + /// Note new dispatched message. pub fn note_dispatched_message(&mut self, dispatch_result: bool) { self.end += 1; @@ -269,19 +282,20 @@ pub struct UnrewardedRelayersState { /// Outbound lane data. #[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] pub struct OutboundLaneData { - /// Nonce of oldest message that we haven't yet pruned. May point to not-yet-generated message if - /// all sent messages are already pruned. + /// Nonce of the oldest message that we haven't yet pruned. May point to not-yet-generated + /// message if all sent messages are already pruned. pub oldest_unpruned_nonce: MessageNonce, - /// Nonce of latest message, received by bridged chain. + /// Nonce of the latest message, received by bridged chain. pub latest_received_nonce: MessageNonce, - /// Nonce of latest message, generated by us. + /// Nonce of the latest message, generated by us. pub latest_generated_nonce: MessageNonce, } impl Default for OutboundLaneData { fn default() -> Self { OutboundLaneData { - // it is 1 because we're pruning everything in [oldest_unpruned_nonce; latest_received_nonce] + // it is 1 because we're pruning everything in [oldest_unpruned_nonce; + // latest_received_nonce] oldest_unpruned_nonce: 1, latest_received_nonce: 0, latest_generated_nonce: 0, @@ -292,7 +306,9 @@ impl Default for OutboundLaneData { /// Returns total number of messages in the `InboundLaneData::relayers` vector. /// /// Returns `None` if there are more messages that `MessageNonce` may fit (i.e. `MessageNonce + 1`). 
-pub fn total_unrewarded_messages(relayers: &VecDeque>) -> Option { +pub fn total_unrewarded_messages( + relayers: &VecDeque>, +) -> Option { match (relayers.front(), relayers.back()) { (Some(front), Some(back)) => { if let Some(difference) = back.messages.end.checked_sub(front.messages.begin) { @@ -300,7 +316,7 @@ pub fn total_unrewarded_messages(relayers: &VecDeque Some(0), } } @@ -314,10 +330,7 @@ mod tests { assert_eq!( total_unrewarded_messages( &vec![ - UnrewardedRelayer { - relayer: 1, - messages: DeliveredMessages::new(0, true) - }, + UnrewardedRelayer { relayer: 1, messages: DeliveredMessages::new(0, true) }, UnrewardedRelayer { relayer: 2, messages: DeliveredMessages::new(MessageNonce::MAX, true) @@ -341,7 +354,11 @@ mod tests { (13u8, 128u8), ]; for (relayer_entries, messages_count) in test_cases { - let expected_size = InboundLaneData::::encoded_size_hint(1, relayer_entries as _, messages_count as _); + let expected_size = InboundLaneData::::encoded_size_hint( + 1, + relayer_entries as _, + messages_count as _, + ); let actual_size = InboundLaneData { relayers: (1u8..=relayer_entries) .map(|i| { @@ -375,11 +392,8 @@ mod tests { #[test] fn message_dispatch_result_works() { - let delivered_messages = DeliveredMessages { - begin: 100, - end: 150, - dispatch_results: bitvec![Msb0, u8; 1; 151], - }; + let delivered_messages = + DeliveredMessages { begin: 100, end: 150, dispatch_results: bitvec![Msb0, u8; 1; 151] }; assert!(!delivered_messages.contains_message(99)); assert!(delivered_messages.contains_message(100)); diff --git a/polkadot/bridges/primitives/messages/src/source_chain.rs b/polkadot/bridges/primitives/messages/src/source_chain.rs index 392331eda6722c136a4b1423f64e334c8d5227fa..1ff05abf131eae97417354300640744c05a000c8 100644 --- a/polkadot/bridges/primitives/messages/src/source_chain.rs +++ b/polkadot/bridges/primitives/messages/src/source_chain.rs @@ -18,9 +18,14 @@ use crate::{DeliveredMessages, InboundLaneData, LaneId, MessageNonce, 
OutboundLaneData}; +use crate::UnrewardedRelayer; use bp_runtime::Size; -use frame_support::{Parameter, RuntimeDebug}; -use sp_std::{collections::btree_map::BTreeMap, fmt::Debug}; +use frame_support::{weights::Weight, Parameter, RuntimeDebug}; +use sp_std::{ + collections::{btree_map::BTreeMap, vec_deque::VecDeque}, + fmt::Debug, + ops::RangeInclusive, +}; /// The sender of the message on the source chain. pub type Sender = frame_system::RawOrigin; @@ -56,14 +61,14 @@ pub trait TargetHeaderChain { /// /// The proper implementation must ensure that the delivery-transaction with this /// payload would (at least) be accepted into target chain transaction pool AND - /// eventually will be successfully 'mined'. The most obvious incorrect implementation + /// eventually will be successfully mined. The most obvious incorrect implementation /// example would be implementation for BTC chain that accepts payloads larger than /// 1MB. BTC nodes aren't accepting transactions that are larger than 1MB, so relayer /// will be unable to craft valid transaction => this (and all subsequent) messages will /// never be delivered. fn verify_message(payload: &Payload) -> Result<(), Self::Error>; - /// Verify messages delivery proof and return lane && nonce of the latest recevied message. + /// Verify messages delivery proof and return lane && nonce of the latest received message. fn verify_messages_delivery_proof( proof: Self::MessagesDeliveryProof, ) -> Result<(LaneId, InboundLaneData), Self::Error>; @@ -81,7 +86,8 @@ pub trait LaneMessageVerifier { /// Error type. type Error: Debug + Into<&'static str>; - /// Verify message payload and return Ok(()) if message is valid and allowed to be sent over the lane. + /// Verify message payload and return Ok(()) if message is valid and allowed to be sent over the + /// lane. 
fn verify_message( submitter: &Sender, delivery_and_dispatch_fee: &Fee, @@ -95,14 +101,14 @@ pub trait LaneMessageVerifier { /// submitter is paying (in source chain tokens/assets) for: /// /// 1) submit-message-transaction-fee itself. This fee is not included in the -/// `delivery_and_dispatch_fee` and is witheld by the regular transaction payment mechanism; +/// `delivery_and_dispatch_fee` and is withheld by the regular transaction payment mechanism; /// 2) message-delivery-transaction-fee. It is submitted to the target node by relayer; /// 3) message-dispatch fee. It is paid by relayer for processing message by target chain; /// 4) message-receiving-delivery-transaction-fee. It is submitted to the source node /// by relayer. /// /// So to be sure that any non-altruist relayer would agree to deliver message, submitter -/// should set `delivery_and_dispatch_fee` to at least (equialent of): sum of fees from (2) +/// should set `delivery_and_dispatch_fee` to at least (equivalent of): sum of fees from (2) /// to (4) above, plus some interest for the relayer. pub trait MessageDeliveryAndDispatchPayment { /// Error type. @@ -121,27 +127,98 @@ pub trait MessageDeliveryAndDispatchPayment { /// The implementation may also choose to pay reward to the `confirmation_relayer`, which is /// a relayer that has submitted delivery confirmation transaction. fn pay_relayers_rewards( + lane_id: LaneId, + messages_relayers: VecDeque>, confirmation_relayer: &AccountId, - relayers_rewards: RelayersRewards, + received_range: &RangeInclusive, relayer_fund_account: &AccountId, ); +} + +/// Send message artifacts. +#[derive(RuntimeDebug, PartialEq)] +pub struct SendMessageArtifacts { + /// Nonce of the message. + pub nonce: MessageNonce, + /// Actual weight of send message call. + pub weight: Weight, +} - /// Perform some initialization in externalities-provided environment. +/// Messages bridge API to be used from other pallets. +pub trait MessagesBridge { + /// Error type. 
+ type Error: Debug; + + /// Send message over the bridge. /// - /// For instance you may ensure that particular required accounts or storage items are present. - /// Returns the number of storage reads performed. - fn initialize(_relayer_fund_account: &AccountId) -> usize { - 0 + /// Returns unique message nonce or error if send has failed. + fn send_message( + sender: Sender, + lane: LaneId, + message: Payload, + delivery_and_dispatch_fee: Balance, + ) -> Result; +} + +/// Bridge that does nothing when message is being sent. +#[derive(RuntimeDebug, PartialEq)] +pub struct NoopMessagesBridge; + +impl MessagesBridge + for NoopMessagesBridge +{ + type Error = &'static str; + + fn send_message( + _sender: Sender, + _lane: LaneId, + _message: Payload, + _delivery_and_dispatch_fee: Balance, + ) -> Result { + Ok(SendMessageArtifacts { nonce: 0, weight: 0 }) } } /// Handler for messages delivery confirmation. -#[impl_trait_for_tuples::impl_for_tuples(30)] pub trait OnDeliveryConfirmed { /// Called when we receive confirmation that our messages have been delivered to the /// target chain. The confirmation also has single bit dispatch result for every - /// confirmed message (see `DeliveredMessages` for details). - fn on_messages_delivered(_lane: &LaneId, _messages: &DeliveredMessages) {} + /// confirmed message (see `DeliveredMessages` for details). Guaranteed to be called + /// only when at least one message is delivered. + /// + /// Should return total weight consumed by the call. + /// + /// NOTE: messages pallet assumes that maximal weight that may be spent on processing + /// single message is single DB read + single DB write. So this function shall never + /// return weight that is larger than total number of messages * (db read + db write). + /// If your pallet needs more time for processing single message, please do it + /// from `on_initialize` call(s) of the next block(s). 
+ fn on_messages_delivered(_lane: &LaneId, _messages: &DeliveredMessages) -> Weight; +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl OnDeliveryConfirmed for Tuple { + fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) -> Weight { + let mut total_weight: Weight = 0; + for_tuples!( + #( + total_weight = total_weight.saturating_add(Tuple::on_messages_delivered(lane, messages)); + )* + ); + total_weight + } +} + +/// Handler for messages have been accepted +pub trait OnMessageAccepted { + /// Called when a message has been accepted by message pallet. + fn on_messages_accepted(lane: &LaneId, message: &MessageNonce) -> Weight; +} + +impl OnMessageAccepted for () { + fn on_messages_accepted(_lane: &LaneId, _message: &MessageNonce) -> Weight { + 0 + } } /// Structure that may be used in place of `TargetHeaderChain`, `LaneMessageVerifier` and @@ -149,7 +226,8 @@ pub trait OnDeliveryConfirmed { pub struct ForbidOutboundMessages; /// Error message that is used in `ForbidOutboundMessages` implementation. 
-const ALL_OUTBOUND_MESSAGES_REJECTED: &str = "This chain is configured to reject all outbound messages"; +const ALL_OUTBOUND_MESSAGES_REJECTED: &str = + "This chain is configured to reject all outbound messages"; impl TargetHeaderChain for ForbidOutboundMessages { type Error = &'static str; @@ -167,7 +245,9 @@ impl TargetHeaderChain for ForbidOutboun } } -impl LaneMessageVerifier for ForbidOutboundMessages { +impl LaneMessageVerifier + for ForbidOutboundMessages +{ type Error = &'static str; fn verify_message( @@ -181,7 +261,9 @@ impl LaneMessageVerifier for F } } -impl MessageDeliveryAndDispatchPayment for ForbidOutboundMessages { +impl MessageDeliveryAndDispatchPayment + for ForbidOutboundMessages +{ type Error = &'static str; fn pay_delivery_and_dispatch_fee( @@ -193,8 +275,10 @@ impl MessageDeliveryAndDispatchPayment f } fn pay_relayers_rewards( + _lane_id: LaneId, + _messages_relayers: VecDeque>, _confirmation_relayer: &AccountId, - _relayers_rewards: RelayersRewards, + _received_range: &RangeInclusive, _relayer_fund_account: &AccountId, ) { } diff --git a/polkadot/bridges/primitives/messages/src/target_chain.rs b/polkadot/bridges/primitives/messages/src/target_chain.rs index 8730597637918b39b2fdc85056eb8a0cf09cced9..a84ea7af907de6cb279bda58823e69ace22ad0cd 100644 --- a/polkadot/bridges/primitives/messages/src/target_chain.rs +++ b/polkadot/bridges/primitives/messages/src/target_chain.rs @@ -76,7 +76,7 @@ pub trait SourceHeaderChain { /// messages will be rejected. /// /// The `messages_count` argument verification (sane limits) is supposed to be made - /// outside of this function. This function only verifies that the proof declares exactly + /// outside this function. This function only verifies that the proof declares exactly /// `messages_count` messages. 
fn verify_messages_proof( proof: Self::MessagesProof, @@ -112,23 +112,19 @@ pub trait MessageDispatch { impl Default for ProvedLaneMessages { fn default() -> Self { - ProvedLaneMessages { - lane_state: None, - messages: Vec::new(), - } + ProvedLaneMessages { lane_state: None, messages: Vec::new() } } } impl From> for DispatchMessage { fn from(message: Message) -> Self { - DispatchMessage { - key: message.key, - data: message.data.into(), - } + DispatchMessage { key: message.key, data: message.data.into() } } } -impl From> for DispatchMessageData { +impl From> + for DispatchMessageData +{ fn from(data: MessageData) -> Self { DispatchMessageData { payload: DispatchPayload::decode(&mut &data.payload[..]), @@ -142,7 +138,8 @@ impl From> for DispatchMessageDat pub struct ForbidInboundMessages; /// Error message that is used in `ForbidOutboundMessages` implementation. -const ALL_INBOUND_MESSAGES_REJECTED: &str = "This chain is configured to reject all inbound messages"; +const ALL_INBOUND_MESSAGES_REJECTED: &str = + "This chain is configured to reject all inbound messages"; impl SourceHeaderChain for ForbidInboundMessages { type Error = &'static str; @@ -163,7 +160,10 @@ impl MessageDispatch for ForbidInboundMessages { Weight::MAX } - fn dispatch(_: &AccountId, _: DispatchMessage) -> MessageDispatchResult { + fn dispatch( + _: &AccountId, + _: DispatchMessage, + ) -> MessageDispatchResult { MessageDispatchResult { dispatch_result: false, unspent_weight: 0, diff --git a/polkadot/bridges/primitives/polkadot-core/Cargo.toml b/polkadot/bridges/primitives/polkadot-core/Cargo.toml index 5e95c223ce2ec8c7986d93c8375c6cabf270a8a8..f05edd0d91ba3cfb68927ebbae037fcf1c71d6a3 100644 --- a/polkadot/bridges/primitives/polkadot-core/Cargo.toml +++ b/polkadot/bridges/primitives/polkadot-core/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -parity-scale-codec = { version = "2.0.0", default-features = false, features = 
["derive"] } +parity-scale-codec = { version = "2.2.0", default-features = false, features = ["derive"] } scale-info = { version = "1.0", default-features = false, features = ["derive"] } # Bridge Dependencies @@ -17,9 +17,9 @@ bp-runtime = { path = "../runtime", default-features = false } # Substrate Based Dependencies -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/polkadot/bridges/primitives/polkadot-core/src/lib.rs b/polkadot/bridges/primitives/polkadot-core/src/lib.rs index 151e374e6603f085e307525c94ce7ae92c54b2b4..38e43d312b5d48412ee8bbf79f89bb20924bd1a7 100644 --- a/polkadot/bridges/primitives/polkadot-core/src/lib.rs +++ b/polkadot/bridges/primitives/polkadot-core/src/lib.rs @@ -29,6 +29,7 @@ use frame_support::{ }; use frame_system::limits; use parity_scale_codec::Compact; +use scale_info::{StaticTypeInfo, TypeInfo}; use sp_core::Hasher as HasherT; use sp_runtime::{ generic, @@ -66,18 +67,19 @@ pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; /// All polkadot-like chains are using same crypto. 
pub const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 32; -/// All Polkadot-like chains allow normal extrinsics to fill block up to 75%. +/// All Polkadot-like chains allow normal extrinsics to fill block up to 75 percent. /// /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// All Polkadot-like chains allow 2 seconds of compute with a 6 second average block time. +/// All Polkadot-like chains allow 2 seconds of compute with a 6-second average block time. /// /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. pub const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; -/// All Polkadot-like chains assume that an on-initialize consumes 1% of the weight on average, -/// hence a single extrinsic will not be allowed to consume more than `AvailableBlockRatio - 1%`. +/// All Polkadot-like chains assume that an on-initialize consumes 1 percent of the weight on +/// average, hence a single extrinsic will not be allowed to consume more than +/// `AvailableBlockRatio - 1 percent`. /// /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(1); @@ -113,7 +115,8 @@ parameter_types! { .build_or_panic(); } -/// Get the maximum weight (compute time) that a Normal extrinsic on the Polkadot-like chain can use. +/// Get the maximum weight (compute time) that a Normal extrinsic on the Polkadot-like chain can +/// use. pub fn max_extrinsic_weight() -> Weight { BlockWeights::get() .get(DispatchClass::Normal) @@ -138,6 +141,48 @@ pub const MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE: MessageNonce = 128; /// Maximal number of unconfirmed messages at inbound lane. 
pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 8192; +// One important thing about weight-related constants here is that actually we may have +// different weights on different Polkadot-like chains. But now all deployments are +// almost the same, so we're exporting constants from this crate. + +/// Maximal weight of single message delivery confirmation transaction on Polkadot-like chain. +/// +/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` +/// weight formula computation for the case when single message is confirmed. The result then must +/// be rounded up to account possible future runtime upgrades. +pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000; + +/// Increase of delivery transaction weight on Polkadot-like chain with every additional message +/// byte. +/// +/// This value is a result of +/// `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The result then +/// must be rounded up to account possible future runtime upgrades. +pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000; + +/// Maximal number of bytes, included in the signed Polkadot-like transaction apart from the encoded +/// call itself. +/// +/// Can be computed by subtracting encoded call size from raw transaction size. +pub const TX_EXTRA_BYTES: u32 = 256; + +/// Weight of single regular message delivery transaction on Polkadot-like chain. +/// +/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call +/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` +/// bytes is delivered. The message must have dispatch weight set to zero. The result then must be +/// rounded up to account possible future runtime upgrades. 
+pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000; + +/// Weight of pay-dispatch-fee operation for inbound messages at Polkadot-like chain. +/// +/// This value corresponds to the result of +/// `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` call for your +/// chain. Don't put too much reserve there, because it is used to **decrease** +/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery +/// transactions cheaper. +pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000; + /// Re-export `time_units` to make usage easier. pub use time_units::*; @@ -165,7 +210,7 @@ pub type Index = u32; /// Hashing type. pub type Hashing = BlakeTwo256; -/// The type of an object that can produce hashes on Polkadot-like chains. +/// The type of object that can produce hashes on Polkadot-like chains. pub type Hasher = BlakeTwo256; /// The header type used by Polkadot-like chains. @@ -180,6 +225,9 @@ pub type AccountPublic = ::Signer; /// Id of account on Polkadot-like chains. pub type AccountId = ::AccountId; +/// Address of account on Polkadot-like chains. +pub type AccountAddress = MultiAddress; + /// Index of a transaction on the Polkadot-like chains. pub type Nonce = u32; @@ -194,18 +242,13 @@ pub type Balance = u128; /// Unchecked Extrinsic type. pub type UncheckedExtrinsic = - generic::UncheckedExtrinsic, Call, Signature, SignedExtensions>; + generic::UncheckedExtrinsic>; + +/// Account address, used by the Polkadot-like chain. +pub type Address = MultiAddress; /// A type of the data encoded as part of the transaction. -pub type SignedExtra = ( - (), - (), - (), - sp_runtime::generic::Era, - Compact, - (), - Compact, -); +pub type SignedExtra = ((), (), (), sp_runtime::generic::Era, Compact, (), Compact); /// Parameters which are part of the payload used to produce transaction signature, /// but don't end up in the transaction itself (i.e. inherent part of the runtime). 
@@ -213,7 +256,7 @@ pub type AdditionalSigned = (u32, u32, Hash, Hash, (), (), ()); /// A simplified version of signed extensions meant for producing signed transactions /// and signed payload in the client code. -#[derive(PartialEq, Eq, Clone, RuntimeDebug, scale_info::TypeInfo)] +#[derive(PartialEq, Eq, Clone, RuntimeDebug, TypeInfo)] pub struct SignedExtensions { encode_payload: SignedExtra, additional_signed: AdditionalSigned, @@ -227,7 +270,9 @@ impl parity_scale_codec::Encode for SignedExtensions { } impl parity_scale_codec::Decode for SignedExtensions { - fn decode(_input: &mut I) -> Result { + fn decode( + _input: &mut I, + ) -> Result { unimplemented!("SignedExtensions are never meant to be decoded, they are only used to create transaction"); } } @@ -235,26 +280,26 @@ impl parity_scale_codec::Decode for SignedExtensions { impl SignedExtensions { pub fn new( version: sp_version::RuntimeVersion, - era: sp_runtime::generic::Era, + era: bp_runtime::TransactionEraOf, genesis_hash: Hash, nonce: Nonce, tip: Balance, ) -> Self { Self { encode_payload: ( - (), // spec version - (), // tx version - (), // genesis - era, // era - nonce.into(), // nonce (compact encoding) - (), // Check weight - tip.into(), // transaction payment / tip (compact encoding) + (), // spec version + (), // tx version + (), // genesis + era.frame_era(), // era + nonce.into(), // nonce (compact encoding) + (), // Check weight + tip.into(), // transaction payment / tip (compact encoding) ), additional_signed: ( version.spec_version, version.transaction_version, genesis_hash, - genesis_hash, + era.signed_payload(genesis_hash), (), (), (), @@ -264,6 +309,18 @@ impl SignedExtensions { } } +impl SignedExtensions { + /// Return signer nonce, used to craft transaction. + pub fn nonce(&self) -> Nonce { + self.encode_payload.4.into() + } + + /// Return transaction tip. 
+ pub fn tip(&self) -> Balance { + self.encode_payload.6.into() + } +} + impl sp_runtime::traits::SignedExtension for SignedExtensions where Call: parity_scale_codec::Codec @@ -273,7 +330,7 @@ where + Clone + Eq + PartialEq - + scale_info::StaticTypeInfo, + + StaticTypeInfo, Call: Dispatchable, { const IDENTIFIER: &'static str = "Not needed."; @@ -283,7 +340,9 @@ where type AdditionalSigned = AdditionalSigned; type Pre = (); - fn additional_signed(&self) -> Result { + fn additional_signed( + &self, + ) -> Result { Ok(self.additional_signed) } } @@ -297,6 +356,11 @@ impl Chain for PolkadotLike { type Hash = Hash; type Hasher = Hasher; type Header = Header; + + type AccountId = AccountId; + type Balance = Balance; + type Index = Index; + type Signature = Signature; } /// Convert a 256-bit hash into an AccountId. @@ -311,7 +375,7 @@ impl Convert for AccountIdConverter { /// Return a storage key for account data. /// /// This is based on FRAME storage-generation code from Substrate: -/// https://github.com/paritytech/substrate/blob/c939ceba381b6313462d47334f775e128ea4e95d/frame/support/src/storage/generator/map.rs#L74 +/// [link](https://github.com/paritytech/substrate/blob/c939ceba381b6313462d47334f775e128ea4e95d/frame/support/src/storage/generator/map.rs#L74) /// The equivalent command to invoke in case full `Runtime` is known is this: /// `let key = frame_system::Account::::storage_map_final_key(&account_id);` pub fn account_info_storage_key(id: &AccountId) -> Vec { @@ -319,7 +383,9 @@ pub fn account_info_storage_key(id: &AccountId) -> Vec { let storage_prefix_hashed = Twox128::hash(b"Account"); let key_hashed = parity_scale_codec::Encode::using_encoded(id, Blake2_128Concat::hash); - let mut final_key = Vec::with_capacity(module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len()); + let mut final_key = Vec::with_capacity( + module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len(), + ); 
final_key.extend_from_slice(&module_prefix_hashed[..]); final_key.extend_from_slice(&storage_prefix_hashed[..]); @@ -347,8 +413,8 @@ mod tests { #[test] fn should_generate_storage_key() { let acc = [ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, - 30, 31, 32, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, ] .into(); let key = account_info_storage_key(&acc); diff --git a/polkadot/bridges/primitives/runtime/Cargo.toml b/polkadot/bridges/primitives/runtime/Cargo.toml index cdd47e30d3f8ca313dc9488b8ac4c17e05c86d84..7cc165fb4e9c418519b6fb6b8013572f312b4d66 100644 --- a/polkadot/bridges/primitives/runtime/Cargo.toml +++ b/polkadot/bridges/primitives/runtime/Cargo.toml @@ -7,24 +7,23 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false } hash-db = { version = "0.15.2", default-features = false } num-traits = { version = "0.2", default-features = false } scale-info = { version = "1.0", default-features = false, features = ["derive"] } # Substrate Dependencies -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-io = { 
git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } - +hex-literal = "0.3" [features] default = ["std"] diff --git a/polkadot/bridges/primitives/runtime/src/chain.rs b/polkadot/bridges/primitives/runtime/src/chain.rs index cb19c6e72681d8a1d6efd3fea0ed2f66910f6445..e24694bf8b0f86ea1c34f3f3a6c0e13eb0a61504 100644 --- a/polkadot/bridges/primitives/runtime/src/chain.rs +++ b/polkadot/bridges/primitives/runtime/src/chain.rs @@ -15,12 +15,15 @@ // along with Parity Bridges Common. If not, see . 
use frame_support::Parameter; -use num_traits::AsPrimitive; -use sp_runtime::traits::{ - AtLeast32BitUnsigned, Hash as HashT, Header as HeaderT, MaybeDisplay, MaybeMallocSizeOf, MaybeSerializeDeserialize, - Member, SimpleBitOps, +use num_traits::{AsPrimitive, Bounded, CheckedSub, SaturatingAdd, Zero}; +use sp_runtime::{ + traits::{ + AtLeast32Bit, AtLeast32BitUnsigned, Hash as HashT, Header as HeaderT, MaybeDisplay, + MaybeMallocSizeOf, MaybeSerialize, MaybeSerializeDeserialize, Member, SimpleBitOps, Verify, + }, + FixedPointOperand, }; -use sp_std::str::FromStr; +use sp_std::{convert::TryFrom, fmt::Debug, hash::Hash, str::FromStr}; /// Minimal Substrate-based chain representation that may be used from no_std environment. pub trait Chain: Send + Sync + 'static { @@ -34,7 +37,7 @@ pub trait Chain: Send + Sync + 'static { type BlockNumber: Parameter + Member + MaybeSerializeDeserialize - + sp_std::hash::Hash + + Hash + Copy + Default + MaybeDisplay @@ -42,7 +45,10 @@ pub trait Chain: Send + Sync + 'static { + FromStr + MaybeMallocSizeOf + AsPrimitive - + Default; + + Default + // original `sp_runtime::traits::Header::BlockNumber` doesn't have this trait, but + // `sp_runtime::generic::Era` requires block number -> `u64` conversion. + + Into; /// A type that fulfills the abstract idea of what a Substrate hash is. // Constraits come from the associated Hash type of `sp_runtime::traits::Header` @@ -51,7 +57,7 @@ pub trait Chain: Send + Sync + 'static { type Hash: Parameter + Member + MaybeSerializeDeserialize - + sp_std::hash::Hash + + Hash + Ord + Copy + MaybeDisplay @@ -71,7 +77,48 @@ pub trait Chain: Send + Sync + 'static { /// A type that fulfills the abstract idea of what a Substrate header is. 
// See here for more info: // https://crates.parity.io/sp_runtime/traits/trait.Header.html - type Header: Parameter + HeaderT + MaybeSerializeDeserialize; + type Header: Parameter + + HeaderT + + MaybeSerializeDeserialize; + + /// The user account identifier type for the runtime. + type AccountId: Parameter + + Member + + MaybeSerializeDeserialize + + Debug + + MaybeDisplay + + Ord + + Default; + /// Balance of an account in native tokens. + /// + /// The chain may support multiple tokens, but this particular type is for token that is used + /// to pay for transaction dispatch, to reward different relayers (headers, messages), etc. + type Balance: AtLeast32BitUnsigned + + FixedPointOperand + + Parameter + + Parameter + + Member + + MaybeSerializeDeserialize + + Clone + + Copy + + Bounded + + CheckedSub + + PartialOrd + + SaturatingAdd + + Zero + + TryFrom; + /// Index of a transaction used by the chain. + type Index: Parameter + + Member + + MaybeSerialize + + Debug + + Default + + MaybeDisplay + + MaybeSerializeDeserialize + + AtLeast32Bit + + Copy; + /// Signature type, used on this chain. + type Signature: Parameter + Verify; } /// Block number used by the chain. @@ -85,3 +132,21 @@ pub type HasherOf = ::Hasher; /// Header type used by the chain. pub type HeaderOf = ::Header; + +/// Account id type used by the chain. +pub type AccountIdOf = ::AccountId; + +/// Balance type used by the chain. +pub type BalanceOf = ::Balance; + +/// Transaction index type used by the chain. +pub type IndexOf = ::Index; + +/// Signature type used by the chain. +pub type SignatureOf = ::Signature; + +/// Account public type used by the chain. +pub type AccountPublicOf = as Verify>::Signer; + +/// Transaction era used by the chain. 
+pub type TransactionEraOf = crate::TransactionEra, HashOf>; diff --git a/polkadot/bridges/primitives/runtime/src/lib.rs b/polkadot/bridges/primitives/runtime/src/lib.rs index a4bb400a93c45f273c92ef250001a9cb416f2649..051dc1f43c002e93d4be74a293ed5efa6bcbc886 100644 --- a/polkadot/bridges/primitives/runtime/src/lib.rs +++ b/polkadot/bridges/primitives/runtime/src/lib.rs @@ -19,11 +19,16 @@ #![cfg_attr(not(feature = "std"), no_std)] use codec::Encode; -use sp_core::hash::H256; +use frame_support::{RuntimeDebug, StorageHasher}; +use sp_core::{hash::H256, storage::StorageKey}; use sp_io::hashing::blake2_256; -use sp_std::convert::TryFrom; +use sp_std::{convert::TryFrom, vec::Vec}; -pub use chain::{BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf}; +pub use chain::{ + AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf, + IndexOf, SignatureOf, TransactionEraOf, +}; +pub use frame_support::storage::storage_prefix as storage_value_final_key; pub use storage_proof::{Error as StorageProofError, StorageProofChecker}; #[cfg(feature = "std")] @@ -64,19 +69,24 @@ pub const ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/ /// A unique prefix for entropy when generating a cross-chain account ID for the Root account. pub const ROOT_ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/root"; +/// Generic header Id. +#[derive(RuntimeDebug, Default, Clone, Copy, Eq, Hash, PartialEq)] +pub struct HeaderId(pub Number, pub Hash); + /// Unique identifier of the chain. /// /// In addition to its main function (identifying the chain), this type may also be used to /// identify module instance. We have a bunch of pallets that may be used in different bridges. E.g. -/// messages pallet may be deployed twice in the same runtime to bridge ThisChain with Chain1 and Chain2. -/// Sometimes we need to be able to identify deployed instance dynamically. This type may be used for that. 
+/// messages pallet may be deployed twice in the same runtime to bridge ThisChain with Chain1 and +/// Chain2. Sometimes we need to be able to identify deployed instance dynamically. This type may be +/// used for that. pub type ChainId = [u8; 4]; /// Type of accounts on the source chain. pub enum SourceAccount { /// An account that belongs to Root (privileged origin). Root, - /// A non-priviledged account. + /// A non-privileged account. /// /// The embedded account ID may or may not have a private key depending on the "owner" of the /// account (private key, pallet, proxy, etc.). @@ -99,8 +109,10 @@ where AccountId: Encode, { match id { - SourceAccount::Root => (ROOT_ACCOUNT_DERIVATION_PREFIX, bridge_id).using_encoded(blake2_256), - SourceAccount::Account(id) => (ACCOUNT_DERIVATION_PREFIX, bridge_id, id).using_encoded(blake2_256), + SourceAccount::Root => + (ROOT_ACCOUNT_DERIVATION_PREFIX, bridge_id).using_encoded(blake2_256), + SourceAccount::Account(id) => + (ACCOUNT_DERIVATION_PREFIX, bridge_id, id).using_encoded(blake2_256), } .into() } @@ -109,8 +121,8 @@ where /// /// This account is used to collect fees for relayers that are passing messages across the bridge. /// -/// The account ID can be the same across different instances of `pallet-bridge-messages` if the same -/// `bridge_id` is used. +/// The account ID can be the same across different instances of `pallet-bridge-messages` if the +/// same `bridge_id` is used. pub fn derive_relayer_fund_account_id(bridge_id: ChainId) -> H256 { ("relayer-fund-account", bridge_id).using_encoded(blake2_256).into() } @@ -124,6 +136,12 @@ pub trait Size { fn size_hint(&self) -> u32; } +impl Size for &[u8] { + fn size_hint(&self) -> u32 { + self.len() as _ + } +} + impl Size for () { fn size_hint(&self) -> u32 { 0 @@ -138,3 +156,122 @@ impl Size for PreComputedSize { u32::try_from(self.0).unwrap_or(u32::MAX) } } + +/// Era of specific transaction. 
+#[derive(RuntimeDebug, Clone, Copy)] +pub enum TransactionEra { + /// Transaction is immortal. + Immortal, + /// Transaction is valid for a given number of blocks, starting from given block. + Mortal(HeaderId, u32), +} + +impl, BlockHash: Copy> TransactionEra { + /// Prepare transaction era, based on mortality period and current best block number. + pub fn new( + best_block_id: HeaderId, + mortality_period: Option, + ) -> Self { + mortality_period + .map(|mortality_period| TransactionEra::Mortal(best_block_id, mortality_period)) + .unwrap_or(TransactionEra::Immortal) + } + + /// Create new immortal transaction era. + pub fn immortal() -> Self { + TransactionEra::Immortal + } + + /// Returns era that is used by FRAME-based runtimes. + pub fn frame_era(&self) -> sp_runtime::generic::Era { + match *self { + TransactionEra::Immortal => sp_runtime::generic::Era::immortal(), + TransactionEra::Mortal(header_id, period) => + sp_runtime::generic::Era::mortal(period as _, header_id.0.into()), + } + } + + /// Returns header hash that needs to be included in the signature payload. + pub fn signed_payload(&self, genesis_hash: BlockHash) -> BlockHash { + match *self { + TransactionEra::Immortal => genesis_hash, + TransactionEra::Mortal(header_id, _) => header_id.1, + } + } +} + +/// This is a copy of the +/// `frame_support::storage::generator::StorageMap::storage_map_final_key` for `Blake2_128Concat` +/// maps. +/// +/// We're using it because to call `storage_map_final_key` directly, we need access to the runtime +/// and pallet instance, which (sometimes) is impossible. 
+pub fn storage_map_final_key_blake2_128concat( + pallet_prefix: &str, + map_name: &str, + key: &[u8], +) -> StorageKey { + storage_map_final_key_identity( + pallet_prefix, + map_name, + &frame_support::Blake2_128Concat::hash(key), + ) +} + +/// +pub fn storage_map_final_key_twox64_concat( + pallet_prefix: &str, + map_name: &str, + key: &[u8], +) -> StorageKey { + storage_map_final_key_identity(pallet_prefix, map_name, &frame_support::Twox64Concat::hash(key)) +} + +/// This is a copy of the +/// `frame_support::storage::generator::StorageMap::storage_map_final_key` for `Identity` maps. +/// +/// We're using it because to call `storage_map_final_key` directly, we need access to the runtime +/// and pallet instance, which (sometimes) is impossible. +pub fn storage_map_final_key_identity( + pallet_prefix: &str, + map_name: &str, + key_hashed: &[u8], +) -> StorageKey { + let pallet_prefix_hashed = frame_support::Twox128::hash(pallet_prefix.as_bytes()); + let storage_prefix_hashed = frame_support::Twox128::hash(map_name.as_bytes()); + + let mut final_key = Vec::with_capacity( + pallet_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len(), + ); + + final_key.extend_from_slice(&pallet_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(key_hashed.as_ref()); + + StorageKey(final_key) +} + +/// This is how a storage key of storage parameter (`parameter_types! { storage Param: bool = false; +/// }`) is computed. 
+/// +/// Copied from `frame_support::parameter_types` macro +pub fn storage_parameter_key(parameter_name: &str) -> StorageKey { + let mut buffer = Vec::with_capacity(1 + parameter_name.len() + 1); + buffer.push(b':'); + buffer.extend_from_slice(parameter_name.as_bytes()); + buffer.push(b':'); + StorageKey(sp_io::hashing::twox_128(&buffer).to_vec()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn storage_parameter_key_works() { + assert_eq!( + storage_parameter_key("MillauToRialtoConversionRate"), + StorageKey(hex_literal::hex!("58942375551bb0af1682f72786b59d04").to_vec()), + ); + } +} diff --git a/polkadot/bridges/primitives/runtime/src/messages.rs b/polkadot/bridges/primitives/runtime/src/messages.rs index 3ab867773f9f67a837a1035629ecd71eb40ac30b..7a6687c18b776e311dccab8a929abedfd7310f8e 100644 --- a/polkadot/bridges/primitives/runtime/src/messages.rs +++ b/polkadot/bridges/primitives/runtime/src/messages.rs @@ -23,7 +23,7 @@ use scale_info::TypeInfo; /// Where message dispatch fee is paid? #[derive(Encode, Decode, RuntimeDebug, Clone, Copy, PartialEq, Eq, TypeInfo)] pub enum DispatchFeePayment { - /// The dispacth fee is paid at the source chain. + /// The dispatch fee is paid at the source chain. AtSourceChain, /// The dispatch fee is paid at the target chain. /// @@ -51,7 +51,7 @@ pub struct MessageDispatchResult { /// 2) if message has not been dispatched at all. pub unspent_weight: Weight, /// Whether the message dispatch fee has been paid during dispatch. This will be true if your - /// configuration supports pay-dispatch-fee-at-target-chain option and message sender has enabled - /// this option. + /// configuration supports pay-dispatch-fee-at-target-chain option and message sender has + /// enabled this option. 
pub dispatch_fee_paid_during_dispatch: bool, } diff --git a/polkadot/bridges/primitives/runtime/src/storage_proof.rs b/polkadot/bridges/primitives/runtime/src/storage_proof.rs index d70be93b1d2513648011351be1bcfd854095325b..9cc5b48ebd913319e4be1f29be5ea8dbcb268e60 100644 --- a/polkadot/bridges/primitives/runtime/src/storage_proof.rs +++ b/polkadot/bridges/primitives/runtime/src/storage_proof.rs @@ -42,7 +42,7 @@ where pub fn new(root: H::Out, proof: StorageProof) -> Result { let db = proof.into_memory_db(); if !db.contains(&root, EMPTY_PREFIX) { - return Err(Error::StorageRootMismatch); + return Err(Error::StorageRootMismatch) } let checker = StorageProofChecker { root, db }; @@ -52,7 +52,8 @@ where /// Reads a value from the available subset of storage. If the value cannot be read due to an /// incomplete or otherwise invalid proof, this returns an error. pub fn read_value(&self, key: &[u8]) -> Result>, Error> { - read_trie_value::, _>(&self.db, &self.root, key).map_err(|_| Error::StorageValueUnavailable) + read_trie_value::, _>(&self.db, &self.root, key) + .map_err(|_| Error::StorageValueUnavailable) } } @@ -97,7 +98,8 @@ pub mod tests { let (root, proof) = craft_valid_storage_proof(); // check proof in runtime - let checker = >::new(root, proof.clone()).unwrap(); + let checker = + >::new(root, proof.clone()).unwrap(); assert_eq!(checker.read_value(b"key1"), Ok(Some(b"value1".to_vec()))); assert_eq!(checker.read_value(b"key2"), Ok(Some(b"value2".to_vec()))); assert_eq!(checker.read_value(b"key11111"), Err(Error::StorageValueUnavailable)); diff --git a/polkadot/bridges/primitives/test-utils/Cargo.toml b/polkadot/bridges/primitives/test-utils/Cargo.toml index 14d3c031b4c8d4fa22952408c5518f4d4922c4e0..95121c60993a7e675f56fdb92cc6021f5b4b7761 100644 --- a/polkadot/bridges/primitives/test-utils/Cargo.toml +++ b/polkadot/bridges/primitives/test-utils/Cargo.toml @@ -7,7 +7,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] bp-header-chain = { 
path = "../header-chain", default-features = false } -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false } ed25519-dalek = { version = "1.0", default-features = false, features = ["u64_backend"] } finality-grandpa = { version = "0.14.4", default-features = false } sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/polkadot/bridges/primitives/test-utils/src/keyring.rs b/polkadot/bridges/primitives/test-utils/src/keyring.rs index b83678cae5e5196033e2c94a982945a7c6d3b857..059d6eb5be4f1e910679eab6e1a7db727fbcb506 100644 --- a/polkadot/bridges/primitives/test-utils/src/keyring.rs +++ b/polkadot/bridges/primitives/test-utils/src/keyring.rs @@ -45,7 +45,8 @@ impl Account { let data = self.0.encode(); let mut bytes = [0_u8; 32]; bytes[0..data.len()].copy_from_slice(&*data); - SecretKey::from_bytes(&bytes).expect("A static array of the correct length is a known good.") + SecretKey::from_bytes(&bytes) + .expect("A static array of the correct length is a known good.") } pub fn pair(&self) -> Keypair { @@ -57,7 +58,8 @@ impl Account { let public = self.public(); pair[32..].copy_from_slice(&public.to_bytes()); - Keypair::from_bytes(&pair).expect("We expect the SecretKey to be good, so this must also be good.") + Keypair::from_bytes(&pair) + .expect("We expect the SecretKey to be good, so this must also be good.") } pub fn sign(&self, msg: &[u8]) -> Signature { @@ -79,10 +81,7 @@ pub fn voter_set() -> VoterSet { /// Convenience function to get a list of Grandpa authorities. pub fn authority_list() -> AuthorityList { - test_keyring() - .iter() - .map(|(id, w)| (AuthorityId::from(*id), *w)) - .collect() + test_keyring().iter().map(|(id, w)| (AuthorityId::from(*id), *w)).collect() } /// Get the corresponding identities from the keyring for the "standard" authority set. 
diff --git a/polkadot/bridges/primitives/test-utils/src/lib.rs b/polkadot/bridges/primitives/test-utils/src/lib.rs index 64109754086cf6cfd710afb68f666d098d436678..9e044ed472dd743ae93d459bd9f2b713aae8931b 100644 --- a/polkadot/bridges/primitives/test-utils/src/lib.rs +++ b/polkadot/bridges/primitives/test-utils/src/lib.rs @@ -21,8 +21,7 @@ use bp_header_chain::justification::GrandpaJustification; use codec::Encode; use sp_application_crypto::TryFrom; -use sp_finality_grandpa::{AuthorityId, AuthorityWeight}; -use sp_finality_grandpa::{AuthoritySignature, SetId}; +use sp_finality_grandpa::{AuthorityId, AuthoritySignature, AuthorityWeight, SetId}; use sp_runtime::traits::{Header as HeaderT, One, Zero}; use sp_std::prelude::*; @@ -49,7 +48,7 @@ pub struct JustificationGeneratorParams { pub authorities: Vec<(Account, AuthorityWeight)>, /// The total number of precommit ancestors in the `votes_ancestries` field our justification. /// - /// These may be distributed among many different forks. + /// These may be distributed among many forks. pub ancestors: u32, /// The number of forks. /// @@ -72,10 +71,7 @@ impl Default for JustificationGeneratorParams { /// Make a valid GRANDPA justification with sensible defaults pub fn make_default_justification(header: &H) -> GrandpaJustification { - let params = JustificationGeneratorParams:: { - header: header.clone(), - ..Default::default() - }; + let params = JustificationGeneratorParams:: { header: header.clone(), ..Default::default() }; make_justification_for_header(params) } @@ -89,15 +85,11 @@ pub fn make_default_justification(header: &H) -> GrandpaJustificatio /// /// Note: This needs at least three authorities or else the verifier will complain about /// being given an invalid commit. 
-pub fn make_justification_for_header(params: JustificationGeneratorParams) -> GrandpaJustification { - let JustificationGeneratorParams { - header, - round, - set_id, - authorities, - mut ancestors, - forks, - } = params; +pub fn make_justification_for_header( + params: JustificationGeneratorParams, +) -> GrandpaJustification { + let JustificationGeneratorParams { header, round, set_id, authorities, mut ancestors, forks } = + params; let (target_hash, target_number) = (header.hash(), *header.number()); let mut votes_ancestries = vec![]; let mut precommits = vec![]; @@ -144,11 +136,7 @@ pub fn make_justification_for_header(params: JustificationGeneratorP GrandpaJustification { round, - commit: finality_grandpa::Commit { - target_hash, - target_number, - precommits, - }, + commit: finality_grandpa::Commit { target_hash, target_number, precommits }, votes_ancestries, } } @@ -165,10 +153,7 @@ fn generate_chain(fork_id: u32, depth: u32, ancestor: &H) -> Vec // Modifying the digest so headers at the same height but in different forks have different // hashes - header - .digest_mut() - .logs - .push(sp_runtime::DigestItem::Other(fork_id.encode())); + header.digest_mut().logs.push(sp_runtime::DigestItem::Other(fork_id.encode())); headers.push(header); } @@ -183,29 +168,26 @@ pub fn signed_precommit( round: u64, set_id: SetId, ) -> finality_grandpa::SignedPrecommit { - let precommit = finality_grandpa::Precommit { - target_hash: target.0, - target_number: target.1, - }; + let precommit = finality_grandpa::Precommit { target_hash: target.0, target_number: target.1 }; - let encoded = - sp_finality_grandpa::localized_payload(round, set_id, &finality_grandpa::Message::Precommit(precommit.clone())); + let encoded = sp_finality_grandpa::localized_payload( + round, + set_id, + &finality_grandpa::Message::Precommit(precommit.clone()), + ); let signature = signer.sign(&encoded); let raw_signature: Vec = signature.to_bytes().into(); - // Need to wrap our signature and id types that 
they match what our `SignedPrecommit` is expecting + // Need to wrap our signature and id types that they match what our `SignedPrecommit` is + // expecting let signature = AuthoritySignature::try_from(raw_signature).expect( "We know our Keypair is good, so our signature must also be good.", ); let id = (*signer).into(); - finality_grandpa::SignedPrecommit { - precommit, - signature, - id, - } + finality_grandpa::SignedPrecommit { precommit, signature, id } } /// Get a header for testing. @@ -213,13 +195,7 @@ pub fn signed_precommit( /// The correct parent hash will be used if given a non-zero header. pub fn test_header(number: H::Number) -> H { let default = |num| { - H::new( - num, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ) + H::new(num, Default::default(), Default::default(), Default::default(), Default::default()) }; let mut header = default(number); diff --git a/polkadot/bridges/primitives/currency-exchange/Cargo.toml b/polkadot/bridges/primitives/token-swap/Cargo.toml similarity index 55% rename from polkadot/bridges/primitives/currency-exchange/Cargo.toml rename to polkadot/bridges/primitives/token-swap/Cargo.toml index 43367ba7992b1751825c775c397a9d2945f20c36..4b16c3567ea6eec862733abf4d44a0a40919795a 100644 --- a/polkadot/bridges/primitives/currency-exchange/Cargo.toml +++ b/polkadot/bridges/primitives/token-swap/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "bp-currency-exchange" -description = "Primitives of currency exchange module." 
+name = "bp-token-swap" +description = "Primitives of the pallet-bridge-token-swap pallet" version = "0.1.0" authors = ["Parity Technologies "] edition = "2018" @@ -8,18 +8,20 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } # Substrate Dependencies -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } [features] default = ["std"] std = [ "codec/std", "frame-support/std", - "sp-api/std", + "scale-info/std", + "sp-core/std", "sp-std/std", ] diff --git a/polkadot/bridges/primitives/token-swap/src/lib.rs b/polkadot/bridges/primitives/token-swap/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..d46389e86891d68d9f14b68eda5574bf96c7b027 --- /dev/null +++ b/polkadot/bridges/primitives/token-swap/src/lib.rs @@ -0,0 +1,109 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::{Decode, Encode}; +use frame_support::{weights::Weight, RuntimeDebug}; +use scale_info::TypeInfo; +use sp_core::U256; +use sp_std::vec::Vec; + +/// Pending token swap state. +#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] +pub enum TokenSwapState { + /// The swap has been started using the `start_claim` call, but we have no proof that it has + /// happened at the Bridged chain. + Started, + /// The swap has happened at the Bridged chain and may be claimed by the Bridged chain party + /// using the `claim_swap` call. + Confirmed, + /// The swap has failed at the Bridged chain and This chain party may cancel it using the + /// `cancel_swap` call. + Failed, +} + +/// Token swap type. +/// +/// Different swap types give a different guarantees regarding possible swap +/// replay protection. +#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] +pub enum TokenSwapType { + /// The `target_account_at_bridged_chain` is temporary and only have funds for single swap. + /// + /// ***WARNING**: if `target_account_at_bridged_chain` still exists after the swap has been + /// completed (either by claiming or canceling), the `source_account_at_this_chain` will be + /// able to restart the swap again and repeat the swap until `target_account_at_bridged_chain` + /// depletes. + TemporaryTargetAccountAtBridgedChain, + /// This swap type prevents `source_account_at_this_chain` from restarting the swap after it + /// has been completed. 
There are two consequences: + /// + /// 1) the `source_account_at_this_chain` won't be able to call `start_swap` after given + /// ; 2) the `target_account_at_bridged_chain` won't be able to call + /// `claim_swap` (over the bridge) before block ``. + /// + /// The second element is the nonce of the swap. You must care about its uniqueness if you're + /// planning to perform another swap with exactly the same parameters (i.e. same amount, same + /// accounts, same `ThisBlockNumber`) to avoid collisions. + LockClaimUntilBlock(ThisBlockNumber, U256), +} + +/// An intention to swap `source_balance_at_this_chain` owned by `source_account_at_this_chain` +/// to `target_balance_at_bridged_chain` owned by `target_account_at_bridged_chain`. +/// +/// **IMPORTANT NOTE**: this structure is always the same during single token swap. So even +/// when chain changes, the meaning of This and Bridged are still used to point to the same chains. +/// This chain is always the chain where swap has been started. And the Bridged chain is the other +/// chain. +#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] +pub struct TokenSwap +{ + /// The type of the swap. + pub swap_type: TokenSwapType, + /// This chain balance to be swapped with `target_balance_at_bridged_chain`. + pub source_balance_at_this_chain: ThisBalance, + /// Account id of the party acting at This chain and owning the `source_account_at_this_chain`. + pub source_account_at_this_chain: ThisAccountId, + /// Bridged chain balance to be swapped with `source_balance_at_this_chain`. + pub target_balance_at_bridged_chain: BridgedBalance, + /// Account id of the party acting at the Bridged chain and owning the + /// `target_balance_at_bridged_chain`. + pub target_account_at_bridged_chain: BridgedAccountId, +} + +/// SCALE-encoded `Currency::transfer` call on the bridged chain. +pub type RawBridgedTransferCall = Vec; + +/// Token swap creation parameters. 
+#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] +pub struct TokenSwapCreation { + /// Public key of the `target_account_at_bridged_chain` account used to verify + /// `bridged_currency_transfer_signature`. + pub target_public_at_bridged_chain: BridgedAccountPublic, + /// Fee that the `source_account_at_this_chain` is ready to pay for the tokens + /// transfer message delivery and dispatch. + pub swap_delivery_and_dispatch_fee: ThisChainBalance, + /// Specification version of the Bridged chain. + pub bridged_chain_spec_version: u32, + /// SCALE-encoded tokens transfer call at the Bridged chain. + pub bridged_currency_transfer: RawBridgedTransferCall, + /// Dispatch weight of the tokens transfer call at the Bridged chain. + pub bridged_currency_transfer_weight: Weight, + /// The signature of the `target_account_at_bridged_chain` for the message + /// returned by the `pallet_bridge_dispatch::account_ownership_digest()` function call. + pub bridged_currency_transfer_signature: BridgedAccountSignature, +} diff --git a/polkadot/bridges/relays/bin-ethereum/Cargo.toml b/polkadot/bridges/relays/bin-ethereum/Cargo.toml deleted file mode 100644 index efd9c0194b28cae3164238fa1a82cb4000f9b14d..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/Cargo.toml +++ /dev/null @@ -1,48 +0,0 @@ -[package] -name = "ethereum-poa-relay" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -ansi_term = "0.12" -async-std = "1.9.0" -async-trait = "0.1.42" -clap = { version = "2.33.3", features = ["yaml"] } -codec = { package = "parity-scale-codec", version = "2.0.0" } -env_logger = "0.8.3" -ethabi = { git = "https://github.com/paritytech/ethabi", branch = "td-eth-types-11" } -ethabi-contract = { git = "https://github.com/paritytech/ethabi", branch = "td-eth-types-11" } -ethabi-derive = { git = "https://github.com/paritytech/ethabi", branch = 
"td-eth-types-11" } -futures = "0.3.12" -hex = "0.4" -hex-literal = "0.3" -libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"] } -log = "0.4.14" -num-traits = "0.2" -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0.64" -time = "0.2" - -# Bridge dependencies - -bp-currency-exchange = { path = "../../primitives/currency-exchange" } -bp-eth-poa = { path = "../../primitives/ethereum-poa" } -exchange-relay = { path = "../exchange" } -headers-relay = { path = "../headers" } -messages-relay = { path = "../messages" } -relay-ethereum-client = { path = "../client-ethereum" } -relay-rialto-client = { path = "../client-rialto" } -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } -rialto-runtime = { path = "../../bin/rialto/runtime" } - -# Substrate Dependencies - -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/bin-ethereum/README.md b/polkadot/bridges/relays/bin-ethereum/README.md deleted file mode 100644 index 9fe2f623fd05aebed50c17b464c1cefefedd2d82..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# PoA <> Substrate Bridge - -**DISCLAIMER:** *we recommend not using the bridge in "production" (to bridge significant amounts) just yet. -it's missing a code audit and should still be considered alpha. 
we can't rule out that there are bugs that might result in loss of the bridged amounts. -we'll update this disclaimer once that changes* - -These docs are very incomplete yet. Describe high-level goals here in the (near) future. diff --git a/polkadot/bridges/relays/bin-ethereum/res/substrate-bridge-abi.json b/polkadot/bridges/relays/bin-ethereum/res/substrate-bridge-abi.json deleted file mode 100644 index b7d7b4b9152cdf9ecce81b09e5b4261832ab3d7f..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/res/substrate-bridge-abi.json +++ /dev/null @@ -1,167 +0,0 @@ -[ - { - "inputs": [ - { - "internalType": "bytes", - "name": "rawInitialHeader", - "type": "bytes" - }, - { - "internalType": "uint64", - "name": "initialValidatorsSetId", - "type": "uint64" - }, - { - "internalType": "bytes", - "name": "initialValidatorsSet", - "type": "bytes" - } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "stateMutability": "nonpayable", - "type": "fallback" - }, - { - "inputs": [], - "name": "bestKnownHeader", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - }, - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "finalityTargetNumber", - "type": "uint256" - }, - { - "internalType": "bytes32", - "name": "finalityTargetHash", - "type": "bytes32" - }, - { - "internalType": "bytes", - "name": "rawFinalityProof", - "type": "bytes" - } - ], - "name": "importFinalityProof", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes", - "name": "rawHeader1", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader2", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader3", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader4", - "type": 
"bytes" - } - ], - "name": "importHeaders", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "incompleteHeaders", - "outputs": [ - { - "internalType": "uint256[]", - "name": "", - "type": "uint256[]" - }, - { - "internalType": "bytes32[]", - "name": "", - "type": "bytes32[]" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes", - "name": "rawHeader1", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader2", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader3", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader4", - "type": "bytes" - } - ], - "name": "isIncompleteHeaders", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes32", - "name": "headerHash", - "type": "bytes32" - } - ], - "name": "isKnownHeader", - "outputs": [ - { - "internalType": "bool", - "name": "", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - } -] diff --git a/polkadot/bridges/relays/bin-ethereum/res/substrate-bridge-bytecode.hex b/polkadot/bridges/relays/bin-ethereum/res/substrate-bridge-bytecode.hex deleted file mode 100644 index 6dd6a33046f6c826b7d1b0990e620c5c60719821..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/res/substrate-bridge-bytecode.hex +++ /dev/null @@ -1 +0,0 @@ 
-60806040523480156200001157600080fd5b5060405162001af838038062001af8833981810160405260608110156200003757600080fd5b81019080805160405193929190846401000000008211156200005857600080fd5b9083019060208201858111156200006e57600080fd5b82516401000000008111828201881017156200008957600080fd5b82525081516020918201929091019080838360005b83811015620000b85781810151838201526020016200009e565b50505050905090810190601f168015620000e65780820380516001836020036101000a031916815260200191505b506040818152602083015192018051929491939192846401000000008211156200010f57600080fd5b9083019060208201858111156200012557600080fd5b82516401000000008111828201881017156200014057600080fd5b82525081516020918201929091019080838360005b838110156200016f57818101518382015260200162000155565b50505050905090810190601f1680156200019d5780820380516001836020036101000a031916815260200191505b50604052505050620001ae620003d5565b620001c2846001600160e01b03620002dc16565b805160008181556002918255604080840180516001908155825160e08101845281815260208088015181830190815293518286019081526080808a0151606085019081526001600160401b038e169185019190915260a0840188905260c084018890528951885260078352959096208251815460ff191690151517815593519284019290925593519482019490945590518051949550919390926200026f9260038501929101906200040a565b506080820151600482810180546001600160401b03199081166001600160401b039485161790915560a0850151600585015560c09094015160069093019290925560038054909316908616179091558251620002d1919060208501906200040a565b5050505050620004af565b620002e6620003d5565b60008060008060008651602088016040516020810160208101602081016020810160a08588886010600019fa6200031c57600080fd5b84519b5083519a50825199508151985080519750505050505050506060816001600160401b03811180156200035057600080fd5b506040519080825280601f01601f1916602001820160405280156200037c576020820181803683370190505b5090508115620003a85787516020890160208301848184846011600019fa620003a457600080fd5b5050505b6040805160a081018252968752602087019590955293850192909252606084015250608082015292915050565b6040518060a001604052806000801916815
2602001600080191681526020016000815260200160008152602001606081525090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106200044d57805160ff19168380011785556200047d565b828001600101855582156200047d579182015b828111156200047d57825182559160200191906001019062000460565b506200048b9291506200048f565b5090565b620004ac91905b808211156200048b576000815560010162000496565b90565b61163980620004bf6000396000f3fe608060405234801561001057600080fd5b50600436106100625760003560e01c8063374c2c26146100675780636a742c0914610108578063871ebe181461033d578063d96a2deb1461036e578063e8ffbe841461038f578063fae71ae8146105d4575b600080fd5b61006f610684565b604051808060200180602001838103835285818151815260200191508051906020019060200280838360005b838110156100b357818101518382015260200161009b565b50505050905001838103825284818151815260200191508051906020019060200280838360005b838110156100f25781810151838201526020016100da565b5050505090500194505050505060405180910390f35b61033b6004803603608081101561011e57600080fd5b810190602081018135600160201b81111561013857600080fd5b82018360208201111561014a57600080fd5b803590602001918460018302840111600160201b8311171561016b57600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b8111156101bd57600080fd5b8201836020820111156101cf57600080fd5b803590602001918460018302840111600160201b831117156101f057600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b81111561024257600080fd5b82018360208201111561025457600080fd5b803590602001918460018302840111600160201b8311171561027557600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b8111156102c757600080fd5b8201836020820111156102d957600080fd5b803590602001918460018302840111600160201b831117156102fa57600080fd5b91908080601f0160208091040260200160405190810
16040528093929190818152602001838380828437600092019190915250929550610789945050505050565b005b61035a6004803603602081101561035357600080fd5b50356107e5565b604080519115158252519081900360200190f35b6103766107fd565b6040805192835260208301919091528051918290030190f35b6105c2600480360360808110156103a557600080fd5b810190602081018135600160201b8111156103bf57600080fd5b8201836020820111156103d157600080fd5b803590602001918460018302840111600160201b831117156103f257600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b81111561044457600080fd5b82018360208201111561045657600080fd5b803590602001918460018302840111600160201b8311171561047757600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b8111156104c957600080fd5b8201836020820111156104db57600080fd5b803590602001918460018302840111600160201b831117156104fc57600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b81111561054e57600080fd5b82018360208201111561056057600080fd5b803590602001918460018302840111600160201b8311171561058157600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610815945050505050565b60408051918252519081900360200190f35b61033b600480360360608110156105ea57600080fd5b813591602081013591810190606081016040820135600160201b81111561061057600080fd5b82018360208201111561062257600080fd5b803590602001918460018302840111600160201b8311171561064357600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610b28945050505050565b6005546060908190818167ffffffffffffffff811180156106a457600080fd5b506040519080825280602002602001820160405280156106ce578160200160208202803683370190505b50905060005b828110156107295760076000600583815481106106ed57fe5b906000526020600020015
481526020019081526020016000206002015482828151811061071657fe5b60209081029190910101526001016106d4565b508060058080548060200260200160405190810160405280929190818152602001828054801561077857602002820191906000526020600020905b815481526020019060010190808311610764575b505050505090509350935050509091565b61079284610d8d565b61079b576107df565b8251156107b4576107ab83610d8d565b6107b4576107df565b8151156107cd576107c482610d8d565b6107cd576107df565b8051156107df576107dd81610d8d565b505b50505050565b60008181526007602052604090205460ff165b919050565b60008054808252600760205260409091206002015491565b600061081f611454565b61082886610f0e565b9050610832611489565b602082810151600090815260078252604090819020815160e081018352815460ff1615158152600180830154828601526002808401548386015260038401805486516101009482161594909402600019011691909104601f81018790048702830187019095528482529194929360608601939192918301828280156108f85780601f106108cd576101008083540402835291602001916108f8565b820191906000526020600020905b8154815290600101906020018083116108db57829003601f168201915b5050509183525050600482015467ffffffffffffffff1660208201526005820154604082015260069091015460609091015290506000806109398484611001565b945050505091506000600681111561094d57fe5b82600681111561095957fe5b146109ab576040805162461bcd60e51b815260206004820152601860248201527f43616e277420696d706f727420616e7920686561646572730000000000000000604482015290519081900360640190fd5b83604001518114156109c4576001945050505050610b20565b87516109d7576000945050505050610b20565b6109df611489565b6109e98585611171565b90506109f3611454565b6109fc8a610f0e565b90506000610a0a8284611001565b9450505050508160400151811415610a2c576002975050505050505050610b20565b8951610a42576000975050505050505050610b20565b610a4a611489565b610a548388611171565b9050610a5e611454565b610a678c610f0e565b90506000610a758284611001565b9450505050508160400151811415610a9a5760039a5050505050505050505050610b20565b8b51610ab35760009a5050505050505050505050610b20565b610abb611489565b610ac5838b611171565b9050610acf611454565b610ad88e610f0e565b90506000610ae68
284611001565b9450505050508160400151811415610b0e5760049d5050505050505050505050505050610b20565b60009d50505050505050505050505050505b949350505050565b6000828152600760205260409020600201548314610b775760405162461bcd60e51b815260040180806020018281038252602f8152602001806115d5602f913960400191505060405180910390fd5b60028054600354600480546040805160206101006001851615026000190190931696909604601f81018390048302870183019091528086529394600094610c28948a948a9467ffffffffffffffff90921693929091830182828015610c1d5780601f10610bf257610100808354040283529160200191610c1d565b820191906000526020600020905b815481529060010190602001808311610c0057829003601f168201915b5050505050876111d0565b600081815260076020526040902060028281558101546001559091505b828214610d8557506000818152600760209081526040808320600181015460069093529220549092908015610d07576005546000199182019181018214610cd357600060056001830381548110610c9857fe5b906000526020600020015490508060058481548110610cb357fe5b600091825260208083209091019290925591825260069052604090208290555b6005805480610cde57fe5b600082815260208082208301600019908101839055909201909255848252600690526040812055505b826006015483600201541415610d7e57600583015460009081526007602052604090206003805467ffffffffffffffff198116600167ffffffffffffffff92831681019092161782559082018054610d759260049291600261010092821615929092026000190116046114c4565b50505050610d85565b5050610c45565b505050505050565b600080610d98611454565b6000806000610da687611312565b9398509196509450925090506000856006811115610dc057fe5b14610dd3576000955050505050506107f8565b604084015181148015610e27576005805486516001820180845560009384527f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0909201558651825260066020526040909120555b6040805160e0810182526001808252602088810151818401908152898501518486019081526080808c01516060870190815267ffffffffffffffff8c169187019190915260a086018a905260c086018990528b51600090815260078552969096208551815460ff1916901515178155915193820193909355915160028301559251805192939192610ebe9260038501920190611549565b5060808201516004820
1805467ffffffffffffffff191667ffffffffffffffff90921691909117905560a0820151600582015560c090910151600690910155935160005550509015949350505050565b610f16611454565b60008060008060008651602088016040516020810160208101602081016020810160a08588886010600019fa610f4b57600080fd5b84519b5083519a508251995081519850805197505050505050505060608167ffffffffffffffff81118015610f7f57600080fd5b506040519080825280601f01601f191660200182016040528015610faa576020820181803683370190505b5090508115610fd45787516020890160208301848184846011600019fa610fd057600080fd5b5050505b6040805160a081018252968752602087019590955293850192909252606084015250608082015292915050565b600061100b611454565b83516000908152600760205260408120548190819060ff161561103d5750600193508592506000915081905080611167565b60015487604001511161105f5750600293508592506000915081905080611167565b8551158061107857506001876040015103866040015114155b156110925750600393508592506000915081905080611167565b60c0860151158015906110ac575085604001518660c00151145b156110d3578660200151600254146110d35750600493508592506000915081905080611167565b60808087015160a088015160c0890151928a01515191929091156111585767ffffffffffffffff838116141561111d57506005965088955060009450849350839250611167915050565b8960400151811061114257506006965088955060009450849350839250611167915050565b50508751606089015160408a0151600190930192015b60009750899650919450925090505b9295509295909350565b611179611489565b506040805160e08101825260018082528451602083015293820151909301908301526060818101519083015260808082015167ffffffffffffffff169083015260a0808201519083015260c0908101519082015290565b600060608686868686604051602001808681526020018581526020018467ffffffffffffffff1667ffffffffffffffff1681526020018060200180602001838103835285818151815260200191508051906020019080838360005b8381101561124357818101518382015260200161122b565b50505050905090810190601f1680156112705780820380516001836020036101000a031916815260200191505b50838103825284518152845160209182019186019080838360005b838110156112a357818101518382015260200161128b565b50505050905090810190601f1
680156112d05780820380516001836020036101000a031916815260200191505b50975050505050505050604051602081830303815290604052905080516020820160008083836012600019fa61130557600080fd5b5095979650505050505050565b600061131c611454565b6000806000611329611454565b61133287610f0e565b905061133c611489565b602082810151600090815260078252604090819020815160e081018352815460ff1615158152600180830154828601526002808401548386015260038401805486516101009482161594909402600019011691909104601f81018790048702830187019095528482529194929360608601939192918301828280156114025780601f106113d757610100808354040283529160200191611402565b820191906000526020600020905b8154815290600101906020018083116113e557829003601f168201915b5050509183525050600482015467ffffffffffffffff1660208201526005820154604082015260069091015460609091015290506114408282611001565b939c929b5090995097509095509350505050565b6040518060a0016040528060008019168152602001600080191681526020016000815260200160008152602001606081525090565b6040805160e0810182526000808252602082018190529181018290526060808201526080810182905260a0810182905260c081019190915290565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106114fd5780548555611539565b8280016001018555821561153957600052602060002091601f016020900482015b8281111561153957825482559160010191906001019061151e565b506115459291506115b7565b5090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061158a57805160ff1916838001178555611539565b82800160010185558215611539579182015b8281111561153957825182559160200191906001019061159c565b6115d191905b8082111561154557600081556001016115bd565b9056fe4d697373696e672066696e616c69747920746172676574206865616465722066726f6d207468652073746f72616765a2646970667358221220edcaec08f93f74ce5be00b81da5d6b2276138571a33f1cfdca50e5047f854e6e64736f6c63430006060033 \ No newline at end of file diff --git a/polkadot/bridges/relays/bin-ethereum/res/substrate-bridge-metadata.txt b/polkadot/bridges/relays/bin-ethereum/res/substrate-bridge-metadata.txt 
deleted file mode 100644 index 13b7daa9a8b8b317f4e9fad9b99bb4986dc85a91..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/res/substrate-bridge-metadata.txt +++ /dev/null @@ -1,5 +0,0 @@ -Last Change Date: 2020-07-30 -Solc version: 0.6.6+commit.6c089d02.Linux.g++ -Source hash (keccak256): 0xea5d6d744f69157adc2857166792aca139c0b5b186ba89c1011358fbcad90d7e -Source gist: https://github.com/svyatonik/substrate-bridge-sol/blob/6456d3e016c95cd5e6d5e817c23e9e69e739aa78/substrate-bridge.sol -Compiler flags used (command to produce the file): `docker run -i ethereum/solc:0.6.6 --optimize --bin - < substrate-bridge.sol` \ No newline at end of file diff --git a/polkadot/bridges/relays/bin-ethereum/src/cli.yml b/polkadot/bridges/relays/bin-ethereum/src/cli.yml deleted file mode 100644 index 78971787c0e2b5b42de5cf5578c177c916699eb5..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/src/cli.yml +++ /dev/null @@ -1,166 +0,0 @@ -name: ethsub-bridge -version: "0.1.0" -author: Parity Technologies -about: Parity Ethereum (PoA) <-> Substrate bridge -subcommands: - - eth-to-sub: - about: Synchronize headers from Ethereum node to Substrate node. - args: - - eth-host: ð-host - long: eth-host - value_name: ETH_HOST - help: Connect to Ethereum node websocket server at given host. - takes_value: true - - eth-port: ð-port - long: eth-port - value_name: ETH_PORT - help: Connect to Ethereum node websocket server at given port. - takes_value: true - - sub-host: &sub-host - long: sub-host - value_name: SUB_HOST - help: Connect to Substrate node websocket server at given host. - takes_value: true - - sub-port: &sub-port - long: sub-port - value_name: SUB_PORT - help: Connect to Substrate node websocket server at given port. - takes_value: true - - sub-tx-mode: - long: sub-tx-mode - value_name: MODE - help: Submit headers using signed (default) or unsigned transactions. 
Third mode - backup - submits signed transactions only when we believe that sync has stalled. - takes_value: true - possible_values: - - signed - - unsigned - - backup - - sub-signer: &sub-signer - long: sub-signer - value_name: SUB_SIGNER - help: The SURI of secret key to use when transactions are submitted to the Substrate node. - - sub-signer-password: &sub-signer-password - long: sub-signer-password - value_name: SUB_SIGNER_PASSWORD - help: The password for the SURI of secret key to use when transactions are submitted to the Substrate node. - - sub-pallet-instance: &sub-pallet-instance - long: instance - short: i - value_name: PALLET_INSTANCE - help: The instance of the bridge pallet the relay should follow. - takes_value: true - case_insensitive: true - possible_values: - - Rialto - - Kovan - default_value: Rialto - - no-prometheus: &no-prometheus - long: no-prometheus - help: Do not expose a Prometheus metric endpoint. - - prometheus-host: &prometheus-host - long: prometheus-host - value_name: PROMETHEUS_HOST - help: Expose Prometheus endpoint at given interface. - - prometheus-port: &prometheus-port - long: prometheus-port - value_name: PROMETHEUS_PORT - help: Expose Prometheus endpoint at given port. - - sub-to-eth: - about: Synchronize headers from Substrate node to Ethereum node. - args: - - eth-host: *eth-host - - eth-port: *eth-port - - eth-contract: - long: eth-contract - value_name: ETH_CONTRACT - help: Address of deployed bridge contract. - takes_value: true - - eth-chain-id: ð-chain-id - long: eth-chain-id - value_name: ETH_CHAIN_ID - help: Chain ID to use for signing. - - eth-signer: ð-signer - long: eth-signer - value_name: ETH_SIGNER - help: Hex-encoded secret to use when transactions are submitted to the Ethereum node. 
- - sub-host: *sub-host - - sub-port: *sub-port - - no-prometheus: *no-prometheus - - prometheus-host: *prometheus-host - - prometheus-port: *prometheus-port - - eth-deploy-contract: - about: Deploy Bridge contract on Ethereum node. - args: - - eth-host: *eth-host - - eth-port: *eth-port - - eth-signer: *eth-signer - - eth-chain-id: *eth-chain-id - - eth-contract-code: - long: eth-contract-code - value_name: ETH_CONTRACT_CODE - help: Bytecode of bridge contract. - takes_value: true - - sub-host: *sub-host - - sub-port: *sub-port - - sub-authorities-set-id: - long: sub-authorities-set-id - value_name: SUB_AUTHORITIES_SET_ID - help: ID of initial GRANDPA authorities set. - takes_value: true - - sub-authorities-set: - long: sub-authorities-set - value_name: SUB_AUTHORITIES_SET - help: Encoded initial GRANDPA authorities set. - takes_value: true - - sub-initial-header: - long: sub-initial-header - value_name: SUB_INITIAL_HEADER - help: Encoded initial Substrate header. - takes_value: true - - eth-submit-exchange-tx: - about: Submit lock funds transaction to Ethereum node. - args: - - eth-host: *eth-host - - eth-port: *eth-port - - eth-nonce: - long: eth-nonce - value_name: ETH_NONCE - help: Nonce that have to be used when building transaction. If not specified, read from PoA node. - takes_value: true - - eth-signer: *eth-signer - - eth-chain-id: *eth-chain-id - - eth-amount: - long: eth-amount - value_name: ETH_AMOUNT - help: Amount of ETH to lock (in wei). - takes_value: true - - sub-recipient: - long: sub-recipient - value_name: SUB_RECIPIENT - help: Hex-encoded Public key of funds recipient in Substrate chain. - takes_value: true - - eth-exchange-sub: - about: Submit proof of PoA lock funds transaction to Substrate node. - args: - - eth-host: *eth-host - - eth-port: *eth-port - - eth-start-with-block: - long: eth-start-with-block - value_name: ETH_START_WITH_BLOCK - help: Auto-relay transactions starting with given block number. 
If not specified, starts with best finalized Ethereum block (known to Substrate node) transactions. - takes_value: true - conflicts_with: - - eth-tx-hash - - eth-tx-hash: - long: eth-tx-hash - value_name: ETH_TX_HASH - help: Hash of the lock funds transaction. - takes_value: true - - sub-host: *sub-host - - sub-port: *sub-port - - sub-signer: *sub-signer - - sub-signer-password: *sub-signer-password - - sub-pallet-instance: *sub-pallet-instance - - no-prometheus: *no-prometheus - - prometheus-host: *prometheus-host - - prometheus-port: *prometheus-port diff --git a/polkadot/bridges/relays/bin-ethereum/src/ethereum_client.rs b/polkadot/bridges/relays/bin-ethereum/src/ethereum_client.rs deleted file mode 100644 index 6fe3a1ce17b5ec3b5a1d31d43be792d2e6c16c3e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/src/ethereum_client.rs +++ /dev/null @@ -1,653 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::rpc_errors::RpcError; -use crate::substrate_sync_loop::QueuedRialtoHeader; - -use async_trait::async_trait; -use bp_eth_poa::signatures::secret_to_address; -use codec::{Decode, Encode}; -use ethabi::FunctionOutputDecoder; -use headers_relay::sync_types::SubmittedHeaders; -use relay_ethereum_client::{ - sign_and_submit_transaction, - types::{Address, CallRequest, HeaderId as EthereumHeaderId, Receipt, H256, U256}, - Client as EthereumClient, Error as EthereumNodeError, SigningParams as EthereumSigningParams, -}; -use relay_rialto_client::HeaderId as RialtoHeaderId; -use relay_utils::{HeaderId, MaybeConnectionError}; -use sp_runtime::EncodedJustification; -use std::collections::HashSet; - -// to encode/decode contract calls -ethabi_contract::use_contract!(bridge_contract, "res/substrate-bridge-abi.json"); - -type RpcResult = std::result::Result; - -/// A trait which contains methods that work by using multiple low-level RPCs, or more complicated -/// interactions involving, for example, an Ethereum contract. -#[async_trait] -pub trait EthereumHighLevelRpc { - /// Returns best Substrate block that PoA chain knows of. - async fn best_substrate_block(&self, contract_address: Address) -> RpcResult; - - /// Returns true if Substrate header is known to Ethereum node. - async fn substrate_header_known( - &self, - contract_address: Address, - id: RialtoHeaderId, - ) -> RpcResult<(RialtoHeaderId, bool)>; - - /// Submits Substrate headers to Ethereum contract. - async fn submit_substrate_headers( - &self, - params: EthereumSigningParams, - contract_address: Address, - headers: Vec, - ) -> SubmittedHeaders; - - /// Returns ids of incomplete Substrate headers. - async fn incomplete_substrate_headers(&self, contract_address: Address) -> RpcResult>; - - /// Complete Substrate header. 
- async fn complete_substrate_header( - &self, - params: EthereumSigningParams, - contract_address: Address, - id: RialtoHeaderId, - justification: EncodedJustification, - ) -> RpcResult; - - /// Submit ethereum transaction. - async fn submit_ethereum_transaction( - &self, - params: &EthereumSigningParams, - contract_address: Option
, - nonce: Option, - double_gas: bool, - encoded_call: Vec, - ) -> RpcResult<()>; - - /// Retrieve transactions receipts for given block. - async fn transaction_receipts( - &self, - id: EthereumHeaderId, - transactions: Vec, - ) -> RpcResult<(EthereumHeaderId, Vec)>; -} - -#[async_trait] -impl EthereumHighLevelRpc for EthereumClient { - async fn best_substrate_block(&self, contract_address: Address) -> RpcResult { - let (encoded_call, call_decoder) = bridge_contract::functions::best_known_header::call(); - let call_request = CallRequest { - to: Some(contract_address), - data: Some(encoded_call.into()), - ..Default::default() - }; - - let call_result = self.eth_call(call_request).await?; - let (number, raw_hash) = call_decoder.decode(&call_result.0)?; - let hash = rialto_runtime::Hash::decode(&mut &raw_hash[..])?; - - if number != number.low_u32().into() { - return Err(RpcError::Ethereum(EthereumNodeError::InvalidSubstrateBlockNumber)); - } - - Ok(HeaderId(number.low_u32(), hash)) - } - - async fn substrate_header_known( - &self, - contract_address: Address, - id: RialtoHeaderId, - ) -> RpcResult<(RialtoHeaderId, bool)> { - let (encoded_call, call_decoder) = bridge_contract::functions::is_known_header::call(id.1); - let call_request = CallRequest { - to: Some(contract_address), - data: Some(encoded_call.into()), - ..Default::default() - }; - - let call_result = self.eth_call(call_request).await?; - let is_known_block = call_decoder.decode(&call_result.0)?; - - Ok((id, is_known_block)) - } - - async fn submit_substrate_headers( - &self, - params: EthereumSigningParams, - contract_address: Address, - headers: Vec, - ) -> SubmittedHeaders { - // read nonce of signer - let address: Address = secret_to_address(¶ms.signer); - let nonce = match self.account_nonce(address).await { - Ok(nonce) => nonce, - Err(error) => { - return SubmittedHeaders { - submitted: Vec::new(), - incomplete: Vec::new(), - rejected: headers.iter().rev().map(|header| header.id()).collect(), - 
fatal_error: Some(error.into()), - } - } - }; - - // submit headers. Note that we're cloning self here. It is ok, because - // cloning `jsonrpsee::Client` only clones reference to background threads - submit_substrate_headers( - EthereumHeadersSubmitter { - client: self.clone(), - params, - contract_address, - nonce, - }, - headers, - ) - .await - } - - async fn incomplete_substrate_headers(&self, contract_address: Address) -> RpcResult> { - let (encoded_call, call_decoder) = bridge_contract::functions::incomplete_headers::call(); - let call_request = CallRequest { - to: Some(contract_address), - data: Some(encoded_call.into()), - ..Default::default() - }; - - let call_result = self.eth_call(call_request).await?; - - // Q: Is is correct to call these "incomplete_ids"? - let (incomplete_headers_numbers, incomplete_headers_hashes) = call_decoder.decode(&call_result.0)?; - let incomplete_ids = incomplete_headers_numbers - .into_iter() - .zip(incomplete_headers_hashes) - .filter_map(|(number, hash)| { - if number != number.low_u32().into() { - return None; - } - - Some(HeaderId(number.low_u32(), hash)) - }) - .collect(); - - Ok(incomplete_ids) - } - - async fn complete_substrate_header( - &self, - params: EthereumSigningParams, - contract_address: Address, - id: RialtoHeaderId, - justification: EncodedJustification, - ) -> RpcResult { - let _ = self - .submit_ethereum_transaction( - ¶ms, - Some(contract_address), - None, - false, - bridge_contract::functions::import_finality_proof::encode_input(id.0, id.1, justification), - ) - .await?; - - Ok(id) - } - - async fn submit_ethereum_transaction( - &self, - params: &EthereumSigningParams, - contract_address: Option
, - nonce: Option, - double_gas: bool, - encoded_call: Vec, - ) -> RpcResult<()> { - sign_and_submit_transaction(self, params, contract_address, nonce, double_gas, encoded_call) - .await - .map_err(Into::into) - } - - async fn transaction_receipts( - &self, - id: EthereumHeaderId, - transactions: Vec, - ) -> RpcResult<(EthereumHeaderId, Vec)> { - let mut transaction_receipts = Vec::with_capacity(transactions.len()); - for transaction in transactions { - let transaction_receipt = self.transaction_receipt(transaction).await?; - transaction_receipts.push(transaction_receipt); - } - Ok((id, transaction_receipts)) - } -} - -/// Max number of headers which can be sent to Solidity contract. -pub const HEADERS_BATCH: usize = 4; - -/// Substrate headers to send to the Ethereum light client. -/// -/// The Solidity contract can only accept a fixed number of headers in one go. -/// This struct is meant to encapsulate this limitation. -#[derive(Debug)] -#[cfg_attr(test, derive(Clone))] -pub struct HeadersBatch { - pub header1: QueuedRialtoHeader, - pub header2: Option, - pub header3: Option, - pub header4: Option, -} - -impl HeadersBatch { - /// Create new headers from given header & ids collections. - /// - /// This method will pop `HEADERS_BATCH` items from both collections - /// and construct `Headers` object and a vector of `RialtoHeaderId`s. 
- pub fn pop_from( - headers: &mut Vec, - ids: &mut Vec, - ) -> Result<(Self, Vec), ()> { - if headers.len() != ids.len() { - log::error!(target: "bridge", "Collection size mismatch ({} vs {})", headers.len(), ids.len()); - return Err(()); - } - - let header1 = headers.pop().ok_or(())?; - let header2 = headers.pop(); - let header3 = headers.pop(); - let header4 = headers.pop(); - - let mut submitting_ids = Vec::with_capacity(HEADERS_BATCH); - for _ in 0..HEADERS_BATCH { - submitting_ids.extend(ids.pop().iter()); - } - - Ok(( - Self { - header1, - header2, - header3, - header4, - }, - submitting_ids, - )) - } - - /// Returns unified array of headers. - /// - /// The first element is always `Some`. - fn headers(&self) -> [Option<&QueuedRialtoHeader>; HEADERS_BATCH] { - [ - Some(&self.header1), - self.header2.as_ref(), - self.header3.as_ref(), - self.header4.as_ref(), - ] - } - - /// Encodes all headers. If header is not present an empty vector will be returned. - pub fn encode(&self) -> [Vec; HEADERS_BATCH] { - let encode = |h: &QueuedRialtoHeader| h.header().encode(); - let headers = self.headers(); - [ - headers[0].map(encode).unwrap_or_default(), - headers[1].map(encode).unwrap_or_default(), - headers[2].map(encode).unwrap_or_default(), - headers[3].map(encode).unwrap_or_default(), - ] - } - /// Returns number of contained headers. - pub fn len(&self) -> usize { - let is_set = |h: &Option<&QueuedRialtoHeader>| if h.is_some() { 1 } else { 0 }; - self.headers().iter().map(is_set).sum() - } - - /// Remove headers starting from `idx` (0-based) from this collection. - /// - /// The collection will be left with `[0, idx)` headers. - /// Returns `Err` when `idx == 0`, since `Headers` must contain at least one header, - /// or when `idx > HEADERS_BATCH`. 
- pub fn split_off(&mut self, idx: usize) -> Result<(), ()> { - if idx == 0 || idx > HEADERS_BATCH { - return Err(()); - } - let mut vals: [_; HEADERS_BATCH] = [&mut None, &mut self.header2, &mut self.header3, &mut self.header4]; - for val in vals.iter_mut().skip(idx) { - **val = None; - } - Ok(()) - } -} - -/// Substrate headers submitter API. -#[async_trait] -trait HeadersSubmitter { - /// Returns Ok(0) if all given not-yet-imported headers are complete. - /// Returns Ok(index != 0) where index is 1-based index of first header that is incomplete. - /// - /// Returns Err(()) if contract has rejected headers. This means that the contract is - /// unable to import first header (e.g. it may already be imported). - async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult; - - /// Submit given headers to Ethereum node. - async fn submit_headers(&mut self, headers: HeadersBatch) -> RpcResult<()>; -} - -/// Implementation of Substrate headers submitter that sends headers to running Ethereum node. 
-struct EthereumHeadersSubmitter { - client: EthereumClient, - params: EthereumSigningParams, - contract_address: Address, - nonce: U256, -} - -#[async_trait] -impl HeadersSubmitter for EthereumHeadersSubmitter { - async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult { - let [h1, h2, h3, h4] = headers.encode(); - let (encoded_call, call_decoder) = bridge_contract::functions::is_incomplete_headers::call(h1, h2, h3, h4); - let call_request = CallRequest { - to: Some(self.contract_address), - data: Some(encoded_call.into()), - ..Default::default() - }; - - let call_result = self.client.eth_call(call_request).await?; - let incomplete_index: U256 = call_decoder.decode(&call_result.0)?; - if incomplete_index > HEADERS_BATCH.into() { - return Err(RpcError::Ethereum(EthereumNodeError::InvalidIncompleteIndex)); - } - - Ok(incomplete_index.low_u32() as _) - } - - async fn submit_headers(&mut self, headers: HeadersBatch) -> RpcResult<()> { - let [h1, h2, h3, h4] = headers.encode(); - let result = self - .client - .submit_ethereum_transaction( - &self.params, - Some(self.contract_address), - Some(self.nonce), - false, - bridge_contract::functions::import_headers::encode_input(h1, h2, h3, h4), - ) - .await; - - if result.is_ok() { - self.nonce += U256::one(); - } - - result - } -} - -/// Submit multiple Substrate headers. 
-async fn submit_substrate_headers( - mut header_submitter: impl HeadersSubmitter, - mut headers: Vec, -) -> SubmittedHeaders { - let mut submitted_headers = SubmittedHeaders::default(); - - let mut ids = headers.iter().map(|header| header.id()).rev().collect::>(); - headers.reverse(); - - while !headers.is_empty() { - let (headers, submitting_ids) = - HeadersBatch::pop_from(&mut headers, &mut ids).expect("Headers and ids are not empty; qed"); - - submitted_headers.fatal_error = - submit_substrate_headers_batch(&mut header_submitter, &mut submitted_headers, submitting_ids, headers) - .await; - - if submitted_headers.fatal_error.is_some() { - ids.reverse(); - submitted_headers.rejected.extend(ids); - break; - } - } - - submitted_headers -} - -/// Submit 4 Substrate headers in single PoA transaction. -async fn submit_substrate_headers_batch( - header_submitter: &mut impl HeadersSubmitter, - submitted_headers: &mut SubmittedHeaders, - mut ids: Vec, - mut headers: HeadersBatch, -) -> Option { - debug_assert_eq!(ids.len(), headers.len()); - - // if parent of first header is either incomplete, or rejected, we assume that contract - // will reject this header as well - let parent_id = headers.header1.parent_id(); - if submitted_headers.rejected.contains(&parent_id) || submitted_headers.incomplete.contains(&parent_id) { - submitted_headers.rejected.extend(ids); - return None; - } - - // check if headers are incomplete - let incomplete_header_index = match header_submitter.is_headers_incomplete(&headers).await { - // All headers valid - Ok(0) => None, - Ok(incomplete_header_index) => Some(incomplete_header_index), - Err(error) => { - // contract has rejected all headers => we do not want to submit it - submitted_headers.rejected.extend(ids); - if error.is_connection_error() { - return Some(error); - } else { - return None; - } - } - }; - - // Modify `ids` and `headers` to only contain values that are going to be accepted. 
- let rejected = if let Some(idx) = incomplete_header_index { - let len = std::cmp::min(idx, ids.len()); - headers - .split_off(len) - .expect("len > 0, the case where all headers are valid is converted to None; qed"); - ids.split_off(len) - } else { - Vec::new() - }; - let submitted = ids; - let submit_result = header_submitter.submit_headers(headers).await; - match submit_result { - Ok(_) => { - if incomplete_header_index.is_some() { - submitted_headers.incomplete.extend(submitted.iter().last().cloned()); - } - submitted_headers.submitted.extend(submitted); - submitted_headers.rejected.extend(rejected); - None - } - Err(error) => { - submitted_headers.rejected.extend(submitted); - submitted_headers.rejected.extend(rejected); - Some(error) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_runtime::traits::Header; - - struct TestHeadersSubmitter { - incomplete: Vec, - failed: Vec, - } - - #[async_trait] - impl HeadersSubmitter for TestHeadersSubmitter { - async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult { - if self.incomplete.iter().any(|i| i.0 == headers.header1.id().0) { - Ok(1) - } else { - Ok(0) - } - } - - async fn submit_headers(&mut self, headers: HeadersBatch) -> RpcResult<()> { - if self.failed.iter().any(|i| i.0 == headers.header1.id().0) { - Err(RpcError::Ethereum(EthereumNodeError::InvalidSubstrateBlockNumber)) - } else { - Ok(()) - } - } - } - - fn header(number: rialto_runtime::BlockNumber) -> QueuedRialtoHeader { - QueuedRialtoHeader::new( - rialto_runtime::Header::new( - number, - Default::default(), - Default::default(), - if number == 0 { - Default::default() - } else { - header(number - 1).id().1 - }, - Default::default(), - ) - .into(), - ) - } - - #[test] - fn descendants_of_incomplete_headers_are_not_submitted() { - let submitted_headers = async_std::task::block_on(submit_substrate_headers( - TestHeadersSubmitter { - incomplete: vec![header(5).id()], - failed: vec![], - }, - vec![header(5), header(6)], 
- )); - assert_eq!(submitted_headers.submitted, vec![header(5).id()]); - assert_eq!(submitted_headers.incomplete, vec![header(5).id()]); - assert_eq!(submitted_headers.rejected, vec![header(6).id()]); - assert!(submitted_headers.fatal_error.is_none()); - } - - #[test] - fn headers_after_fatal_error_are_not_submitted() { - let submitted_headers = async_std::task::block_on(submit_substrate_headers( - TestHeadersSubmitter { - incomplete: vec![], - failed: vec![header(9).id()], - }, - vec![ - header(5), - header(6), - header(7), - header(8), - header(9), - header(10), - header(11), - ], - )); - assert_eq!( - submitted_headers.submitted, - vec![header(5).id(), header(6).id(), header(7).id(), header(8).id()] - ); - assert_eq!(submitted_headers.incomplete, vec![]); - assert_eq!( - submitted_headers.rejected, - vec![header(9).id(), header(10).id(), header(11).id(),] - ); - assert!(submitted_headers.fatal_error.is_some()); - } - - fn headers_batch() -> HeadersBatch { - let mut init_headers = vec![header(1), header(2), header(3), header(4), header(5)]; - init_headers.reverse(); - let mut init_ids = init_headers.iter().map(|h| h.id()).collect(); - let (headers, ids) = HeadersBatch::pop_from(&mut init_headers, &mut init_ids).unwrap(); - assert_eq!(init_headers, vec![header(5)]); - assert_eq!(init_ids, vec![header(5).id()]); - assert_eq!( - ids, - vec![header(1).id(), header(2).id(), header(3).id(), header(4).id()] - ); - headers - } - - #[test] - fn headers_batch_len() { - let headers = headers_batch(); - assert_eq!(headers.len(), 4); - } - - #[test] - fn headers_batch_encode() { - let headers = headers_batch(); - assert_eq!( - headers.encode(), - [ - header(1).header().encode(), - header(2).header().encode(), - header(3).header().encode(), - header(4).header().encode(), - ] - ); - } - - #[test] - fn headers_batch_split_off() { - // given - let mut headers = headers_batch(); - - // when - assert!(headers.split_off(0).is_err()); - assert_eq!(headers.header1, header(1)); - 
assert!(headers.header2.is_some()); - assert!(headers.header3.is_some()); - assert!(headers.header4.is_some()); - - // when - let mut h = headers.clone(); - h.split_off(1).unwrap(); - assert!(h.header2.is_none()); - assert!(h.header3.is_none()); - assert!(h.header4.is_none()); - - // when - let mut h = headers.clone(); - h.split_off(2).unwrap(); - assert!(h.header2.is_some()); - assert!(h.header3.is_none()); - assert!(h.header4.is_none()); - - // when - let mut h = headers.clone(); - h.split_off(3).unwrap(); - assert!(h.header2.is_some()); - assert!(h.header3.is_some()); - assert!(h.header4.is_none()); - - // when - let mut h = headers; - h.split_off(4).unwrap(); - assert!(h.header2.is_some()); - assert!(h.header3.is_some()); - assert!(h.header4.is_some()); - } -} diff --git a/polkadot/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs b/polkadot/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs deleted file mode 100644 index 3f9076f6db2298e0bcbbd84be8e2a783cfc64b32..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::ethereum_client::{bridge_contract, EthereumHighLevelRpc}; -use crate::rpc_errors::RpcError; - -use codec::{Decode, Encode}; -use num_traits::Zero; -use relay_ethereum_client::{ - Client as EthereumClient, ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams, -}; -use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto}; -use relay_substrate_client::{ - Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams, OpaqueGrandpaAuthoritiesSet, -}; -use relay_utils::HeaderId; - -/// Ethereum synchronization parameters. -#[derive(Debug)] -pub struct EthereumDeployContractParams { - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Ethereum signing params. - pub eth_sign: EthereumSigningParams, - /// Ethereum contract bytecode. - pub eth_contract_code: Vec, - /// Substrate connection params. - pub sub_params: SubstrateConnectionParams, - /// Initial authorities set id. - pub sub_initial_authorities_set_id: Option, - /// Initial authorities set. - pub sub_initial_authorities_set: Option>, - /// Initial header. - pub sub_initial_header: Option>, -} - -/// Deploy Bridge contract on Ethereum chain. 
-pub async fn run(params: EthereumDeployContractParams) { - let EthereumDeployContractParams { - eth_params, - eth_sign, - sub_params, - sub_initial_authorities_set_id, - sub_initial_authorities_set, - sub_initial_header, - eth_contract_code, - } = params; - - let result = async move { - let eth_client = EthereumClient::try_connect(eth_params).await.map_err(RpcError::Ethereum)?; - let sub_client = SubstrateClient::::try_connect(sub_params).await.map_err(RpcError::Substrate)?; - - let (initial_header_id, initial_header) = prepare_initial_header(&sub_client, sub_initial_header).await?; - let initial_set_id = sub_initial_authorities_set_id.unwrap_or(0); - let initial_set = prepare_initial_authorities_set( - &sub_client, - initial_header_id.1, - sub_initial_authorities_set, - ).await?; - - log::info!( - target: "bridge", - "Deploying Ethereum contract.\r\n\tInitial header: {:?}\r\n\tInitial header id: {:?}\r\n\tInitial header encoded: {}\r\n\tInitial authorities set ID: {}\r\n\tInitial authorities set: {}", - initial_header, - initial_header_id, - hex::encode(&initial_header), - initial_set_id, - hex::encode(&initial_set), - ); - - deploy_bridge_contract( - ð_client, - ð_sign, - eth_contract_code, - initial_header, - initial_set_id, - initial_set, - ).await - }.await; - - if let Err(error) = result { - log::error!(target: "bridge", "{}", error); - } -} - -/// Prepare initial header. 
-async fn prepare_initial_header( - sub_client: &SubstrateClient, - sub_initial_header: Option>, -) -> Result<(RialtoHeaderId, Vec), String> { - match sub_initial_header { - Some(raw_initial_header) => match rialto_runtime::Header::decode(&mut &raw_initial_header[..]) { - Ok(initial_header) => Ok(( - HeaderId(initial_header.number, initial_header.hash()), - raw_initial_header, - )), - Err(error) => Err(format!("Error decoding initial header: {}", error)), - }, - None => { - let initial_header = sub_client.header_by_number(Zero::zero()).await; - initial_header - .map(|header| (HeaderId(Zero::zero(), header.hash()), header.encode())) - .map_err(|error| format!("Error reading Substrate genesis header: {:?}", error)) - } - } -} - -/// Prepare initial GRANDPA authorities set. -async fn prepare_initial_authorities_set( - sub_client: &SubstrateClient, - sub_initial_header_hash: rialto_runtime::Hash, - sub_initial_authorities_set: Option>, -) -> Result { - let initial_authorities_set = match sub_initial_authorities_set { - Some(initial_authorities_set) => Ok(initial_authorities_set), - None => sub_client.grandpa_authorities_set(sub_initial_header_hash).await, - }; - - initial_authorities_set.map_err(|error| format!("Error reading GRANDPA authorities set: {:?}", error)) -} - -/// Deploy bridge contract to Ethereum chain. 
-async fn deploy_bridge_contract( - eth_client: &EthereumClient, - params: &EthereumSigningParams, - contract_code: Vec, - initial_header: Vec, - initial_set_id: u64, - initial_authorities: Vec, -) -> Result<(), String> { - eth_client - .submit_ethereum_transaction( - params, - None, - None, - false, - bridge_contract::constructor(contract_code, initial_header, initial_set_id, initial_authorities), - ) - .await - .map_err(|error| format!("Error deploying contract: {:?}", error)) -} diff --git a/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange.rs b/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange.rs deleted file mode 100644 index 3111aa2de436375fa69b1ec60420602cca755c29..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange.rs +++ /dev/null @@ -1,401 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relaying proofs of PoA -> Substrate exchange transactions. 
- -use crate::instances::BridgeInstance; -use crate::rialto_client::{SubmitEthereumExchangeTransactionProof, SubstrateHighLevelRpc}; -use crate::rpc_errors::RpcError; -use crate::substrate_types::into_substrate_ethereum_receipt; - -use async_trait::async_trait; -use bp_currency_exchange::MaybeLockFundsTransaction; -use exchange_relay::exchange::{ - relay_single_transaction_proof, SourceBlock, SourceClient, SourceTransaction, TargetClient, - TransactionProofPipeline, -}; -use exchange_relay::exchange_loop::{run as run_loop, InMemoryStorage}; -use relay_ethereum_client::{ - types::{ - HeaderId as EthereumHeaderId, HeaderWithTransactions as EthereumHeaderWithTransactions, - Transaction as EthereumTransaction, TransactionHash as EthereumTransactionHash, H256, HEADER_ID_PROOF, - }, - Client as EthereumClient, ConnectionParams as EthereumConnectionParams, -}; -use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{ - Chain as SubstrateChain, Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams, -}; -use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient, HeaderId}; -use rialto_runtime::exchange::EthereumTransactionInclusionProof; -use std::{sync::Arc, time::Duration}; - -/// Interval at which we ask Ethereum node for updates. -const ETHEREUM_TICK_INTERVAL: Duration = Duration::from_secs(10); - -/// Exchange relay mode. -#[derive(Debug)] -pub enum ExchangeRelayMode { - /// Relay single transaction and quit. - Single(EthereumTransactionHash), - /// Auto-relay transactions starting with given block. - Auto(Option), -} - -/// PoA exchange transaction relay params. -pub struct EthereumExchangeParams { - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Substrate connection params. - pub sub_params: SubstrateConnectionParams, - /// Substrate signing params. - pub sub_sign: RialtoSigningParams, - /// Relay working mode. 
- pub mode: ExchangeRelayMode, - /// Metrics parameters. - pub metrics_params: MetricsParams, - /// Instance of the bridge pallet being synchronized. - pub instance: Arc, -} - -impl std::fmt::Debug for EthereumExchangeParams { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - f.debug_struct("EthereumExchangeParams") - .field("eth_params", &self.eth_params) - .field("sub_params", &self.sub_params) - .field("sub_sign", &sp_core::Pair::public(&self.sub_sign)) - .field("mode", &self.mode) - .field("metrics_params", &self.metrics_params) - .field("instance", &self.instance) - .finish() - } -} - -/// Ethereum to Substrate exchange pipeline. -struct EthereumToSubstrateExchange; - -impl TransactionProofPipeline for EthereumToSubstrateExchange { - const SOURCE_NAME: &'static str = "Ethereum"; - const TARGET_NAME: &'static str = "Substrate"; - - type Block = EthereumSourceBlock; - type TransactionProof = EthereumTransactionInclusionProof; -} - -/// Ethereum source block. -struct EthereumSourceBlock(EthereumHeaderWithTransactions); - -impl SourceBlock for EthereumSourceBlock { - type Hash = H256; - type Number = u64; - type Transaction = EthereumSourceTransaction; - - fn id(&self) -> EthereumHeaderId { - HeaderId( - self.0.number.expect(HEADER_ID_PROOF).as_u64(), - self.0.hash.expect(HEADER_ID_PROOF), - ) - } - - fn transactions(&self) -> Vec { - self.0 - .transactions - .iter() - .cloned() - .map(EthereumSourceTransaction) - .collect() - } -} - -/// Ethereum source transaction. -struct EthereumSourceTransaction(EthereumTransaction); - -impl SourceTransaction for EthereumSourceTransaction { - type Hash = EthereumTransactionHash; - - fn hash(&self) -> Self::Hash { - self.0.hash - } -} - -/// Ethereum node as transactions proof source. 
-#[derive(Clone)] -struct EthereumTransactionsSource { - client: EthereumClient, -} - -#[async_trait] -impl RelayClient for EthereumTransactionsSource { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - self.client.reconnect().await.map_err(Into::into) - } -} - -#[async_trait] -impl SourceClient for EthereumTransactionsSource { - async fn tick(&self) { - async_std::task::sleep(ETHEREUM_TICK_INTERVAL).await; - } - - async fn block_by_hash(&self, hash: H256) -> Result { - self.client - .header_by_hash_with_transactions(hash) - .await - .map(EthereumSourceBlock) - .map_err(Into::into) - } - - async fn block_by_number(&self, number: u64) -> Result { - self.client - .header_by_number_with_transactions(number) - .await - .map(EthereumSourceBlock) - .map_err(Into::into) - } - - async fn transaction_block( - &self, - hash: &EthereumTransactionHash, - ) -> Result, RpcError> { - let eth_tx = match self.client.transaction_by_hash(*hash).await? { - Some(eth_tx) => eth_tx, - None => return Ok(None), - }; - - // we need transaction to be mined => check if it is included in the block - let (eth_header_id, eth_tx_index) = match (eth_tx.block_number, eth_tx.block_hash, eth_tx.transaction_index) { - (Some(block_number), Some(block_hash), Some(transaction_index)) => ( - HeaderId(block_number.as_u64(), block_hash), - transaction_index.as_u64() as _, - ), - _ => return Ok(None), - }; - - Ok(Some((eth_header_id, eth_tx_index))) - } - - async fn transaction_proof( - &self, - block: &EthereumSourceBlock, - tx_index: usize, - ) -> Result { - const TRANSACTION_HAS_RAW_FIELD_PROOF: &str = "RPC level checks that transactions from Ethereum\ - node are having `raw` field; qed"; - const BLOCK_HAS_HASH_FIELD_PROOF: &str = "RPC level checks that block has `hash` field; qed"; - - let mut transaction_proof = Vec::with_capacity(block.0.transactions.len()); - for tx in &block.0.transactions { - let raw_tx_receipt = self - .client - .transaction_receipt(tx.hash) - 
.await - .map(|receipt| into_substrate_ethereum_receipt(&receipt)) - .map(|receipt| receipt.rlp())?; - let raw_tx = tx.raw.clone().expect(TRANSACTION_HAS_RAW_FIELD_PROOF).0; - transaction_proof.push((raw_tx, raw_tx_receipt)); - } - - Ok(EthereumTransactionInclusionProof { - block: block.0.hash.expect(BLOCK_HAS_HASH_FIELD_PROOF), - index: tx_index as _, - proof: transaction_proof, - }) - } -} - -/// Substrate node as transactions proof target. -#[derive(Clone)] -struct SubstrateTransactionsTarget { - client: SubstrateClient, - sign_params: RialtoSigningParams, - bridge_instance: Arc, -} - -#[async_trait] -impl RelayClient for SubstrateTransactionsTarget { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - Ok(self.client.reconnect().await?) - } -} - -#[async_trait] -impl TargetClient for SubstrateTransactionsTarget { - async fn tick(&self) { - async_std::task::sleep(Rialto::AVERAGE_BLOCK_INTERVAL).await; - } - - async fn is_header_known(&self, id: &EthereumHeaderId) -> Result { - self.client.ethereum_header_known(*id).await - } - - async fn is_header_finalized(&self, id: &EthereumHeaderId) -> Result { - // we check if header is finalized by simple comparison of the header number and - // number of best finalized PoA header known to Substrate node. 
- // - // this may lead to failure in tx proof import if PoA reorganization has happened - // after we have checked that our tx has been included into given block - // - // the fix is easy, but since this code is mostly developed for demonstration purposes, - // I'm leaving this KISS-based design here - let best_finalized_ethereum_block = self.client.best_ethereum_finalized_block().await?; - Ok(id.0 <= best_finalized_ethereum_block.0) - } - - async fn best_finalized_header_id(&self) -> Result { - // we can't continue to relay exchange proofs if Substrate node is out of sync, because - // it may have already received (some of) proofs that we're going to relay - self.client.ensure_synced().await?; - - self.client.best_ethereum_finalized_block().await - } - - async fn filter_transaction_proof(&self, proof: &EthereumTransactionInclusionProof) -> Result { - // let's try to parse transaction locally - let (raw_tx, raw_tx_receipt) = &proof.proof[proof.index as usize]; - let parse_result = rialto_runtime::exchange::EthTransaction::parse(raw_tx); - if parse_result.is_err() { - return Ok(false); - } - - // now let's check if transaction is successful - match bp_eth_poa::Receipt::is_successful_raw_receipt(raw_tx_receipt) { - Ok(true) => (), - _ => return Ok(false), - } - - // seems that transaction is relayable - let's check if runtime is able to import it - // (we can't if e.g. header is pruned or there's some issue with tx data) - self.client.verify_exchange_transaction_proof(proof.clone()).await - } - - async fn submit_transaction_proof(&self, proof: EthereumTransactionInclusionProof) -> Result<(), RpcError> { - let (sign_params, bridge_instance) = (self.sign_params.clone(), self.bridge_instance.clone()); - self.client - .submit_exchange_transaction_proof(sign_params, bridge_instance, proof) - .await - } -} - -/// Relay exchange transaction proof(s) to Substrate node. 
-pub async fn run(params: EthereumExchangeParams) { - match params.mode { - ExchangeRelayMode::Single(eth_tx_hash) => { - let result = run_single_transaction_relay(params, eth_tx_hash).await; - match result { - Ok(_) => log::info!( - target: "bridge", - "Ethereum transaction {} proof has been successfully submitted to Substrate node", - eth_tx_hash, - ), - Err(err) => log::error!( - target: "bridge", - "Error submitting Ethereum transaction {} proof to Substrate node: {}", - eth_tx_hash, - err, - ), - } - } - ExchangeRelayMode::Auto(eth_start_with_block_number) => { - let result = run_auto_transactions_relay_loop(params, eth_start_with_block_number).await; - if let Err(err) = result { - log::error!( - target: "bridge", - "Error auto-relaying Ethereum transactions proofs to Substrate node: {}", - err, - ); - } - } - } -} - -/// Run single transaction proof relay and stop. -async fn run_single_transaction_relay(params: EthereumExchangeParams, eth_tx_hash: H256) -> Result<(), String> { - let EthereumExchangeParams { - eth_params, - sub_params, - sub_sign, - instance, - .. - } = params; - - let eth_client = EthereumClient::try_connect(eth_params) - .await - .map_err(RpcError::Ethereum)?; - let sub_client = SubstrateClient::::try_connect(sub_params) - .await - .map_err(RpcError::Substrate)?; - - let source = EthereumTransactionsSource { client: eth_client }; - let target = SubstrateTransactionsTarget { - client: sub_client, - sign_params: sub_sign, - bridge_instance: instance, - }; - - relay_single_transaction_proof(&source, &target, eth_tx_hash).await -} - -async fn run_auto_transactions_relay_loop( - params: EthereumExchangeParams, - eth_start_with_block_number: Option, -) -> Result<(), String> { - let EthereumExchangeParams { - eth_params, - sub_params, - sub_sign, - metrics_params, - instance, - .. 
- } = params; - - let eth_client = EthereumClient::new(eth_params).await; - let sub_client = SubstrateClient::::new(sub_params).await; - - let eth_start_with_block_number = match eth_start_with_block_number { - Some(eth_start_with_block_number) => eth_start_with_block_number, - None => { - sub_client - .best_ethereum_finalized_block() - .await - .map_err(|err| { - format!( - "Error retrieving best finalized Ethereum block from Substrate node: {:?}", - err - ) - })? - .0 - } - }; - - run_loop( - InMemoryStorage::new(eth_start_with_block_number), - EthereumTransactionsSource { client: eth_client }, - SubstrateTransactionsTarget { - client: sub_client, - sign_params: sub_sign, - bridge_instance: instance, - }, - metrics_params, - futures::future::pending(), - ) - .await?; - - Ok(()) -} diff --git a/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs b/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs deleted file mode 100644 index 602d4f14e4f0bacc7ab360b2db288428c4ab758b..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Submitting Ethereum -> Substrate exchange transactions. 
- -use bp_eth_poa::{ - signatures::{secret_to_address, SignTransaction}, - UnsignedTransaction, -}; -use relay_ethereum_client::{ - types::{CallRequest, U256}, - Client as EthereumClient, ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams, -}; -use rialto_runtime::exchange::LOCK_FUNDS_ADDRESS; - -/// Ethereum exchange transaction params. -#[derive(Debug)] -pub struct EthereumExchangeSubmitParams { - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Ethereum signing params. - pub eth_sign: EthereumSigningParams, - /// Ethereum signer nonce. - pub eth_nonce: Option, - /// Amount of Ethereum tokens to lock. - pub eth_amount: U256, - /// Funds recipient on Substrate side. - pub sub_recipient: [u8; 32], -} - -/// Submit single Ethereum -> Substrate exchange transaction. -pub async fn run(params: EthereumExchangeSubmitParams) { - let EthereumExchangeSubmitParams { - eth_params, - eth_sign, - eth_nonce, - eth_amount, - sub_recipient, - } = params; - - let result: Result<_, String> = async move { - let eth_client = EthereumClient::try_connect(eth_params) - .await - .map_err(|err| format!("error connecting to Ethereum node: {:?}", err))?; - - let eth_signer_address = secret_to_address(ð_sign.signer); - let sub_recipient_encoded = sub_recipient; - let nonce = match eth_nonce { - Some(eth_nonce) => eth_nonce, - None => eth_client - .account_nonce(eth_signer_address) - .await - .map_err(|err| format!("error fetching acount nonce: {:?}", err))?, - }; - let gas = eth_client - .estimate_gas(CallRequest { - from: Some(eth_signer_address), - to: Some(LOCK_FUNDS_ADDRESS.into()), - value: Some(eth_amount), - data: Some(sub_recipient_encoded.to_vec().into()), - ..Default::default() - }) - .await - .map_err(|err| format!("error estimating gas requirements: {:?}", err))?; - let eth_tx_unsigned = UnsignedTransaction { - nonce, - gas_price: eth_sign.gas_price, - gas, - to: Some(LOCK_FUNDS_ADDRESS.into()), - value: 
eth_amount, - payload: sub_recipient_encoded.to_vec(), - }; - let eth_tx_signed = eth_tx_unsigned - .clone() - .sign_by(ð_sign.signer, Some(eth_sign.chain_id)); - eth_client - .submit_transaction(eth_tx_signed) - .await - .map_err(|err| format!("error submitting transaction: {:?}", err))?; - - Ok(eth_tx_unsigned) - } - .await; - - match result { - Ok(eth_tx_unsigned) => { - log::info!( - target: "bridge", - "Exchange transaction has been submitted to Ethereum node: {:?}", - eth_tx_unsigned, - ); - } - Err(err) => { - log::error!( - target: "bridge", - "Error submitting exchange transaction to Ethereum node: {}", - err, - ); - } - } -} diff --git a/polkadot/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs b/polkadot/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs deleted file mode 100644 index 111abcd86e7105145cad5a69385f2070e66154d5..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs +++ /dev/null @@ -1,298 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Ethereum PoA -> Rialto-Substrate synchronization. 
- -use crate::ethereum_client::EthereumHighLevelRpc; -use crate::instances::BridgeInstance; -use crate::rialto_client::{SubmitEthereumHeaders, SubstrateHighLevelRpc}; -use crate::rpc_errors::RpcError; -use crate::substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts}; - -use async_trait::async_trait; -use codec::Encode; -use headers_relay::{ - sync::{HeadersSyncParams, TargetTransactionMode}, - sync_loop::{SourceClient, TargetClient}, - sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders}, -}; -use relay_ethereum_client::{ - types::{HeaderHash, HeaderId as EthereumHeaderId, Receipt, SyncHeader as Header}, - Client as EthereumClient, ConnectionParams as EthereumConnectionParams, -}; -use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{ - Chain as SubstrateChain, Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams, -}; -use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient}; - -use std::fmt::Debug; -use std::{collections::HashSet, sync::Arc, time::Duration}; - -pub mod consts { - use super::*; - - /// Interval at which we check new Ethereum headers when we are synced/almost synced. - pub const ETHEREUM_TICK_INTERVAL: Duration = Duration::from_secs(10); - /// Max number of headers in single submit transaction. - pub const MAX_HEADERS_IN_SINGLE_SUBMIT: usize = 32; - /// Max total size of headers in single submit transaction. This only affects signed - /// submissions, when several headers are submitted at once. 4096 is the maximal **expected** - /// size of the Ethereum header + transactions receipts (if they're required). - pub const MAX_HEADERS_SIZE_IN_SINGLE_SUBMIT: usize = MAX_HEADERS_IN_SINGLE_SUBMIT * 4096; - /// Max Ethereum headers we want to have in all 'before-submitted' states. - pub const MAX_FUTURE_HEADERS_TO_DOWNLOAD: usize = 128; - /// Max Ethereum headers count we want to have in 'submitted' state. 
- pub const MAX_SUBMITTED_HEADERS: usize = 128; - /// Max depth of in-memory headers in all states. Past this depth they will be forgotten (pruned). - pub const PRUNE_DEPTH: u32 = 4096; -} - -/// Ethereum synchronization parameters. -pub struct EthereumSyncParams { - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Substrate connection params. - pub sub_params: SubstrateConnectionParams, - /// Substrate signing params. - pub sub_sign: RialtoSigningParams, - /// Synchronization parameters. - pub sync_params: HeadersSyncParams, - /// Metrics parameters. - pub metrics_params: MetricsParams, - /// Instance of the bridge pallet being synchronized. - pub instance: Arc, -} - -impl Debug for EthereumSyncParams { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - f.debug_struct("EthereumSyncParams") - .field("eth_params", &self.eth_params) - .field("sub_params", &self.sub_params) - .field("sub_sign", &sp_core::Pair::public(&self.sub_sign)) - .field("sync_params", &self.sync_params) - .field("metrics_params", &self.metrics_params) - .field("instance", &self.instance) - .finish() - } -} - -/// Ethereum synchronization pipeline. -#[derive(Clone, Copy, Debug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct EthereumHeadersSyncPipeline; - -impl HeadersSyncPipeline for EthereumHeadersSyncPipeline { - const SOURCE_NAME: &'static str = "Ethereum"; - const TARGET_NAME: &'static str = "Substrate"; - - type Hash = HeaderHash; - type Number = u64; - type Header = Header; - type Extra = Vec; - type Completion = (); - - fn estimate_size(source: &QueuedHeader) -> usize { - into_substrate_ethereum_header(source.header()).encode().len() - + into_substrate_ethereum_receipts(source.extra()) - .map(|extra| extra.encode().len()) - .unwrap_or(0) - } -} - -/// Queued ethereum header ID. -pub type QueuedEthereumHeader = QueuedHeader; - -/// Ethereum client as headers source. 
-#[derive(Clone)] -struct EthereumHeadersSource { - /// Ethereum node client. - client: EthereumClient, -} - -impl EthereumHeadersSource { - fn new(client: EthereumClient) -> Self { - Self { client } - } -} - -#[async_trait] -impl RelayClient for EthereumHeadersSource { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - self.client.reconnect().await.map_err(Into::into) - } -} - -#[async_trait] -impl SourceClient for EthereumHeadersSource { - async fn best_block_number(&self) -> Result { - // we **CAN** continue to relay headers if Ethereum node is out of sync, because - // Substrate node may be missing headers that are already available at the Ethereum - - self.client.best_block_number().await.map_err(Into::into) - } - - async fn header_by_hash(&self, hash: HeaderHash) -> Result { - self.client - .header_by_hash(hash) - .await - .map(Into::into) - .map_err(Into::into) - } - - async fn header_by_number(&self, number: u64) -> Result { - self.client - .header_by_number(number) - .await - .map(Into::into) - .map_err(Into::into) - } - - async fn header_completion(&self, id: EthereumHeaderId) -> Result<(EthereumHeaderId, Option<()>), RpcError> { - Ok((id, None)) - } - - async fn header_extra( - &self, - id: EthereumHeaderId, - header: QueuedEthereumHeader, - ) -> Result<(EthereumHeaderId, Vec), RpcError> { - self.client - .transaction_receipts(id, header.header().transactions.clone()) - .await - } -} - -#[derive(Clone)] -struct SubstrateHeadersTarget { - /// Substrate node client. - client: SubstrateClient, - /// Whether we want to submit signed (true), or unsigned (false) transactions. - sign_transactions: bool, - /// Substrate signing params. - sign_params: RialtoSigningParams, - /// Bridge instance used in Ethereum to Substrate sync. 
- bridge_instance: Arc, -} - -impl SubstrateHeadersTarget { - fn new( - client: SubstrateClient, - sign_transactions: bool, - sign_params: RialtoSigningParams, - bridge_instance: Arc, - ) -> Self { - Self { - client, - sign_transactions, - sign_params, - bridge_instance, - } - } -} - -#[async_trait] -impl RelayClient for SubstrateHeadersTarget { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - Ok(self.client.reconnect().await?) - } -} - -#[async_trait] -impl TargetClient for SubstrateHeadersTarget { - async fn best_header_id(&self) -> Result { - // we can't continue to relay headers if Substrate node is out of sync, because - // it may have already received (some of) headers that we're going to relay - self.client.ensure_synced().await?; - - self.client.best_ethereum_block().await - } - - async fn is_known_header(&self, id: EthereumHeaderId) -> Result<(EthereumHeaderId, bool), RpcError> { - Ok((id, self.client.ethereum_header_known(id).await?)) - } - - async fn submit_headers(&self, headers: Vec) -> SubmittedHeaders { - let (sign_params, bridge_instance, sign_transactions) = ( - self.sign_params.clone(), - self.bridge_instance.clone(), - self.sign_transactions, - ); - self.client - .submit_ethereum_headers(sign_params, bridge_instance, headers, sign_transactions) - .await - } - - async fn incomplete_headers_ids(&self) -> Result, RpcError> { - Ok(HashSet::new()) - } - - #[allow(clippy::unit_arg)] - async fn complete_header(&self, id: EthereumHeaderId, _completion: ()) -> Result { - Ok(id) - } - - async fn requires_extra(&self, header: QueuedEthereumHeader) -> Result<(EthereumHeaderId, bool), RpcError> { - // we can minimize number of receipts_check calls by checking header - // logs bloom here, but it may give us false positives (when authorities - // source is contract, we never need any logs) - let id = header.header().id(); - let sub_eth_header = into_substrate_ethereum_header(header.header()); - Ok((id, 
self.client.ethereum_receipts_required(sub_eth_header).await?)) - } -} - -/// Run Ethereum headers synchronization. -pub async fn run(params: EthereumSyncParams) -> Result<(), RpcError> { - let EthereumSyncParams { - eth_params, - sub_params, - sub_sign, - sync_params, - metrics_params, - instance, - } = params; - - let eth_client = EthereumClient::new(eth_params).await; - let sub_client = SubstrateClient::::new(sub_params).await; - - let sign_sub_transactions = match sync_params.target_tx_mode { - TargetTransactionMode::Signed | TargetTransactionMode::Backup => true, - TargetTransactionMode::Unsigned => false, - }; - - let source = EthereumHeadersSource::new(eth_client); - let target = SubstrateHeadersTarget::new(sub_client, sign_sub_transactions, sub_sign, instance); - - headers_relay::sync_loop::run( - source, - consts::ETHEREUM_TICK_INTERVAL, - target, - Rialto::AVERAGE_BLOCK_INTERVAL, - (), - sync_params, - metrics_params, - futures::future::pending(), - ) - .await - .map_err(RpcError::SyncLoop)?; - - Ok(()) -} diff --git a/polkadot/bridges/relays/bin-ethereum/src/instances.rs b/polkadot/bridges/relays/bin-ethereum/src/instances.rs deleted file mode 100644 index 2ade8632a92c03d0ccb208c612174ba99652a0aa..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/src/instances.rs +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The PoA Bridge Pallet provides a way to include multiple instances of itself in a runtime. When -//! synchronizing a Substrate chain which can include multiple instances of the bridge pallet we -//! must somehow decide which of the instances to sync. -//! -//! Note that each instance of the bridge pallet is coupled with an instance of the currency exchange -//! pallet. We must also have a way to create `Call`s for the correct currency exchange instance. -//! -//! This module helps by preparing the correct `Call`s for each of the different pallet instances. - -use crate::ethereum_sync_loop::QueuedEthereumHeader; -use crate::substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts}; - -use rialto_runtime::exchange::EthereumTransactionInclusionProof as Proof; -use rialto_runtime::Call; - -/// Interface for `Calls` which are needed to correctly sync the bridge. -/// -/// Each instance of the bridge and currency exchange pallets in the bridge runtime requires similar -/// but slightly different `Call` in order to be synchronized. -pub trait BridgeInstance: Send + Sync + std::fmt::Debug { - /// Used to build a `Call` for importing signed headers to a Substrate runtime. - fn build_signed_header_call(&self, headers: Vec) -> Call; - /// Used to build a `Call` for importing an unsigned header to a Substrate runtime. - fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call; - /// Used to build a `Call` for importing peer transactions to a Substrate runtime. - fn build_currency_exchange_call(&self, proof: Proof) -> Call; -} - -/// Corresponds to the Rialto instance used in the bridge runtime. 
-#[derive(Default, Clone, Debug)] -pub struct RialtoPoA; - -impl BridgeInstance for RialtoPoA { - fn build_signed_header_call(&self, headers: Vec) -> Call { - let pallet_call = rialto_runtime::BridgeEthPoACall::import_signed_headers( - headers - .into_iter() - .map(|header| { - ( - into_substrate_ethereum_header(header.header()), - into_substrate_ethereum_receipts(header.extra()), - ) - }) - .collect(), - ); - - rialto_runtime::Call::BridgeRialtoPoa(pallet_call) - } - - fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call { - let pallet_call = rialto_runtime::BridgeEthPoACall::import_unsigned_header( - into_substrate_ethereum_header(header.header()), - into_substrate_ethereum_receipts(header.extra()), - ); - - rialto_runtime::Call::BridgeRialtoPoa(pallet_call) - } - - fn build_currency_exchange_call(&self, proof: Proof) -> Call { - let pallet_call = rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction(proof); - rialto_runtime::Call::BridgeRialtoCurrencyExchange(pallet_call) - } -} - -/// Corresponds to the Kovan instance used in the bridge runtime. 
-#[derive(Default, Clone, Debug)] -pub struct Kovan; - -impl BridgeInstance for Kovan { - fn build_signed_header_call(&self, headers: Vec) -> Call { - let pallet_call = rialto_runtime::BridgeEthPoACall::import_signed_headers( - headers - .into_iter() - .map(|header| { - ( - into_substrate_ethereum_header(header.header()), - into_substrate_ethereum_receipts(header.extra()), - ) - }) - .collect(), - ); - - rialto_runtime::Call::BridgeKovan(pallet_call) - } - - fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call { - let pallet_call = rialto_runtime::BridgeEthPoACall::import_unsigned_header( - into_substrate_ethereum_header(header.header()), - into_substrate_ethereum_receipts(header.extra()), - ); - - rialto_runtime::Call::BridgeKovan(pallet_call) - } - - fn build_currency_exchange_call(&self, proof: Proof) -> Call { - let pallet_call = rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction(proof); - rialto_runtime::Call::BridgeKovanCurrencyExchange(pallet_call) - } -} diff --git a/polkadot/bridges/relays/bin-ethereum/src/main.rs b/polkadot/bridges/relays/bin-ethereum/src/main.rs deleted file mode 100644 index bcdae353d3dc4d4aab3af720a59bbea6746704dc..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/src/main.rs +++ /dev/null @@ -1,413 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![recursion_limit = "1024"] - -mod ethereum_client; -mod ethereum_deploy_contract; -mod ethereum_exchange; -mod ethereum_exchange_submit; -mod ethereum_sync_loop; -mod instances; -mod rialto_client; -mod rpc_errors; -mod substrate_sync_loop; -mod substrate_types; - -use ethereum_deploy_contract::EthereumDeployContractParams; -use ethereum_exchange::EthereumExchangeParams; -use ethereum_exchange_submit::EthereumExchangeSubmitParams; -use ethereum_sync_loop::EthereumSyncParams; -use headers_relay::sync::TargetTransactionMode; -use hex_literal::hex; -use instances::{BridgeInstance, Kovan, RialtoPoA}; -use relay_utils::{ - initialize::initialize_relay, - metrics::{MetricsAddress, MetricsParams}, -}; -use secp256k1::SecretKey; -use sp_core::crypto::Pair; -use substrate_sync_loop::SubstrateSyncParams; - -use headers_relay::sync::HeadersSyncParams; -use relay_ethereum_client::{ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams}; -use relay_rialto_client::SigningParams as RialtoSigningParams; -use relay_substrate_client::ConnectionParams as SubstrateConnectionParams; -use std::sync::Arc; - -fn main() { - initialize_relay(); - - let yaml = clap::load_yaml!("cli.yml"); - let matches = clap::App::from_yaml(yaml).get_matches(); - async_std::task::block_on(run_command(&matches)); -} - -async fn run_command(matches: &clap::ArgMatches<'_>) { - match matches.subcommand() { - ("eth-to-sub", Some(eth_to_sub_matches)) => { - log::info!(target: "bridge", "Starting ETH âž¡ SUB relay."); - if ethereum_sync_loop::run(match ethereum_sync_params(eth_to_sub_matches) { - Ok(ethereum_sync_params) => ethereum_sync_params, - Err(err) => { - log::error!(target: "bridge", "Error parsing parameters: {}", err); - return; - } - }) - .await - .is_err() - { - log::error!(target: "bridge", "Unable to get Substrate genesis block for Ethereum 
sync."); - }; - } - ("sub-to-eth", Some(sub_to_eth_matches)) => { - log::info!(target: "bridge", "Starting SUB âž¡ ETH relay."); - if substrate_sync_loop::run(match substrate_sync_params(sub_to_eth_matches) { - Ok(substrate_sync_params) => substrate_sync_params, - Err(err) => { - log::error!(target: "bridge", "Error parsing parameters: {}", err); - return; - } - }) - .await - .is_err() - { - log::error!(target: "bridge", "Unable to get Substrate genesis block for Substrate sync."); - }; - } - ("eth-deploy-contract", Some(eth_deploy_matches)) => { - log::info!(target: "bridge", "Deploying ETH contracts."); - ethereum_deploy_contract::run(match ethereum_deploy_contract_params(eth_deploy_matches) { - Ok(ethereum_deploy_params) => ethereum_deploy_params, - Err(err) => { - log::error!(target: "bridge", "Error during contract deployment: {}", err); - return; - } - }) - .await; - } - ("eth-submit-exchange-tx", Some(eth_exchange_submit_matches)) => { - log::info!(target: "bridge", "Submitting ETH âž¡ SUB exchange transaction."); - ethereum_exchange_submit::run(match ethereum_exchange_submit_params(eth_exchange_submit_matches) { - Ok(eth_exchange_submit_params) => eth_exchange_submit_params, - Err(err) => { - log::error!(target: "bridge", "Error submitting Eethereum exchange transaction: {}", err); - return; - } - }) - .await; - } - ("eth-exchange-sub", Some(eth_exchange_matches)) => { - log::info!(target: "bridge", "Starting ETH âž¡ SUB exchange transactions relay."); - ethereum_exchange::run(match ethereum_exchange_params(eth_exchange_matches) { - Ok(eth_exchange_params) => eth_exchange_params, - Err(err) => { - log::error!(target: "bridge", "Error relaying Ethereum transactions proofs: {}", err); - return; - } - }) - .await; - } - ("", _) => { - log::error!(target: "bridge", "No subcommand specified"); - } - _ => unreachable!("all possible subcommands are checked above; qed"), - } -} - -fn ethereum_connection_params(matches: &clap::ArgMatches) -> Result { - let mut 
params = EthereumConnectionParams::default(); - if let Some(eth_host) = matches.value_of("eth-host") { - params.host = eth_host.into(); - } - if let Some(eth_port) = matches.value_of("eth-port") { - params.port = eth_port - .parse() - .map_err(|e| format!("Failed to parse eth-port: {}", e))?; - } - Ok(params) -} - -fn ethereum_signing_params(matches: &clap::ArgMatches) -> Result { - let mut params = EthereumSigningParams::default(); - if let Some(eth_signer) = matches.value_of("eth-signer") { - params.signer = - SecretKey::parse_slice(&hex::decode(eth_signer).map_err(|e| format!("Failed to parse eth-signer: {}", e))?) - .map_err(|e| format!("Invalid eth-signer: {}", e))?; - } - if let Some(eth_chain_id) = matches.value_of("eth-chain-id") { - params.chain_id = eth_chain_id - .parse::() - .map_err(|e| format!("Failed to parse eth-chain-id: {}", e))?; - } - Ok(params) -} - -fn substrate_connection_params(matches: &clap::ArgMatches) -> Result { - let mut params = SubstrateConnectionParams::default(); - if let Some(sub_host) = matches.value_of("sub-host") { - params.host = sub_host.into(); - } - if let Some(sub_port) = matches.value_of("sub-port") { - params.port = sub_port - .parse() - .map_err(|e| format!("Failed to parse sub-port: {}", e))?; - } - Ok(params) -} - -fn rialto_signing_params(matches: &clap::ArgMatches) -> Result { - let mut params = sp_keyring::AccountKeyring::Alice.pair(); - - if let Some(sub_signer) = matches.value_of("sub-signer") { - let sub_signer_password = matches.value_of("sub-signer-password"); - params = sp_core::sr25519::Pair::from_string(sub_signer, sub_signer_password) - .map_err(|e| format!("Failed to parse sub-signer: {:?}", e))?; - } - Ok(params) -} - -fn ethereum_sync_params(matches: &clap::ArgMatches) -> Result { - use crate::ethereum_sync_loop::consts::*; - - let mut sync_params = HeadersSyncParams { - max_future_headers_to_download: MAX_FUTURE_HEADERS_TO_DOWNLOAD, - max_headers_in_submitted_status: MAX_SUBMITTED_HEADERS, - 
max_headers_in_single_submit: MAX_HEADERS_IN_SINGLE_SUBMIT, - max_headers_size_in_single_submit: MAX_HEADERS_SIZE_IN_SINGLE_SUBMIT, - prune_depth: PRUNE_DEPTH, - target_tx_mode: TargetTransactionMode::Signed, - }; - - match matches.value_of("sub-tx-mode") { - Some("signed") => sync_params.target_tx_mode = TargetTransactionMode::Signed, - Some("unsigned") => { - sync_params.target_tx_mode = TargetTransactionMode::Unsigned; - - // tx pool won't accept too much unsigned transactions - sync_params.max_headers_in_submitted_status = 10; - } - Some("backup") => sync_params.target_tx_mode = TargetTransactionMode::Backup, - Some(mode) => return Err(format!("Invalid sub-tx-mode: {}", mode)), - None => sync_params.target_tx_mode = TargetTransactionMode::Signed, - } - - let params = EthereumSyncParams { - eth_params: ethereum_connection_params(matches)?, - sub_params: substrate_connection_params(matches)?, - sub_sign: rialto_signing_params(matches)?, - metrics_params: metrics_params(matches)?, - instance: instance_params(matches)?, - sync_params, - }; - - log::debug!(target: "bridge", "Ethereum sync params: {:?}", params); - - Ok(params) -} - -fn substrate_sync_params(matches: &clap::ArgMatches) -> Result { - use crate::substrate_sync_loop::consts::*; - - let eth_contract_address: relay_ethereum_client::types::Address = - if let Some(eth_contract) = matches.value_of("eth-contract") { - eth_contract.parse().map_err(|e| format!("{}", e))? 
- } else { - "731a10897d267e19b34503ad902d0a29173ba4b1" - .parse() - .expect("address is hardcoded, thus valid; qed") - }; - - let params = SubstrateSyncParams { - sub_params: substrate_connection_params(matches)?, - eth_params: ethereum_connection_params(matches)?, - eth_sign: ethereum_signing_params(matches)?, - metrics_params: metrics_params(matches)?, - sync_params: HeadersSyncParams { - max_future_headers_to_download: MAX_FUTURE_HEADERS_TO_DOWNLOAD, - max_headers_in_submitted_status: MAX_SUBMITTED_HEADERS, - max_headers_in_single_submit: MAX_SUBMITTED_HEADERS, - max_headers_size_in_single_submit: std::usize::MAX, - prune_depth: PRUNE_DEPTH, - target_tx_mode: TargetTransactionMode::Signed, - }, - eth_contract_address, - }; - - log::debug!(target: "bridge", "Substrate sync params: {:?}", params); - - Ok(params) -} - -fn ethereum_deploy_contract_params(matches: &clap::ArgMatches) -> Result { - let eth_contract_code = parse_hex_argument(matches, "eth-contract-code")?.unwrap_or_else(|| { - hex::decode(include_str!("../res/substrate-bridge-bytecode.hex")).expect("code is hardcoded, thus valid; qed") - }); - let sub_initial_authorities_set_id = matches - .value_of("sub-authorities-set-id") - .map(|set| { - set.parse() - .map_err(|e| format!("Failed to parse sub-authorities-set-id: {}", e)) - }) - .transpose()?; - let sub_initial_authorities_set = parse_hex_argument(matches, "sub-authorities-set")?; - let sub_initial_header = parse_hex_argument(matches, "sub-initial-header")?; - - let params = EthereumDeployContractParams { - eth_params: ethereum_connection_params(matches)?, - eth_sign: ethereum_signing_params(matches)?, - sub_params: substrate_connection_params(matches)?, - sub_initial_authorities_set_id, - sub_initial_authorities_set, - sub_initial_header, - eth_contract_code, - }; - - log::debug!(target: "bridge", "Deploy params: {:?}", params); - - Ok(params) -} - -fn ethereum_exchange_submit_params(matches: &clap::ArgMatches) -> Result { - let eth_nonce = matches 
- .value_of("eth-nonce") - .map(|eth_nonce| { - relay_ethereum_client::types::U256::from_dec_str(eth_nonce) - .map_err(|e| format!("Failed to parse eth-nonce: {}", e)) - }) - .transpose()?; - - let eth_amount = matches - .value_of("eth-amount") - .map(|eth_amount| { - eth_amount - .parse() - .map_err(|e| format!("Failed to parse eth-amount: {}", e)) - }) - .transpose()? - .unwrap_or_else(|| { - // This is in Wei, represents 1 ETH - 1_000_000_000_000_000_000_u64.into() - }); - - // This is the well-known Substrate account of Ferdie - let default_recepient = hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c"); - - let sub_recipient = if let Some(sub_recipient) = matches.value_of("sub-recipient") { - hex::decode(&sub_recipient) - .map_err(|err| err.to_string()) - .and_then(|vsub_recipient| { - let expected_len = default_recepient.len(); - if expected_len != vsub_recipient.len() { - Err(format!("invalid length. Expected {} bytes", expected_len)) - } else { - let mut sub_recipient = default_recepient; - sub_recipient.copy_from_slice(&vsub_recipient[..expected_len]); - Ok(sub_recipient) - } - }) - .map_err(|e| format!("Failed to parse sub-recipient: {}", e))? 
- } else { - default_recepient - }; - - let params = EthereumExchangeSubmitParams { - eth_params: ethereum_connection_params(matches)?, - eth_sign: ethereum_signing_params(matches)?, - eth_nonce, - eth_amount, - sub_recipient, - }; - - log::debug!(target: "bridge", "Submit Ethereum exchange tx params: {:?}", params); - - Ok(params) -} - -fn ethereum_exchange_params(matches: &clap::ArgMatches) -> Result { - let mode = match matches.value_of("eth-tx-hash") { - Some(eth_tx_hash) => ethereum_exchange::ExchangeRelayMode::Single( - eth_tx_hash - .parse() - .map_err(|e| format!("Failed to parse eth-tx-hash: {}", e))?, - ), - None => ethereum_exchange::ExchangeRelayMode::Auto( - matches - .value_of("eth-start-with-block") - .map(|eth_start_with_block| { - eth_start_with_block - .parse() - .map_err(|e| format!("Failed to parse eth-start-with-block: {}", e)) - }) - .transpose()?, - ), - }; - - let params = EthereumExchangeParams { - eth_params: ethereum_connection_params(matches)?, - sub_params: substrate_connection_params(matches)?, - sub_sign: rialto_signing_params(matches)?, - metrics_params: metrics_params(matches)?, - instance: instance_params(matches)?, - mode, - }; - - log::debug!(target: "bridge", "Ethereum exchange params: {:?}", params); - - Ok(params) -} - -fn metrics_params(matches: &clap::ArgMatches) -> Result { - if matches.is_present("no-prometheus") { - return Ok(None.into()); - } - - let mut metrics_params = MetricsAddress::default(); - - if let Some(prometheus_host) = matches.value_of("prometheus-host") { - metrics_params.host = prometheus_host.into(); - } - if let Some(prometheus_port) = matches.value_of("prometheus-port") { - metrics_params.port = prometheus_port - .parse() - .map_err(|e| format!("Failed to parse prometheus-port: {}", e))?; - } - - Ok(Some(metrics_params).into()) -} - -fn instance_params(matches: &clap::ArgMatches) -> Result, String> { - let instance = if let Some(instance) = matches.value_of("sub-pallet-instance") { - match 
instance.to_lowercase().as_str() { - "rialto" => Arc::new(RialtoPoA) as Arc, - "kovan" => Arc::new(Kovan), - _ => return Err("Unsupported bridge pallet instance".to_string()), - } - } else { - unreachable!("CLI config enforces a default instance, can never be None") - }; - - Ok(instance) -} - -fn parse_hex_argument(matches: &clap::ArgMatches, arg: &str) -> Result>, String> { - match matches.value_of(arg) { - Some(value) => Ok(Some( - hex::decode(value).map_err(|e| format!("Failed to parse {}: {}", arg, e))?, - )), - None => Ok(None), - } -} diff --git a/polkadot/bridges/relays/bin-ethereum/src/rialto_client.rs b/polkadot/bridges/relays/bin-ethereum/src/rialto_client.rs deleted file mode 100644 index d9c0f265cbb95715ec8f2375e69b2de4eb6666ee..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/src/rialto_client.rs +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::ethereum_sync_loop::QueuedEthereumHeader; -use crate::instances::BridgeInstance; -use crate::rpc_errors::RpcError; - -use async_trait::async_trait; -use bp_eth_poa::AuraHeader as SubstrateEthereumHeader; -use codec::{Decode, Encode}; -use headers_relay::sync_types::SubmittedHeaders; -use relay_ethereum_client::types::HeaderId as EthereumHeaderId; -use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{Client as SubstrateClient, TransactionSignScheme}; -use relay_utils::HeaderId; -use sp_core::{crypto::Pair, Bytes}; -use std::{collections::VecDeque, sync::Arc}; - -const ETH_API_IMPORT_REQUIRES_RECEIPTS: &str = "RialtoPoAHeaderApi_is_import_requires_receipts"; -const ETH_API_IS_KNOWN_BLOCK: &str = "RialtoPoAHeaderApi_is_known_block"; -const ETH_API_BEST_BLOCK: &str = "RialtoPoAHeaderApi_best_block"; -const ETH_API_BEST_FINALIZED_BLOCK: &str = "RialtoPoAHeaderApi_finalized_block"; -const EXCH_API_FILTER_TRANSACTION_PROOF: &str = "RialtoCurrencyExchangeApi_filter_transaction_proof"; - -type RpcResult = std::result::Result; - -/// A trait which contains methods that work by using multiple low-level RPCs, or more complicated -/// interactions involving, for example, an Ethereum bridge module. -#[async_trait] -pub trait SubstrateHighLevelRpc { - /// Returns best Ethereum block that Substrate runtime knows of. - async fn best_ethereum_block(&self) -> RpcResult; - /// Returns best finalized Ethereum block that Substrate runtime knows of. - async fn best_ethereum_finalized_block(&self) -> RpcResult; - /// Returns whether or not transactions receipts are required for Ethereum header submission. - async fn ethereum_receipts_required(&self, header: SubstrateEthereumHeader) -> RpcResult; - /// Returns whether or not the given Ethereum header is known to the Substrate runtime. 
- async fn ethereum_header_known(&self, header_id: EthereumHeaderId) -> RpcResult; -} - -#[async_trait] -impl SubstrateHighLevelRpc for SubstrateClient { - async fn best_ethereum_block(&self) -> RpcResult { - let call = ETH_API_BEST_BLOCK.to_string(); - let data = Bytes(Vec::new()); - - let encoded_response = self.state_call(call, data, None).await?; - let decoded_response: (u64, bp_eth_poa::H256) = Decode::decode(&mut &encoded_response.0[..])?; - - let best_header_id = HeaderId(decoded_response.0, decoded_response.1); - Ok(best_header_id) - } - - async fn best_ethereum_finalized_block(&self) -> RpcResult { - let call = ETH_API_BEST_FINALIZED_BLOCK.to_string(); - let data = Bytes(Vec::new()); - - let encoded_response = self.state_call(call, data, None).await?; - let decoded_response: (u64, bp_eth_poa::H256) = Decode::decode(&mut &encoded_response.0[..])?; - - let best_header_id = HeaderId(decoded_response.0, decoded_response.1); - Ok(best_header_id) - } - - async fn ethereum_receipts_required(&self, header: SubstrateEthereumHeader) -> RpcResult { - let call = ETH_API_IMPORT_REQUIRES_RECEIPTS.to_string(); - let data = Bytes(header.encode()); - - let encoded_response = self.state_call(call, data, None).await?; - let receipts_required: bool = Decode::decode(&mut &encoded_response.0[..])?; - - Ok(receipts_required) - } - - // The Substrate module could prune old headers. So this function could return false even - // if header is synced. And we'll mark corresponding Ethereum header as Orphan. - // - // But when we read the best header from Substrate next time, we will know that - // there's a better header. This Orphan will either be marked as synced, or - // eventually pruned. 
- async fn ethereum_header_known(&self, header_id: EthereumHeaderId) -> RpcResult { - let call = ETH_API_IS_KNOWN_BLOCK.to_string(); - let data = Bytes(header_id.1.encode()); - - let encoded_response = self.state_call(call, data, None).await?; - let is_known_block: bool = Decode::decode(&mut &encoded_response.0[..])?; - - Ok(is_known_block) - } -} - -/// A trait for RPC calls which are used to submit Ethereum headers to a Substrate -/// runtime. These are typically calls which use a combination of other low-level RPC -/// calls. -#[async_trait] -pub trait SubmitEthereumHeaders { - /// Submits Ethereum header to Substrate runtime. - async fn submit_ethereum_headers( - &self, - params: RialtoSigningParams, - instance: Arc, - headers: Vec, - sign_transactions: bool, - ) -> SubmittedHeaders; - - /// Submits signed Ethereum header to Substrate runtime. - async fn submit_signed_ethereum_headers( - &self, - params: RialtoSigningParams, - instance: Arc, - headers: Vec, - ) -> SubmittedHeaders; - - /// Submits unsigned Ethereum header to Substrate runtime. 
- async fn submit_unsigned_ethereum_headers( - &self, - instance: Arc, - headers: Vec, - ) -> SubmittedHeaders; -} - -#[async_trait] -impl SubmitEthereumHeaders for SubstrateClient { - async fn submit_ethereum_headers( - &self, - params: RialtoSigningParams, - instance: Arc, - headers: Vec, - sign_transactions: bool, - ) -> SubmittedHeaders { - if sign_transactions { - self.submit_signed_ethereum_headers(params, instance, headers).await - } else { - self.submit_unsigned_ethereum_headers(instance, headers).await - } - } - - async fn submit_signed_ethereum_headers( - &self, - params: RialtoSigningParams, - instance: Arc, - headers: Vec, - ) -> SubmittedHeaders { - let ids = headers.iter().map(|header| header.id()).collect(); - let submission_result = async { - self.submit_signed_extrinsic((*params.public().as_array_ref()).into(), |transaction_nonce| { - Bytes( - Rialto::sign_transaction( - *self.genesis_hash(), - ¶ms, - transaction_nonce, - instance.build_signed_header_call(headers), - ) - .encode(), - ) - }) - .await?; - Ok(()) - } - .await; - - match submission_result { - Ok(_) => SubmittedHeaders { - submitted: ids, - incomplete: Vec::new(), - rejected: Vec::new(), - fatal_error: None, - }, - Err(error) => SubmittedHeaders { - submitted: Vec::new(), - incomplete: Vec::new(), - rejected: ids, - fatal_error: Some(error), - }, - } - } - - async fn submit_unsigned_ethereum_headers( - &self, - instance: Arc, - headers: Vec, - ) -> SubmittedHeaders { - let mut ids = headers.iter().map(|header| header.id()).collect::>(); - let mut submitted_headers = SubmittedHeaders::default(); - - for header in headers { - let id = ids.pop_front().expect("both collections have same size; qed"); - - let call = instance.build_unsigned_header_call(header); - let transaction = create_unsigned_submit_transaction(call); - - match self.submit_unsigned_extrinsic(Bytes(transaction.encode())).await { - Ok(_) => submitted_headers.submitted.push(id), - Err(error) => { - 
submitted_headers.rejected.push(id); - submitted_headers.rejected.extend(ids); - submitted_headers.fatal_error = Some(error.into()); - break; - } - } - } - - submitted_headers - } -} - -/// A trait for RPC calls which are used to submit proof of Ethereum exchange transaction to a -/// Substrate runtime. These are typically calls which use a combination of other low-level RPC -/// calls. -#[async_trait] -pub trait SubmitEthereumExchangeTransactionProof { - /// Pre-verify Ethereum exchange transaction proof. - async fn verify_exchange_transaction_proof( - &self, - proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, - ) -> RpcResult; - /// Submits Ethereum exchange transaction proof to Substrate runtime. - async fn submit_exchange_transaction_proof( - &self, - params: RialtoSigningParams, - instance: Arc, - proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, - ) -> RpcResult<()>; -} - -#[async_trait] -impl SubmitEthereumExchangeTransactionProof for SubstrateClient { - async fn verify_exchange_transaction_proof( - &self, - proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, - ) -> RpcResult { - let call = EXCH_API_FILTER_TRANSACTION_PROOF.to_string(); - let data = Bytes(proof.encode()); - - let encoded_response = self.state_call(call, data, None).await?; - let is_allowed: bool = Decode::decode(&mut &encoded_response.0[..])?; - - Ok(is_allowed) - } - - async fn submit_exchange_transaction_proof( - &self, - params: RialtoSigningParams, - instance: Arc, - proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, - ) -> RpcResult<()> { - self.submit_signed_extrinsic((*params.public().as_array_ref()).into(), |transaction_nonce| { - Bytes( - Rialto::sign_transaction( - *self.genesis_hash(), - ¶ms, - transaction_nonce, - instance.build_currency_exchange_call(proof), - ) - .encode(), - ) - }) - .await?; - Ok(()) - } -} - -/// Create unsigned Substrate transaction for submitting Ethereum header. 
-fn create_unsigned_submit_transaction(call: rialto_runtime::Call) -> rialto_runtime::UncheckedExtrinsic { - rialto_runtime::UncheckedExtrinsic::new_unsigned(call) -} diff --git a/polkadot/bridges/relays/bin-ethereum/src/rpc_errors.rs b/polkadot/bridges/relays/bin-ethereum/src/rpc_errors.rs deleted file mode 100644 index 27b233135f325388c650065e9f6cc2dc028ab84c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/src/rpc_errors.rs +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use relay_ethereum_client::Error as EthereumNodeError; -use relay_substrate_client::Error as SubstrateNodeError; -use relay_utils::MaybeConnectionError; - -/// Contains common errors that can occur when -/// interacting with a Substrate or Ethereum node -/// through RPC. -#[derive(Debug)] -pub enum RpcError { - /// The arguments to the RPC method failed to serialize. - Serialization(serde_json::Error), - /// An error occured when interacting with an Ethereum node. - Ethereum(EthereumNodeError), - /// An error occured when interacting with a Substrate node. - Substrate(SubstrateNodeError), - /// Error running relay loop. 
- SyncLoop(String), -} - -impl From for String { - fn from(err: RpcError) -> Self { - match err { - RpcError::Serialization(e) => e.to_string(), - RpcError::Ethereum(e) => e.to_string(), - RpcError::Substrate(e) => e.to_string(), - RpcError::SyncLoop(e) => e, - } - } -} - -impl From for RpcError { - fn from(err: serde_json::Error) -> Self { - Self::Serialization(err) - } -} - -impl From for RpcError { - fn from(err: EthereumNodeError) -> Self { - Self::Ethereum(err) - } -} - -impl From for RpcError { - fn from(err: SubstrateNodeError) -> Self { - Self::Substrate(err) - } -} - -impl From for RpcError { - fn from(err: ethabi::Error) -> Self { - Self::Ethereum(EthereumNodeError::ResponseParseFailed(format!("{}", err))) - } -} - -impl MaybeConnectionError for RpcError { - fn is_connection_error(&self) -> bool { - match self { - RpcError::Ethereum(ref error) => error.is_connection_error(), - RpcError::Substrate(ref error) => error.is_connection_error(), - _ => false, - } - } -} - -impl From for RpcError { - fn from(err: codec::Error) -> Self { - Self::Substrate(SubstrateNodeError::ResponseParseFailed(err)) - } -} diff --git a/polkadot/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs b/polkadot/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs deleted file mode 100644 index 542fd41f72732632426a98062d00dabcd7cbddd1..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rialto-Substrate -> Ethereum PoA synchronization. - -use crate::ethereum_client::EthereumHighLevelRpc; -use crate::rpc_errors::RpcError; - -use async_trait::async_trait; -use codec::Encode; -use headers_relay::{ - sync::HeadersSyncParams, - sync_loop::TargetClient, - sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders}, -}; -use relay_ethereum_client::{ - types::Address, Client as EthereumClient, ConnectionParams as EthereumConnectionParams, - SigningParams as EthereumSigningParams, -}; -use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SyncHeader as RialtoSyncHeader}; -use relay_substrate_client::{ - headers_source::HeadersSource, Chain as SubstrateChain, Client as SubstrateClient, - ConnectionParams as SubstrateConnectionParams, -}; -use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient}; -use sp_runtime::EncodedJustification; - -use std::fmt::Debug; -use std::{collections::HashSet, time::Duration}; - -pub mod consts { - use super::*; - - /// Interval at which we check new Ethereum blocks. - pub const ETHEREUM_TICK_INTERVAL: Duration = Duration::from_secs(5); - /// Max Ethereum headers we want to have in all 'before-submitted' states. - pub const MAX_FUTURE_HEADERS_TO_DOWNLOAD: usize = 8; - /// Max Ethereum headers count we want to have in 'submitted' state. - pub const MAX_SUBMITTED_HEADERS: usize = 4; - /// Max depth of in-memory headers in all states. Past this depth they will be forgotten (pruned). - pub const PRUNE_DEPTH: u32 = 256; -} - -/// Substrate synchronization parameters. 
-#[derive(Debug)] -pub struct SubstrateSyncParams { - /// Substrate connection params. - pub sub_params: SubstrateConnectionParams, - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Ethereum signing params. - pub eth_sign: EthereumSigningParams, - /// Ethereum bridge contract address. - pub eth_contract_address: Address, - /// Synchronization parameters. - pub sync_params: HeadersSyncParams, - /// Metrics parameters. - pub metrics_params: MetricsParams, -} - -/// Substrate synchronization pipeline. -#[derive(Clone, Copy, Debug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct SubstrateHeadersSyncPipeline; - -impl HeadersSyncPipeline for SubstrateHeadersSyncPipeline { - const SOURCE_NAME: &'static str = "Substrate"; - const TARGET_NAME: &'static str = "Ethereum"; - - type Hash = rialto_runtime::Hash; - type Number = rialto_runtime::BlockNumber; - type Header = RialtoSyncHeader; - type Extra = (); - type Completion = EncodedJustification; - - fn estimate_size(source: &QueuedHeader) -> usize { - source.header().encode().len() - } -} - -/// Queued substrate header ID. -pub type QueuedRialtoHeader = QueuedHeader; - -/// Rialto node as headers source. -type SubstrateHeadersSource = HeadersSource; - -/// Ethereum client as Substrate headers target. -#[derive(Clone)] -struct EthereumHeadersTarget { - /// Ethereum node client. - client: EthereumClient, - /// Bridge contract address. - contract: Address, - /// Ethereum signing params. 
- sign_params: EthereumSigningParams, -} - -impl EthereumHeadersTarget { - fn new(client: EthereumClient, contract: Address, sign_params: EthereumSigningParams) -> Self { - Self { - client, - contract, - sign_params, - } - } -} - -#[async_trait] -impl RelayClient for EthereumHeadersTarget { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - self.client.reconnect().await.map_err(Into::into) - } -} - -#[async_trait] -impl TargetClient for EthereumHeadersTarget { - async fn best_header_id(&self) -> Result { - // we can't continue to relay headers if Ethereum node is out of sync, because - // it may have already received (some of) headers that we're going to relay - self.client.ensure_synced().await?; - - self.client.best_substrate_block(self.contract).await - } - - async fn is_known_header(&self, id: RialtoHeaderId) -> Result<(RialtoHeaderId, bool), RpcError> { - self.client.substrate_header_known(self.contract, id).await - } - - async fn submit_headers(&self, headers: Vec) -> SubmittedHeaders { - self.client - .submit_substrate_headers(self.sign_params.clone(), self.contract, headers) - .await - } - - async fn incomplete_headers_ids(&self) -> Result, RpcError> { - self.client.incomplete_substrate_headers(self.contract).await - } - - async fn complete_header( - &self, - id: RialtoHeaderId, - completion: EncodedJustification, - ) -> Result { - self.client - .complete_substrate_header(self.sign_params.clone(), self.contract, id, completion) - .await - } - - async fn requires_extra(&self, header: QueuedRialtoHeader) -> Result<(RialtoHeaderId, bool), RpcError> { - Ok((header.header().id(), false)) - } -} - -/// Run Substrate headers synchronization. 
-pub async fn run(params: SubstrateSyncParams) -> Result<(), RpcError> { - let SubstrateSyncParams { - sub_params, - eth_params, - eth_sign, - eth_contract_address, - sync_params, - metrics_params, - } = params; - - let eth_client = EthereumClient::new(eth_params).await; - let sub_client = SubstrateClient::::new(sub_params).await; - - let target = EthereumHeadersTarget::new(eth_client, eth_contract_address, eth_sign); - let source = SubstrateHeadersSource::new(sub_client); - - headers_relay::sync_loop::run( - source, - Rialto::AVERAGE_BLOCK_INTERVAL, - target, - consts::ETHEREUM_TICK_INTERVAL, - (), - sync_params, - metrics_params, - futures::future::pending(), - ) - .await - .map_err(RpcError::SyncLoop)?; - - Ok(()) -} diff --git a/polkadot/bridges/relays/bin-ethereum/src/substrate_types.rs b/polkadot/bridges/relays/bin-ethereum/src/substrate_types.rs deleted file mode 100644 index af68d7e0285557d4fc0edfd753dd1771c6a49e69..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-ethereum/src/substrate_types.rs +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Converting between Ethereum headers and bridge module types. 
- -use bp_eth_poa::{ - AuraHeader as SubstrateEthereumHeader, LogEntry as SubstrateEthereumLogEntry, Receipt as SubstrateEthereumReceipt, - TransactionOutcome as SubstrateEthereumTransactionOutcome, -}; -use relay_ethereum_client::types::{ - Header as EthereumHeader, Receipt as EthereumReceipt, HEADER_ID_PROOF as ETHEREUM_HEADER_ID_PROOF, -}; - -/// Convert Ethereum header into Ethereum header for Substrate. -pub fn into_substrate_ethereum_header(header: &EthereumHeader) -> SubstrateEthereumHeader { - SubstrateEthereumHeader { - parent_hash: header.parent_hash, - timestamp: header.timestamp.as_u64(), - number: header.number.expect(ETHEREUM_HEADER_ID_PROOF).as_u64(), - author: header.author, - transactions_root: header.transactions_root, - uncles_hash: header.uncles_hash, - extra_data: header.extra_data.0.clone(), - state_root: header.state_root, - receipts_root: header.receipts_root, - log_bloom: header.logs_bloom.unwrap_or_default().data().into(), - gas_used: header.gas_used, - gas_limit: header.gas_limit, - difficulty: header.difficulty, - seal: header.seal_fields.iter().map(|s| s.0.clone()).collect(), - } -} - -/// Convert Ethereum transactions receipts into Ethereum transactions receipts for Substrate. -pub fn into_substrate_ethereum_receipts( - receipts: &Option>, -) -> Option> { - receipts - .as_ref() - .map(|receipts| receipts.iter().map(into_substrate_ethereum_receipt).collect()) -} - -/// Convert Ethereum transactions receipt into Ethereum transactions receipt for Substrate. 
-pub fn into_substrate_ethereum_receipt(receipt: &EthereumReceipt) -> SubstrateEthereumReceipt { - SubstrateEthereumReceipt { - gas_used: receipt.cumulative_gas_used, - log_bloom: receipt.logs_bloom.data().into(), - logs: receipt - .logs - .iter() - .map(|log_entry| SubstrateEthereumLogEntry { - address: log_entry.address, - topics: log_entry.topics.clone(), - data: log_entry.data.0.clone(), - }) - .collect(), - outcome: match (receipt.status, receipt.root) { - (Some(status), None) => SubstrateEthereumTransactionOutcome::StatusCode(status.as_u64() as u8), - (None, Some(root)) => SubstrateEthereumTransactionOutcome::StateRoot(root), - _ => SubstrateEthereumTransactionOutcome::Unknown, - }, - } -} diff --git a/polkadot/bridges/relays/bin-substrate/Cargo.toml b/polkadot/bridges/relays/bin-substrate/Cargo.toml index 99f56cc3be483cb91c6708a0effeb50eee64871e..a28c61262f403d1eb6b94b7dcafaba14c72d1c6e 100644 --- a/polkadot/bridges/relays/bin-substrate/Cargo.toml +++ b/polkadot/bridges/relays/bin-substrate/Cargo.toml @@ -8,15 +8,16 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] anyhow = "1.0" async-std = "1.9.0" -async-trait = "0.1.42" -codec = { package = "parity-scale-codec", version = "2.0.0" } +codec = { package = "parity-scale-codec", version = "2.2.0" } futures = "0.3.12" hex = "0.4" log = "0.4.14" num-format = "0.4" num-traits = "0.2" paste = "1.0" +rand = "0.8" structopt = "0.3" +strum = { version = "0.21.0", features = ["derive"] } # Bridge dependencies @@ -27,38 +28,52 @@ bp-message-dispatch = { path = "../../primitives/message-dispatch" } bp-millau = { path = "../../primitives/chain-millau" } bp-polkadot = { path = "../../primitives/chain-polkadot" } bp-rialto = { path = "../../primitives/chain-rialto" } +bp-rialto-parachain = { path = "../../primitives/chain-rialto-parachain" } bp-rococo = { path = "../../primitives/chain-rococo" } +bp-token-swap = { path = "../../primitives/token-swap" } bp-wococo = { path = 
"../../primitives/chain-wococo" } bp-runtime = { path = "../../primitives/runtime" } bp-westend = { path = "../../primitives/chain-westend" } bridge-runtime-common = { path = "../../bin/runtime-common" } -finality-grandpa = "0.14.1" finality-relay = { path = "../finality" } -headers-relay = { path = "../headers" } messages-relay = { path = "../messages" } millau-runtime = { path = "../../bin/millau/runtime" } +pallet-bridge-dispatch = { path = "../../modules/dispatch" } pallet-bridge-messages = { path = "../../modules/messages" } +pallet-bridge-token-swap = { path = "../../modules/token-swap" } relay-kusama-client = { path = "../client-kusama" } relay-millau-client = { path = "../client-millau" } relay-polkadot-client = { path = "../client-polkadot" } relay-rialto-client = { path = "../client-rialto" } +relay-rialto-parachain-client = { path = "../client-rialto-parachain" } relay-rococo-client = { path = "../client-rococo" } relay-wococo-client = { path = "../client-wococo" } relay-substrate-client = { path = "../client-substrate" } relay-utils = { path = "../utils" } relay-westend-client = { path = "../client-westend" } +rialto-parachain-runtime = { path = "../../bin/rialto-parachain/runtime" } rialto-runtime = { path = "../../bin/rialto/runtime" } +substrate-relay-helper = { path = "../lib-substrate-relay" } # Substrate Dependencies frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-version = { git = 
"https://github.com/paritytech/substrate", branch = "master" } +# Polkadot Dependencies + +polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "master" } +polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot", branch = "master" } + [dev-dependencies] hex-literal = "0.3" pallet-bridge-grandpa = { path = "../../modules/grandpa" } sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +tempfile = "3.2" +finality-grandpa = { version = "0.14.0" } diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/kusama.rs b/polkadot/bridges/relays/bin-substrate/src/chains/kusama.rs new file mode 100644 index 0000000000000000000000000000000000000000..b12d23f2a56dc5139c81501d624bceccb593988d --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/kusama.rs @@ -0,0 +1,116 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +use codec::Decode; +use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight}; +use relay_kusama_client::Kusama; +use sp_core::storage::StorageKey; +use sp_runtime::{FixedPointNumber, FixedU128}; +use sp_version::RuntimeVersion; + +use crate::cli::{ + bridge, + encode_call::{Call, CliEncodeCall}, + encode_message, CliChain, +}; + +/// Weight of the `system::remark` call at Kusama. +/// +/// This weight is larger (x2) than actual weight at current Kusama runtime to avoid unsuccessful +/// calls in the future. But since it is used only in tests (and on test chains), this is ok. +pub(crate) const SYSTEM_REMARK_CALL_WEIGHT: Weight = 2 * 1_345_000; + +/// Id of Kusama token that is used to fetch token price. +pub(crate) const TOKEN_ID: &str = "kusama"; + +impl CliEncodeCall for Kusama { + fn max_extrinsic_size() -> u32 { + bp_kusama::max_extrinsic_size() + } + + fn encode_call(call: &Call) -> anyhow::Result { + Ok(match call { + Call::Remark { remark_payload, .. } => relay_kusama_client::runtime::Call::System( + relay_kusama_client::runtime::SystemCall::remark( + remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), + ), + ), + Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } => + match *bridge_instance_index { + bridge::KUSAMA_TO_POLKADOT_INDEX => { + let payload = Decode::decode(&mut &*payload.0)?; + relay_kusama_client::runtime::Call::BridgePolkadotMessages( + relay_kusama_client::runtime::BridgePolkadotMessagesCall::send_message( + lane.0, payload, fee.0, + ), + ) + }, + _ => anyhow::bail!( + "Unsupported target bridge pallet with instance index: {}", + bridge_instance_index + ), + }, + _ => anyhow::bail!("Unsupported Kusama call: {:?}", call), + }) + } + + fn get_dispatch_info( + call: &relay_kusama_client::runtime::Call, + ) -> anyhow::Result { + match *call { + relay_kusama_client::runtime::Call::System( + relay_kusama_client::runtime::SystemCall::remark(_), + ) => Ok(DispatchInfo { + weight: 
crate::chains::kusama::SYSTEM_REMARK_CALL_WEIGHT, + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }), + _ => anyhow::bail!("Unsupported Kusama call: {:?}", call), + } + } +} + +impl CliChain for Kusama { + const RUNTIME_VERSION: RuntimeVersion = bp_kusama::VERSION; + + type KeyPair = sp_core::sr25519::Pair; + type MessagePayload = (); + + fn ss58_format() -> u16 { + 42 + } + + fn max_extrinsic_weight() -> Weight { + bp_kusama::max_extrinsic_weight() + } + + fn encode_message( + _message: encode_message::MessagePayload, + ) -> anyhow::Result { + anyhow::bail!("Sending messages from Kusama is not yet supported.") + } +} + +/// Storage key and initial value of Polkadot -> Kusama conversion rate. +pub(crate) fn polkadot_to_kusama_conversion_rate_params() -> (StorageKey, FixedU128) { + ( + bp_runtime::storage_parameter_key( + bp_kusama::POLKADOT_TO_KUSAMA_CONVERSION_RATE_PARAMETER_NAME, + ), + // starting relay before this parameter will be set to some value may cause troubles + FixedU128::from_inner(FixedU128::DIV), + ) +} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs b/polkadot/bridges/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs new file mode 100644 index 0000000000000000000000000000000000000000..ce631ef41e0aca463e8631f5d237a403a3536cba --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs @@ -0,0 +1,168 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Kusama-to-Polkadot headers sync entrypoint. + +use codec::Encode; +use sp_core::{Bytes, Pair}; + +use bp_header_chain::justification::GrandpaJustification; +use relay_kusama_client::{Kusama, SyncHeader as KusamaSyncHeader}; +use relay_polkadot_client::{Polkadot, SigningParams as PolkadotSigningParams}; +use relay_substrate_client::{Client, TransactionSignScheme, UnsignedTransaction}; +use relay_utils::metrics::MetricsParams; +use substrate_relay_helper::finality_pipeline::{ + SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate, +}; + +/// Maximal saturating difference between `balance(now)` and `balance(now-24h)` to treat +/// relay as gone wild. +/// +/// Actual value, returned by `maximal_balance_decrease_per_day_is_sane` test is approximately 21 +/// DOT, but let's round up to 30 DOT here. +pub(crate) const MAXIMAL_BALANCE_DECREASE_PER_DAY: bp_polkadot::Balance = 30_000_000_000; + +/// Kusama-to-Polkadot finality sync pipeline. 
+pub(crate) type FinalityPipelineKusamaFinalityToPolkadot = + SubstrateFinalityToSubstrate; + +#[derive(Clone, Debug)] +pub(crate) struct KusamaFinalityToPolkadot { + finality_pipeline: FinalityPipelineKusamaFinalityToPolkadot, +} + +impl KusamaFinalityToPolkadot { + pub fn new(target_client: Client, target_sign: PolkadotSigningParams) -> Self { + Self { + finality_pipeline: FinalityPipelineKusamaFinalityToPolkadot::new( + target_client, + target_sign, + ), + } + } +} + +impl SubstrateFinalitySyncPipeline for KusamaFinalityToPolkadot { + type FinalitySyncPipeline = FinalityPipelineKusamaFinalityToPolkadot; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = + bp_kusama::BEST_FINALIZED_KUSAMA_HEADER_METHOD; + + type TargetChain = Polkadot; + + fn customize_metrics(params: MetricsParams) -> anyhow::Result { + crate::chains::add_polkadot_kusama_price_metrics::(params) + } + + fn start_relay_guards(&self) { + relay_substrate_client::guard::abort_on_spec_version_change( + self.finality_pipeline.target_client.clone(), + bp_polkadot::VERSION.spec_version, + ); + relay_substrate_client::guard::abort_when_account_balance_decreased( + self.finality_pipeline.target_client.clone(), + self.transactions_author(), + MAXIMAL_BALANCE_DECREASE_PER_DAY, + ); + } + + fn transactions_author(&self) -> bp_polkadot::AccountId { + (*self.finality_pipeline.target_sign.public().as_array_ref()).into() + } + + fn make_submit_finality_proof_transaction( + &self, + era: bp_runtime::TransactionEraOf, + transaction_nonce: bp_runtime::IndexOf, + header: KusamaSyncHeader, + proof: GrandpaJustification, + ) -> Bytes { + let call = relay_polkadot_client::runtime::Call::BridgeKusamaGrandpa( + relay_polkadot_client::runtime::BridgeKusamaGrandpaCall::submit_finality_proof( + Box::new(header.into_inner()), + proof, + ), + ); + let genesis_hash = *self.finality_pipeline.target_client.genesis_hash(); + let transaction = Polkadot::sign_transaction( + genesis_hash, + 
&self.finality_pipeline.target_sign, + era, + UnsignedTransaction::new(call, transaction_nonce), + ); + + Bytes(transaction.encode()) + } +} + +#[cfg(test)] +pub(crate) mod tests { + use super::*; + use frame_support::weights::WeightToFeePolynomial; + use pallet_bridge_grandpa::weights::WeightInfo; + + pub fn compute_maximal_balance_decrease_per_day(expected_source_headers_per_day: u32) -> B + where + B: From + std::ops::Mul, + W: WeightToFeePolynomial, + { + // we assume that the GRANDPA is not lagging here => ancestry length will be near to 0 + // (let's round up to 2) + const AVG_VOTES_ANCESTRIES_LEN: u32 = 2; + // let's assume number of validators is 1024 (more than on any existing well-known chain + // atm) => number of precommits is *2/3 + 1 + const AVG_PRECOMMITS_LEN: u32 = 1024 * 2 / 3 + 1; + + // GRANDPA pallet weights. We're now using Rialto weights everywhere. + // + // Using Rialto runtime is slightly incorrect, because `DbWeight` of other runtimes may + // differ from the `DbWeight` of Rialto runtime. But now (and most probably forever) it is + // the same. + type GrandpaPalletWeights = + pallet_bridge_grandpa::weights::RialtoWeight; + + // The following formula shall not be treated as super-accurate - guard is to protect from + // mad relays, not to protect from over-average loses. 
+ + // increase number of headers a bit + let expected_source_headers_per_day = expected_source_headers_per_day * 110 / 100; + let single_source_header_submit_call_weight = GrandpaPalletWeights::submit_finality_proof( + AVG_VOTES_ANCESTRIES_LEN, + AVG_PRECOMMITS_LEN, + ); + // for simplicity - add extra weight for base tx fee + fee that is paid for the tx size + + // adjusted fee + let single_source_header_submit_tx_weight = single_source_header_submit_call_weight * 3 / 2; + let single_source_header_tx_cost = W::calc(&single_source_header_submit_tx_weight); + single_source_header_tx_cost * B::from(expected_source_headers_per_day) + } + + #[test] + fn maximal_balance_decrease_per_day_is_sane() { + // we expect Kusama -> Polkadot relay to be running in mandatory-headers-only mode + // => we expect single header for every Kusama session + let maximal_balance_decrease = compute_maximal_balance_decrease_per_day::< + bp_polkadot::Balance, + bp_polkadot::WeightToFee, + >(bp_kusama::DAYS / bp_kusama::SESSION_LENGTH + 1); + assert!( + MAXIMAL_BALANCE_DECREASE_PER_DAY >= maximal_balance_decrease, + "Maximal expected loss per day {} is larger than hardcoded {}", + maximal_balance_decrease, + MAXIMAL_BALANCE_DECREASE_PER_DAY, + ); + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs b/polkadot/bridges/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs new file mode 100644 index 0000000000000000000000000000000000000000..32133adc3e54bdb2d13ab1c6b341bb7a3954daaf --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs @@ -0,0 +1,331 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Kusama-to-Polkadot messages sync entrypoint. + +use std::ops::RangeInclusive; + +use codec::Encode; +use frame_support::weights::Weight; +use sp_core::{Bytes, Pair}; + +use bp_messages::MessageNonce; +use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; +use messages_relay::{message_lane::MessageLane, relay_strategy::MixStrategy}; +use relay_kusama_client::{ + HeaderId as KusamaHeaderId, Kusama, SigningParams as KusamaSigningParams, +}; +use relay_polkadot_client::{ + HeaderId as PolkadotHeaderId, Polkadot, SigningParams as PolkadotSigningParams, +}; +use relay_substrate_client::{Chain, Client, TransactionSignScheme, UnsignedTransaction}; +use substrate_relay_helper::{ + messages_lane::{ + select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics, + SubstrateMessageLane, SubstrateMessageLaneToSubstrate, + }, + messages_source::SubstrateMessagesSource, + messages_target::SubstrateMessagesTarget, + STALL_TIMEOUT, +}; + +/// Kusama-to-Polkadot message lane. 
+pub type MessageLaneKusamaMessagesToPolkadot = + SubstrateMessageLaneToSubstrate; + +#[derive(Clone)] +pub struct KusamaMessagesToPolkadot { + message_lane: MessageLaneKusamaMessagesToPolkadot, +} + +impl SubstrateMessageLane for KusamaMessagesToPolkadot { + type MessageLane = MessageLaneKusamaMessagesToPolkadot; + + const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = + bp_polkadot::TO_POLKADOT_MESSAGE_DETAILS_METHOD; + const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str = + bp_polkadot::TO_POLKADOT_LATEST_GENERATED_NONCE_METHOD; + const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = + bp_polkadot::TO_POLKADOT_LATEST_RECEIVED_NONCE_METHOD; + + const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = + bp_kusama::FROM_KUSAMA_LATEST_RECEIVED_NONCE_METHOD; + const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str = + bp_kusama::FROM_KUSAMA_LATEST_CONFIRMED_NONCE_METHOD; + const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = + bp_kusama::FROM_KUSAMA_UNREWARDED_RELAYERS_STATE; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = + bp_kusama::BEST_FINALIZED_KUSAMA_HEADER_METHOD; + const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = + bp_polkadot::BEST_FINALIZED_POLKADOT_HEADER_METHOD; + + const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str = + bp_kusama::WITH_POLKADOT_MESSAGES_PALLET_NAME; + const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = + bp_polkadot::WITH_KUSAMA_MESSAGES_PALLET_NAME; + + const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight = + bp_polkadot::PAY_INBOUND_DISPATCH_FEE_WEIGHT; + + type SourceChain = Kusama; + type TargetChain = Polkadot; + + fn source_transactions_author(&self) -> bp_kusama::AccountId { + (*self.message_lane.source_sign.public().as_array_ref()).into() + } + + fn make_messages_receiving_proof_transaction( + &self, + best_block_id: KusamaHeaderId, + transaction_nonce: bp_runtime::IndexOf, + _generated_at_block: PolkadotHeaderId, + proof: 
::MessagesReceivingProof, + ) -> Bytes { + let (relayers_state, proof) = proof; + let call = relay_kusama_client::runtime::Call::BridgePolkadotMessages( + relay_kusama_client::runtime::BridgePolkadotMessagesCall::receive_messages_delivery_proof( + proof, + relayers_state, + ), + ); + let genesis_hash = *self.message_lane.source_client.genesis_hash(); + let transaction = Kusama::sign_transaction( + genesis_hash, + &self.message_lane.source_sign, + relay_substrate_client::TransactionEra::new( + best_block_id, + self.message_lane.source_transactions_mortality, + ), + UnsignedTransaction::new(call, transaction_nonce), + ); + log::trace!( + target: "bridge", + "Prepared Polkadot -> Kusama confirmation transaction. Weight: /{}, size: {}/{}", + bp_kusama::max_extrinsic_weight(), + transaction.encode().len(), + bp_kusama::max_extrinsic_size(), + ); + Bytes(transaction.encode()) + } + + fn target_transactions_author(&self) -> bp_polkadot::AccountId { + (*self.message_lane.target_sign.public().as_array_ref()).into() + } + + fn make_messages_delivery_transaction( + &self, + best_block_id: PolkadotHeaderId, + transaction_nonce: bp_runtime::IndexOf, + _generated_at_header: KusamaHeaderId, + _nonces: RangeInclusive, + proof: ::MessagesProof, + ) -> Bytes { + let (dispatch_weight, proof) = proof; + let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. 
} = proof; + let messages_count = nonces_end - nonces_start + 1; + + let call = relay_polkadot_client::runtime::Call::BridgeKusamaMessages( + relay_polkadot_client::runtime::BridgeKusamaMessagesCall::receive_messages_proof( + self.message_lane.relayer_id_at_source.clone(), + proof, + messages_count as _, + dispatch_weight, + ), + ); + let genesis_hash = *self.message_lane.target_client.genesis_hash(); + let transaction = Polkadot::sign_transaction( + genesis_hash, + &self.message_lane.target_sign, + relay_substrate_client::TransactionEra::new( + best_block_id, + self.message_lane.target_transactions_mortality, + ), + UnsignedTransaction::new(call, transaction_nonce), + ); + log::trace!( + target: "bridge", + "Prepared Kusama -> Polkadot delivery transaction. Weight: /{}, size: {}/{}", + bp_polkadot::max_extrinsic_weight(), + transaction.encode().len(), + bp_polkadot::max_extrinsic_size(), + ); + Bytes(transaction.encode()) + } +} + +/// Kusama node as messages source. +type KusamaSourceClient = SubstrateMessagesSource; + +/// Polkadot node as messages target. +type PolkadotTargetClient = SubstrateMessagesTarget; + +/// Run Kusama-to-Polkadot messages sync. 
+pub async fn run( + params: MessagesRelayParams< + Kusama, + KusamaSigningParams, + Polkadot, + PolkadotSigningParams, + MixStrategy, + >, +) -> anyhow::Result<()> { + let stall_timeout = relay_substrate_client::bidirectional_transaction_stall_timeout( + params.source_transactions_mortality, + params.target_transactions_mortality, + Kusama::AVERAGE_BLOCK_INTERVAL, + Polkadot::AVERAGE_BLOCK_INTERVAL, + STALL_TIMEOUT, + ); + let relayer_id_at_kusama = (*params.source_sign.public().as_array_ref()).into(); + + let lane_id = params.lane_id; + let source_client = params.source_client; + let target_client = params.target_client; + let lane = KusamaMessagesToPolkadot { + message_lane: SubstrateMessageLaneToSubstrate { + source_client: source_client.clone(), + source_sign: params.source_sign, + source_transactions_mortality: params.source_transactions_mortality, + target_client: target_client.clone(), + target_sign: params.target_sign, + target_transactions_mortality: params.target_transactions_mortality, + relayer_id_at_source: relayer_id_at_kusama, + }, + }; + + // 2/3 is reserved for proofs and tx overhead + let max_messages_size_in_single_batch = bp_polkadot::max_extrinsic_size() / 3; + // we don't know exact weights of the Polkadot runtime. So to guess weights we'll be using + // weights from Rialto and then simply dividing it by x2. 
+ let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = + select_delivery_transaction_limits::< + pallet_bridge_messages::weights::RialtoWeight, + >( + bp_polkadot::max_extrinsic_weight(), + bp_polkadot::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + ); + let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = + (max_messages_in_single_batch / 2, max_messages_weight_in_single_batch / 2); + + log::info!( + target: "bridge", + "Starting Kusama -> Polkadot messages relay.\n\t\ + Kusama relayer account id: {:?}\n\t\ + Max messages in single transaction: {}\n\t\ + Max messages size in single transaction: {}\n\t\ + Max messages weight in single transaction: {}\n\t\ + Tx mortality: {:?}/{:?}\n\t\ + Stall timeout: {:?}", + lane.message_lane.relayer_id_at_source, + max_messages_in_single_batch, + max_messages_size_in_single_batch, + max_messages_weight_in_single_batch, + params.source_transactions_mortality, + params.target_transactions_mortality, + stall_timeout, + ); + + let standalone_metrics = params + .standalone_metrics + .map(Ok) + .unwrap_or_else(|| standalone_metrics(source_client.clone(), target_client.clone()))?; + messages_relay::message_lane_loop::run( + messages_relay::message_lane_loop::Params { + lane: lane_id, + source_tick: Kusama::AVERAGE_BLOCK_INTERVAL, + target_tick: Polkadot::AVERAGE_BLOCK_INTERVAL, + reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY, + stall_timeout, + delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams { + max_unrewarded_relayer_entries_at_target: + bp_polkadot::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + max_unconfirmed_nonces_at_target: + bp_polkadot::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, + max_messages_in_single_batch, + max_messages_weight_in_single_batch, + max_messages_size_in_single_batch, + relay_strategy: params.relay_strategy, + }, + }, + KusamaSourceClient::new( + source_client.clone(), + lane.clone(), + lane_id, + 
params.target_to_source_headers_relay, + ), + PolkadotTargetClient::new( + target_client, + lane, + lane_id, + standalone_metrics.clone(), + params.source_to_target_headers_relay, + ), + standalone_metrics.register_and_spawn(params.metrics_params)?, + futures::future::pending(), + ) + .await + .map_err(Into::into) +} + +/// Create standalone metrics for the Kusama -> Polkadot messages loop. +pub(crate) fn standalone_metrics( + source_client: Client, + target_client: Client, +) -> anyhow::Result> { + substrate_relay_helper::messages_lane::standalone_metrics( + source_client, + target_client, + Some(crate::chains::kusama::TOKEN_ID), + Some(crate::chains::polkadot::TOKEN_ID), + Some(crate::chains::polkadot::kusama_to_polkadot_conversion_rate_params()), + Some(crate::chains::kusama::polkadot_to_kusama_conversion_rate_params()), + ) +} + +/// Update Polkadot -> Kusama conversion rate, stored in Kusama runtime storage. +pub(crate) async fn update_polkadot_to_kusama_conversion_rate( + client: Client, + signer: ::AccountKeyPair, + updated_rate: f64, +) -> anyhow::Result<()> { + let genesis_hash = *client.genesis_hash(); + let signer_id = (*signer.public().as_array_ref()).into(); + client + .submit_signed_extrinsic(signer_id, move |_, transaction_nonce| { + Bytes( + Kusama::sign_transaction( + genesis_hash, + &signer, + relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new( + relay_kusama_client::runtime::Call::BridgePolkadotMessages( + relay_kusama_client::runtime::BridgePolkadotMessagesCall::update_pallet_parameter( + relay_kusama_client::runtime::BridgePolkadotMessagesParameter::PolkadotToKusamaConversionRate( + sp_runtime::FixedU128::from_float(updated_rate), + ) + ) + ), + transaction_nonce, + ), + ) + .encode(), + ) + }) + .await + .map(drop) + .map_err(|err| anyhow::format_err!("{:?}", err)) +} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/millau.rs b/polkadot/bridges/relays/bin-substrate/src/chains/millau.rs index 
3cba16ea32f44160aebf684ab7f82a120bf2cc54..755d7cc4430a442e39d149e23ad60cc05b279d82 100644 --- a/polkadot/bridges/relays/bin-substrate/src/chains/millau.rs +++ b/polkadot/bridges/relays/bin-substrate/src/chains/millau.rs @@ -19,14 +19,26 @@ use crate::cli::{ bridge, encode_call::{self, Call, CliEncodeCall}, - encode_message, send_message, CliChain, + encode_message, + send_message::{self, DispatchFeePayment}, + CliChain, }; +use anyhow::anyhow; use bp_message_dispatch::{CallOrigin, MessagePayload}; use codec::Decode; use frame_support::weights::{DispatchInfo, GetDispatchInfo, Weight}; use relay_millau_client::Millau; +use sp_core::storage::StorageKey; +use sp_runtime::FixedU128; use sp_version::RuntimeVersion; +// Millau/Rialto tokens have no any real value, so the conversion rate we use is always 1:1. But we +// want to test our code that is intended to work with real-value chains. So to keep it close to +// 1:1, we'll be treating Rialto as BTC and Millau as wBTC (only in relayer). + +/// The identifier of token, which value is associated with Millau token value by relayer. +pub(crate) const ASSOCIATED_TOKEN_ID: &str = crate::chains::kusama::TOKEN_ID; + impl CliEncodeCall for Millau { fn max_extrinsic_size() -> u32 { bp_millau::max_extrinsic_size() @@ -35,31 +47,32 @@ impl CliEncodeCall for Millau { fn encode_call(call: &Call) -> anyhow::Result { Ok(match call { Call::Raw { data } => Decode::decode(&mut &*data.0)?, - Call::Remark { remark_payload, .. 
} => millau_runtime::Call::System(millau_runtime::SystemCall::remark( - remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), - )), - Call::Transfer { recipient, amount } => millau_runtime::Call::Balances( - millau_runtime::BalancesCall::transfer(recipient.raw_id(), amount.cast()), - ), - Call::BridgeSendMessage { - lane, - payload, - fee, - bridge_instance_index, - } => match *bridge_instance_index { - bridge::MILLAU_TO_RIALTO_INDEX => { - let payload = Decode::decode(&mut &*payload.0)?; - millau_runtime::Call::BridgeRialtoMessages(millau_runtime::MessagesCall::send_message( - lane.0, - payload, - fee.cast(), - )) - } - _ => anyhow::bail!( - "Unsupported target bridge pallet with instance index: {}", - bridge_instance_index - ), - }, + Call::Remark { remark_payload, .. } => + millau_runtime::Call::System(millau_runtime::SystemCall::remark { + remark: remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), + }), + Call::Transfer { recipient, amount } => + millau_runtime::Call::Balances(millau_runtime::BalancesCall::transfer { + dest: recipient.raw_id(), + value: amount.cast(), + }), + Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } => + match *bridge_instance_index { + bridge::MILLAU_TO_RIALTO_INDEX => { + let payload = Decode::decode(&mut &*payload.0)?; + millau_runtime::Call::BridgeRialtoMessages( + millau_runtime::MessagesCall::send_message { + lane_id: lane.0, + payload, + delivery_and_dispatch_fee: fee.cast(), + }, + ) + }, + _ => anyhow::bail!( + "Unsupported target bridge pallet with instance index: {}", + bridge_instance_index + ), + }, }) } @@ -72,7 +85,12 @@ impl CliChain for Millau { const RUNTIME_VERSION: RuntimeVersion = millau_runtime::VERSION; type KeyPair = sp_core::sr25519::Pair; - type MessagePayload = MessagePayload>; + type MessagePayload = MessagePayload< + bp_millau::AccountId, + bp_rialto::AccountSigner, + bp_rialto::Signature, + Vec, + >; fn ss58_format() -> u16 { millau_runtime::SS58Prefix::get() 
as u16 @@ -83,10 +101,12 @@ impl CliChain for Millau { } // TODO [#854|#843] support multiple bridges? - fn encode_message(message: encode_message::MessagePayload) -> Result { + fn encode_message( + message: encode_message::MessagePayload, + ) -> anyhow::Result { match message { encode_message::MessagePayload::Raw { data } => MessagePayload::decode(&mut &*data.0) - .map_err(|e| format!("Failed to decode Millau's MessagePayload: {:?}", e)), + .map_err(|e| anyhow!("Failed to decode Millau's MessagePayload: {:?}", e)), encode_message::MessagePayload::Call { mut call, mut sender } => { type Source = Millau; type Target = relay_rialto_client::Rialto; @@ -94,12 +114,29 @@ impl CliChain for Millau { sender.enforce_chain::(); let spec_version = Target::RUNTIME_VERSION.spec_version; let origin = CallOrigin::SourceAccount(sender.raw_id()); - encode_call::preprocess_call::(&mut call, bridge::MILLAU_TO_RIALTO_INDEX); - let call = Target::encode_call(&call).map_err(|e| e.to_string())?; + encode_call::preprocess_call::( + &mut call, + bridge::MILLAU_TO_RIALTO_INDEX, + ); + let call = Target::encode_call(&call)?; let weight = call.get_dispatch_info().weight; - Ok(send_message::message_payload(spec_version, weight, origin, &call)) - } + Ok(send_message::message_payload( + spec_version, + weight, + origin, + &call, + DispatchFeePayment::AtSourceChain, + )) + }, } } } + +/// Storage key and initial value of Rialto -> Millau conversion rate. 
+pub(crate) fn rialto_to_millau_conversion_rate_params() -> (StorageKey, FixedU128) { + ( + StorageKey(millau_runtime::rialto_messages::RialtoToMillauConversionRate::key().to_vec()), + millau_runtime::rialto_messages::INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE, + ) +} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs b/polkadot/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs index 58f0620b0764bf000b903f41476e2697b1d895f4..14a0430f6a9182f8487e9cf1e1177cb18d3db6e2 100644 --- a/polkadot/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs +++ b/polkadot/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs @@ -16,37 +16,64 @@ //! Millau-to-Rialto headers sync entrypoint. -use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; +use codec::Encode; +use sp_core::{Bytes, Pair}; use bp_header_chain::justification::GrandpaJustification; -use codec::Encode; use relay_millau_client::{Millau, SyncHeader as MillauSyncHeader}; use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{Chain, TransactionSignScheme}; -use sp_core::{Bytes, Pair}; +use relay_substrate_client::{Client, IndexOf, TransactionSignScheme, UnsignedTransaction}; +use substrate_relay_helper::finality_pipeline::{ + SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate, +}; /// Millau-to-Rialto finality sync pipeline. 
-pub(crate) type MillauFinalityToRialto = SubstrateFinalityToSubstrate; +pub(crate) type FinalityPipelineMillauToRialto = + SubstrateFinalityToSubstrate; + +#[derive(Clone, Debug)] +pub(crate) struct MillauFinalityToRialto { + finality_pipeline: FinalityPipelineMillauToRialto, +} + +impl MillauFinalityToRialto { + pub fn new(target_client: Client, target_sign: RialtoSigningParams) -> Self { + Self { finality_pipeline: FinalityPipelineMillauToRialto::new(target_client, target_sign) } + } +} impl SubstrateFinalitySyncPipeline for MillauFinalityToRialto { - const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD; + type FinalitySyncPipeline = FinalityPipelineMillauToRialto; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = + bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD; type TargetChain = Rialto; fn transactions_author(&self) -> bp_rialto::AccountId { - (*self.target_sign.public().as_array_ref()).into() + (*self.finality_pipeline.target_sign.public().as_array_ref()).into() } fn make_submit_finality_proof_transaction( &self, - transaction_nonce: ::Index, + era: bp_runtime::TransactionEraOf, + transaction_nonce: IndexOf, header: MillauSyncHeader, proof: GrandpaJustification, ) -> Bytes { - let call = rialto_runtime::BridgeGrandpaMillauCall::submit_finality_proof(header.into_inner(), proof).into(); + let call = rialto_runtime::BridgeGrandpaMillauCall::submit_finality_proof { + finality_target: Box::new(header.into_inner()), + justification: proof, + } + .into(); - let genesis_hash = *self.target_client.genesis_hash(); - let transaction = Rialto::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + let genesis_hash = *self.finality_pipeline.target_client.genesis_hash(); + let transaction = Rialto::sign_transaction( + genesis_hash, + &self.finality_pipeline.target_sign, + era, + UnsignedTransaction::new(call, transaction_nonce), + ); Bytes(transaction.encode()) } diff --git 
a/polkadot/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs b/polkadot/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs index 31dc51e9c27bb8d8c78fb39f18a3fffc97dc838a..c4179eea330f1cf6d61d9f2d032dfbca8184890e 100644 --- a/polkadot/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs +++ b/polkadot/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs @@ -16,64 +16,99 @@ //! Millau-to-Rialto messages sync entrypoint. -use crate::messages_lane::{ - select_delivery_transaction_limits, MessagesRelayParams, SubstrateMessageLane, SubstrateMessageLaneToSubstrate, -}; -use crate::messages_source::SubstrateMessagesSource; -use crate::messages_target::SubstrateMessagesTarget; +use std::ops::RangeInclusive; -use bp_messages::MessageNonce; -use bp_runtime::{MILLAU_CHAIN_ID, RIALTO_CHAIN_ID}; -use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; use codec::Encode; use frame_support::dispatch::GetDispatchInfo; -use messages_relay::message_lane::MessageLane; -use relay_millau_client::{HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams}; -use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{ - metrics::{FloatStorageValueMetric, StorageProofOverheadMetric}, - Chain, TransactionSignScheme, -}; use sp_core::{Bytes, Pair}; -use std::{ops::RangeInclusive, time::Duration}; + +use bp_messages::MessageNonce; +use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; +use frame_support::weights::Weight; +use messages_relay::{message_lane::MessageLane, relay_strategy::MixStrategy}; +use relay_millau_client::{ + HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams, +}; +use relay_rialto_client::{ + HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams, +}; +use relay_substrate_client::{Chain, Client, IndexOf, TransactionSignScheme, 
UnsignedTransaction}; +use substrate_relay_helper::{ + messages_lane::{ + select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics, + SubstrateMessageLane, SubstrateMessageLaneToSubstrate, + }, + messages_source::SubstrateMessagesSource, + messages_target::SubstrateMessagesTarget, + STALL_TIMEOUT, +}; /// Millau-to-Rialto message lane. -pub type MillauMessagesToRialto = +pub type MessageLaneMillauMessagesToRialto = SubstrateMessageLaneToSubstrate; +#[derive(Clone)] +pub struct MillauMessagesToRialto { + message_lane: MessageLaneMillauMessagesToRialto, +} + impl SubstrateMessageLane for MillauMessagesToRialto { - const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_rialto::TO_RIALTO_MESSAGE_DETAILS_METHOD; + type MessageLane = MessageLaneMillauMessagesToRialto; + + const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = + bp_rialto::TO_RIALTO_MESSAGE_DETAILS_METHOD; const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str = bp_rialto::TO_RIALTO_LATEST_GENERATED_NONCE_METHOD; - const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rialto::TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD; + const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = + bp_rialto::TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD; - const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_millau::FROM_MILLAU_LATEST_RECEIVED_NONCE_METHOD; + const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = + bp_millau::FROM_MILLAU_LATEST_RECEIVED_NONCE_METHOD; const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str = bp_millau::FROM_MILLAU_LATEST_CONFIRMED_NONCE_METHOD; - const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_millau::FROM_MILLAU_UNREWARDED_RELAYERS_STATE; + const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = + bp_millau::FROM_MILLAU_UNREWARDED_RELAYERS_STATE; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = + bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD; + const 
BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = + bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD; - const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD; - const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD; + const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str = bp_millau::WITH_RIALTO_MESSAGES_PALLET_NAME; + const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = bp_rialto::WITH_MILLAU_MESSAGES_PALLET_NAME; + + const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight = + bp_rialto::PAY_INBOUND_DISPATCH_FEE_WEIGHT; type SourceChain = Millau; type TargetChain = Rialto; fn source_transactions_author(&self) -> bp_millau::AccountId { - (*self.source_sign.public().as_array_ref()).into() + (*self.message_lane.source_sign.public().as_array_ref()).into() } fn make_messages_receiving_proof_transaction( &self, - transaction_nonce: ::Index, + best_block_id: MillauHeaderId, + transaction_nonce: IndexOf, _generated_at_block: RialtoHeaderId, - proof: ::MessagesReceivingProof, + proof: ::MessagesReceivingProof, ) -> Bytes { let (relayers_state, proof) = proof; let call: millau_runtime::Call = - millau_runtime::MessagesCall::receive_messages_delivery_proof(proof, relayers_state).into(); + millau_runtime::MessagesCall::receive_messages_delivery_proof { proof, relayers_state } + .into(); let call_weight = call.get_dispatch_info().weight; - let genesis_hash = *self.source_client.genesis_hash(); - let transaction = Millau::sign_transaction(genesis_hash, &self.source_sign, transaction_nonce, call); + let genesis_hash = *self.message_lane.source_client.genesis_hash(); + let transaction = Millau::sign_transaction( + genesis_hash, + &self.message_lane.source_sign, + relay_substrate_client::TransactionEra::new( + best_block_id, + self.message_lane.source_transactions_mortality, + ), + UnsignedTransaction::new(call, transaction_nonce), + ); log::trace!( target: 
"bridge", "Prepared Rialto -> Millau confirmation transaction. Weight: {}/{}, size: {}/{}", @@ -86,33 +121,38 @@ impl SubstrateMessageLane for MillauMessagesToRialto { } fn target_transactions_author(&self) -> bp_rialto::AccountId { - (*self.target_sign.public().as_array_ref()).into() + (*self.message_lane.target_sign.public().as_array_ref()).into() } fn make_messages_delivery_transaction( &self, - transaction_nonce: ::Index, + best_block_id: RialtoHeaderId, + transaction_nonce: IndexOf, _generated_at_header: MillauHeaderId, _nonces: RangeInclusive, - proof: ::MessagesProof, + proof: ::MessagesProof, ) -> Bytes { let (dispatch_weight, proof) = proof; - let FromBridgedChainMessagesProof { - ref nonces_start, - ref nonces_end, - .. - } = proof; + let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. } = proof; let messages_count = nonces_end - nonces_start + 1; - let call: rialto_runtime::Call = rialto_runtime::MessagesCall::receive_messages_proof( - self.relayer_id_at_source.clone(), + let call: rialto_runtime::Call = rialto_runtime::MessagesCall::receive_messages_proof { + relayer_id_at_bridged_chain: self.message_lane.relayer_id_at_source.clone(), proof, - messages_count as _, + messages_count: messages_count as _, dispatch_weight, - ) + } .into(); let call_weight = call.get_dispatch_info().weight; - let genesis_hash = *self.target_client.genesis_hash(); - let transaction = Rialto::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + let genesis_hash = *self.message_lane.target_client.genesis_hash(); + let transaction = Rialto::sign_transaction( + genesis_hash, + &self.message_lane.target_sign, + relay_substrate_client::TransactionEra::new( + best_block_id, + self.message_lane.target_transactions_mortality, + ), + UnsignedTransaction::new(call, transaction_nonce), + ); log::trace!( target: "bridge", "Prepared Millau -> Rialto delivery transaction. 
Weight: {}/{}, size: {}/{}", @@ -126,35 +166,52 @@ impl SubstrateMessageLane for MillauMessagesToRialto { } /// Millau node as messages source. -type MillauSourceClient = - SubstrateMessagesSource; +type MillauSourceClient = SubstrateMessagesSource; /// Rialto node as messages target. -type RialtoTargetClient = - SubstrateMessagesTarget; +type RialtoTargetClient = SubstrateMessagesTarget; /// Run Millau-to-Rialto messages sync. pub async fn run( - params: MessagesRelayParams, -) -> Result<(), String> { - let stall_timeout = Duration::from_secs(5 * 60); + params: MessagesRelayParams< + Millau, + MillauSigningParams, + Rialto, + RialtoSigningParams, + MixStrategy, + >, +) -> anyhow::Result<()> { + let stall_timeout = relay_substrate_client::bidirectional_transaction_stall_timeout( + params.source_transactions_mortality, + params.target_transactions_mortality, + Millau::AVERAGE_BLOCK_INTERVAL, + Rialto::AVERAGE_BLOCK_INTERVAL, + STALL_TIMEOUT, + ); let relayer_id_at_millau = (*params.source_sign.public().as_array_ref()).into(); let lane_id = params.lane_id; let source_client = params.source_client; + let target_client = params.target_client; let lane = MillauMessagesToRialto { - source_client: source_client.clone(), - source_sign: params.source_sign, - target_client: params.target_client.clone(), - target_sign: params.target_sign, - relayer_id_at_source: relayer_id_at_millau, + message_lane: SubstrateMessageLaneToSubstrate { + source_client: source_client.clone(), + source_sign: params.source_sign, + source_transactions_mortality: params.source_transactions_mortality, + target_client: target_client.clone(), + target_sign: params.target_sign, + target_transactions_mortality: params.target_transactions_mortality, + relayer_id_at_source: relayer_id_at_millau, + }, }; // 2/3 is reserved for proofs and tx overhead let max_messages_size_in_single_batch = bp_rialto::max_extrinsic_size() / 3; // TODO: use Millau weights after 
https://github.com/paritytech/parity-bridges-common/issues/390 let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = - select_delivery_transaction_limits::>( + select_delivery_transaction_limits::< + pallet_bridge_messages::weights::RialtoWeight, + >( bp_rialto::max_extrinsic_weight(), bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, ); @@ -165,13 +222,22 @@ pub async fn run( Millau relayer account id: {:?}\n\t\ Max messages in single transaction: {}\n\t\ Max messages size in single transaction: {}\n\t\ - Max messages weight in single transaction: {}", - lane.relayer_id_at_source, + Max messages weight in single transaction: {}\n\t\ + Tx mortality: {:?}/{:?}\n\t\ + Stall timeout: {:?}", + lane.message_lane.relayer_id_at_source, max_messages_in_single_batch, max_messages_size_in_single_batch, max_messages_weight_in_single_batch, + params.source_transactions_mortality, + params.target_transactions_mortality, + stall_timeout, ); + let standalone_metrics = params + .standalone_metrics + .map(Ok) + .unwrap_or_else(|| standalone_metrics(source_client.clone(), target_client.clone()))?; messages_relay::message_lane_loop::run( messages_relay::message_lane_loop::Params { lane: lane_id, @@ -180,58 +246,80 @@ pub async fn run( reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY, stall_timeout, delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams { - max_unrewarded_relayer_entries_at_target: bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, - max_unconfirmed_nonces_at_target: bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, + max_unrewarded_relayer_entries_at_target: + bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + max_unconfirmed_nonces_at_target: + bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, max_messages_in_single_batch, max_messages_weight_in_single_batch, max_messages_size_in_single_batch, - relayer_mode: messages_relay::message_lane_loop::RelayerMode::Altruistic, + relay_strategy: 
params.relay_strategy, }, }, MillauSourceClient::new( source_client.clone(), lane.clone(), lane_id, - RIALTO_CHAIN_ID, params.target_to_source_headers_relay, ), RialtoTargetClient::new( - params.target_client, + target_client, lane, lane_id, - MILLAU_CHAIN_ID, + standalone_metrics.clone(), params.source_to_target_headers_relay, ), - relay_utils::relay_metrics( - Some(messages_relay::message_lane_loop::metrics_prefix::< - MillauMessagesToRialto, - >(&lane_id)), - params.metrics_params, - ) - .standalone_metric(|registry, prefix| { - StorageProofOverheadMetric::new( - registry, - prefix, - source_client.clone(), - "millau_storage_proof_overhead".into(), - "Millau storage proof overhead".into(), - ) - })? - .standalone_metric(|registry, prefix| { - FloatStorageValueMetric::<_, sp_runtime::FixedU128>::new( - registry, - prefix, - source_client, - sp_core::storage::StorageKey( - millau_runtime::rialto_messages::RialtoToMillauConversionRate::key().to_vec(), - ), - Some(millau_runtime::rialto_messages::INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE), - "millau_rialto_to_millau_conversion_rate".into(), - "Rialto to Millau tokens conversion rate (used by Rialto)".into(), - ) - })? - .into_params(), + standalone_metrics.register_and_spawn(params.metrics_params)?, futures::future::pending(), ) .await + .map_err(Into::into) +} + +/// Create standalone metrics for the Millau -> Rialto messages loop. +pub(crate) fn standalone_metrics( + source_client: Client, + target_client: Client, +) -> anyhow::Result> { + substrate_relay_helper::messages_lane::standalone_metrics( + source_client, + target_client, + Some(crate::chains::millau::ASSOCIATED_TOKEN_ID), + Some(crate::chains::rialto::ASSOCIATED_TOKEN_ID), + Some(crate::chains::rialto::millau_to_rialto_conversion_rate_params()), + Some(crate::chains::millau::rialto_to_millau_conversion_rate_params()), + ) +} + +/// Update Rialto -> Millau conversion rate, stored in Millau runtime storage. 
+pub(crate) async fn update_rialto_to_millau_conversion_rate( + client: Client, + signer: ::AccountKeyPair, + updated_rate: f64, +) -> anyhow::Result<()> { + let genesis_hash = *client.genesis_hash(); + let signer_id = (*signer.public().as_array_ref()).into(); + client + .submit_signed_extrinsic(signer_id, move |_, transaction_nonce| { + Bytes( + Millau::sign_transaction( + genesis_hash, + &signer, + relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new( + millau_runtime::MessagesCall::update_pallet_parameter { + parameter: millau_runtime::rialto_messages::MillauToRialtoMessagesParameter::RialtoToMillauConversionRate( + sp_runtime::FixedU128::from_float(updated_rate), + ), + } + .into(), + transaction_nonce, + ), + ) + .encode(), + ) + }) + .await + .map(drop) + .map_err(|err| anyhow::format_err!("{:?}", err)) } diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/mod.rs b/polkadot/bridges/relays/bin-substrate/src/chains/mod.rs index 09d3c3e9c060a9f114a605e1f3ad5a3c353ffa01..e9cb2d9b737f1a55caa6ce9beea3870e8ffa95b0 100644 --- a/polkadot/bridges/relays/bin-substrate/src/chains/mod.rs +++ b/polkadot/bridges/relays/bin-substrate/src/chains/mod.rs @@ -16,8 +16,12 @@ //! Chain-specific relayer configuration. 
+pub mod kusama_headers_to_polkadot; +pub mod kusama_messages_to_polkadot; pub mod millau_headers_to_rialto; pub mod millau_messages_to_rialto; +pub mod polkadot_headers_to_kusama; +pub mod polkadot_messages_to_kusama; pub mod rialto_headers_to_millau; pub mod rialto_messages_to_millau; pub mod rococo_headers_to_wococo; @@ -26,45 +30,25 @@ pub mod westend_headers_to_millau; pub mod wococo_headers_to_rococo; pub mod wococo_messages_to_rococo; +mod kusama; mod millau; +mod polkadot; mod rialto; +mod rialto_parachain; mod rococo; mod westend; mod wococo; -use relay_utils::metrics::{FloatJsonValueMetric, MetricsParams}; +use relay_utils::metrics::{MetricsParams, StandaloneMetric}; pub(crate) fn add_polkadot_kusama_price_metrics( params: MetricsParams, ) -> anyhow::Result { - Ok( - relay_utils::relay_metrics(Some(finality_relay::metrics_prefix::()), params) - // Polkadot/Kusama prices are added as metrics here, because atm we don't have Polkadot <-> Kusama - // relays, but we want to test metrics/dashboards in advance - .standalone_metric(|registry, prefix| { - FloatJsonValueMetric::new( - registry, - prefix, - "https://api.coingecko.com/api/v3/simple/price?ids=Polkadot&vs_currencies=btc".into(), - "$.polkadot.btc".into(), - "polkadot_to_base_conversion_rate".into(), - "Rate used to convert from DOT to some BASE tokens".into(), - ) - }) - .map_err(|e| anyhow::format_err!("{}", e))? - .standalone_metric(|registry, prefix| { - FloatJsonValueMetric::new( - registry, - prefix, - "https://api.coingecko.com/api/v3/simple/price?ids=Kusama&vs_currencies=btc".into(), - "$.kusama.btc".into(), - "kusama_to_base_conversion_rate".into(), - "Rate used to convert from KSM to some BASE tokens".into(), - ) - }) - .map_err(|e| anyhow::format_err!("{}", e))? - .into_params(), - ) + substrate_relay_helper::helpers::token_price_metric(polkadot::TOKEN_ID)? + .register_and_spawn(¶ms.registry)?; + substrate_relay_helper::helpers::token_price_metric(kusama::TOKEN_ID)? 
+ .register_and_spawn(¶ms.registry)?; + Ok(params) } #[cfg(test)] @@ -75,7 +59,7 @@ mod tests { use frame_support::dispatch::GetDispatchInfo; use relay_millau_client::Millau; use relay_rialto_client::Rialto; - use relay_substrate_client::TransactionSignScheme; + use relay_substrate_client::{TransactionSignScheme, UnsignedTransaction}; use sp_core::Pair; use sp_runtime::traits::{IdentifyAccount, Verify}; @@ -83,7 +67,8 @@ mod tests { fn millau_signature_is_valid_on_rialto() { let millau_sign = relay_millau_client::SigningParams::from_string("//Dave", None).unwrap(); - let call = rialto_runtime::Call::System(rialto_runtime::SystemCall::remark(vec![])); + let call = + rialto_runtime::Call::System(rialto_runtime::SystemCall::remark { remark: vec![] }); let millau_public: bp_millau::AccountSigner = millau_sign.public().into(); let millau_account_id: bp_millau::AccountId = millau_public.into_account(); @@ -94,7 +79,8 @@ mod tests { rialto_runtime::VERSION.spec_version, ); - let rialto_signer = relay_rialto_client::SigningParams::from_string("//Dave", None).unwrap(); + let rialto_signer = + relay_rialto_client::SigningParams::from_string("//Dave", None).unwrap(); let signature = rialto_signer.sign(&digest); assert!(signature.verify(&digest[..], &rialto_signer.public())); @@ -104,7 +90,8 @@ mod tests { fn rialto_signature_is_valid_on_millau() { let rialto_sign = relay_rialto_client::SigningParams::from_string("//Dave", None).unwrap(); - let call = millau_runtime::Call::System(millau_runtime::SystemCall::remark(vec![])); + let call = + millau_runtime::Call::System(millau_runtime::SystemCall::remark { remark: vec![] }); let rialto_public: bp_rialto::AccountSigner = rialto_sign.public().into(); let rialto_account_id: bp_rialto::AccountId = rialto_public.into_account(); @@ -115,7 +102,8 @@ mod tests { millau_runtime::VERSION.spec_version, ); - let millau_signer = relay_millau_client::SigningParams::from_string("//Dave", None).unwrap(); + let millau_signer = + 
relay_millau_client::SigningParams::from_string("//Dave", None).unwrap(); let signature = millau_signer.sign(&digest); assert!(signature.verify(&digest[..], &millau_signer.public())); @@ -130,22 +118,27 @@ mod tests { bp_millau::max_extrinsic_size(), ); - let call: millau_runtime::Call = millau_runtime::SystemCall::remark(vec![42; maximal_remark_size as _]).into(); + let call: millau_runtime::Call = + millau_runtime::SystemCall::remark { remark: vec![42; maximal_remark_size as _] } + .into(); let payload = send_message::message_payload( Default::default(), call.get_dispatch_info().weight, bp_message_dispatch::CallOrigin::SourceRoot, &call, + send_message::DispatchFeePayment::AtSourceChain, ); assert_eq!(Millau::verify_message(&payload), Ok(())); let call: millau_runtime::Call = - millau_runtime::SystemCall::remark(vec![42; (maximal_remark_size + 1) as _]).into(); + millau_runtime::SystemCall::remark { remark: vec![42; (maximal_remark_size + 1) as _] } + .into(); let payload = send_message::message_payload( Default::default(), call.get_dispatch_info().weight, bp_message_dispatch::CallOrigin::SourceRoot, &call, + send_message::DispatchFeePayment::AtSourceChain, ); assert!(Millau::verify_message(&payload).is_err()); } @@ -164,15 +157,18 @@ mod tests { fn maximal_rialto_to_millau_message_dispatch_weight_is_computed_correctly() { use rialto_runtime::millau_messages::Millau; - let maximal_dispatch_weight = - send_message::compute_maximal_message_dispatch_weight(bp_millau::max_extrinsic_weight()); - let call: millau_runtime::Call = rialto_runtime::SystemCall::remark(vec![]).into(); + let maximal_dispatch_weight = send_message::compute_maximal_message_dispatch_weight( + bp_millau::max_extrinsic_weight(), + ); + let call: millau_runtime::Call = + rialto_runtime::SystemCall::remark { remark: vec![] }.into(); let payload = send_message::message_payload( Default::default(), maximal_dispatch_weight, bp_message_dispatch::CallOrigin::SourceRoot, &call, + 
send_message::DispatchFeePayment::AtSourceChain, ); assert_eq!(Millau::verify_message(&payload), Ok(())); @@ -181,6 +177,7 @@ mod tests { maximal_dispatch_weight + 1, bp_message_dispatch::CallOrigin::SourceRoot, &call, + send_message::DispatchFeePayment::AtSourceChain, ); assert!(Millau::verify_message(&payload).is_err()); } @@ -189,15 +186,18 @@ mod tests { fn maximal_weight_fill_block_to_rialto_is_generated_correctly() { use millau_runtime::rialto_messages::Rialto; - let maximal_dispatch_weight = - send_message::compute_maximal_message_dispatch_weight(bp_rialto::max_extrinsic_weight()); - let call: rialto_runtime::Call = millau_runtime::SystemCall::remark(vec![]).into(); + let maximal_dispatch_weight = send_message::compute_maximal_message_dispatch_weight( + bp_rialto::max_extrinsic_weight(), + ); + let call: rialto_runtime::Call = + millau_runtime::SystemCall::remark { remark: vec![] }.into(); let payload = send_message::message_payload( Default::default(), maximal_dispatch_weight, bp_message_dispatch::CallOrigin::SourceRoot, &call, + send_message::DispatchFeePayment::AtSourceChain, ); assert_eq!(Rialto::verify_message(&payload), Ok(())); @@ -206,18 +206,20 @@ mod tests { maximal_dispatch_weight + 1, bp_message_dispatch::CallOrigin::SourceRoot, &call, + send_message::DispatchFeePayment::AtSourceChain, ); assert!(Rialto::verify_message(&payload).is_err()); } #[test] fn rialto_tx_extra_bytes_constant_is_correct() { - let rialto_call = rialto_runtime::Call::System(rialto_runtime::SystemCall::remark(vec![])); + let rialto_call = + rialto_runtime::Call::System(rialto_runtime::SystemCall::remark { remark: vec![] }); let rialto_tx = Rialto::sign_transaction( Default::default(), &sp_keyring::AccountKeyring::Alice.pair(), - 0, - rialto_call.clone(), + relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new(rialto_call.clone(), 0), ); let extra_bytes_in_transaction = rialto_tx.encode().len() - rialto_call.encode().len(); assert!( @@ -230,12 +232,13 
@@ mod tests { #[test] fn millau_tx_extra_bytes_constant_is_correct() { - let millau_call = millau_runtime::Call::System(millau_runtime::SystemCall::remark(vec![])); + let millau_call = + millau_runtime::Call::System(millau_runtime::SystemCall::remark { remark: vec![] }); let millau_tx = Millau::sign_transaction( Default::default(), &sp_keyring::AccountKeyring::Alice.pair(), - 0, - millau_call.clone(), + relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new(millau_call.clone(), 0), ); let extra_bytes_in_transaction = millau_tx.encode().len() - millau_call.encode().len(); assert!( @@ -274,13 +277,14 @@ mod rococo_tests { }; let actual = relay_rococo_client::runtime::BridgeGrandpaWococoCall::submit_finality_proof( - header.clone(), + Box::new(header.clone()), justification.clone(), ); - let expected = millau_runtime::BridgeGrandpaRialtoCall::::submit_finality_proof( - header, - justification, - ); + let expected = + millau_runtime::BridgeGrandpaCall::::submit_finality_proof { + finality_target: Box::new(header), + justification, + }; // when let actual_encoded = actual.encode(); @@ -321,11 +325,15 @@ mod westend_tests { votes_ancestries: vec![], }; - let actual = bp_westend::BridgeGrandpaRococoCall::submit_finality_proof(header.clone(), justification.clone()); - let expected = millau_runtime::BridgeGrandpaRialtoCall::::submit_finality_proof( - header, - justification, + let actual = relay_kusama_client::runtime::BridgePolkadotGrandpaCall::submit_finality_proof( + Box::new(header.clone()), + justification.clone(), ); + let expected = + millau_runtime::BridgeGrandpaCall::::submit_finality_proof { + finality_target: Box::new(header), + justification, + }; // when let actual_encoded = actual.encode(); diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/polkadot.rs b/polkadot/bridges/relays/bin-substrate/src/chains/polkadot.rs new file mode 100644 index 0000000000000000000000000000000000000000..7b6256d1749f8854a5e80efa72c8d0463436dd7d 
--- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/polkadot.rs @@ -0,0 +1,116 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use codec::Decode; +use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight}; +use relay_polkadot_client::Polkadot; +use sp_core::storage::StorageKey; +use sp_runtime::{FixedPointNumber, FixedU128}; +use sp_version::RuntimeVersion; + +use crate::cli::{ + bridge, + encode_call::{Call, CliEncodeCall}, + encode_message, CliChain, +}; + +/// Weight of the `system::remark` call at Polkadot. +/// +/// This weight is larger (x2) than actual weight at current Polkadot runtime to avoid unsuccessful +/// calls in the future. But since it is used only in tests (and on test chains), this is ok. +pub(crate) const SYSTEM_REMARK_CALL_WEIGHT: Weight = 2 * 1_345_000; + +/// Id of Polkadot token that is used to fetch token price. +pub(crate) const TOKEN_ID: &str = "polkadot"; + +impl CliEncodeCall for Polkadot { + fn max_extrinsic_size() -> u32 { + bp_polkadot::max_extrinsic_size() + } + + fn encode_call(call: &Call) -> anyhow::Result { + Ok(match call { + Call::Remark { remark_payload, .. 
} => relay_polkadot_client::runtime::Call::System( + relay_polkadot_client::runtime::SystemCall::remark( + remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), + ), + ), + Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } => + match *bridge_instance_index { + bridge::POLKADOT_TO_KUSAMA_INDEX => { + let payload = Decode::decode(&mut &*payload.0)?; + relay_polkadot_client::runtime::Call::BridgeKusamaMessages( + relay_polkadot_client::runtime::BridgeKusamaMessagesCall::send_message( + lane.0, payload, fee.0, + ), + ) + }, + _ => anyhow::bail!( + "Unsupported target bridge pallet with instance index: {}", + bridge_instance_index + ), + }, + _ => anyhow::bail!("Unsupported Polkadot call: {:?}", call), + }) + } + + fn get_dispatch_info( + call: &relay_polkadot_client::runtime::Call, + ) -> anyhow::Result { + match *call { + relay_polkadot_client::runtime::Call::System( + relay_polkadot_client::runtime::SystemCall::remark(_), + ) => Ok(DispatchInfo { + weight: crate::chains::polkadot::SYSTEM_REMARK_CALL_WEIGHT, + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }), + _ => anyhow::bail!("Unsupported Polkadot call: {:?}", call), + } + } +} + +impl CliChain for Polkadot { + const RUNTIME_VERSION: RuntimeVersion = bp_polkadot::VERSION; + + type KeyPair = sp_core::sr25519::Pair; + type MessagePayload = (); + + fn ss58_format() -> u16 { + 42 + } + + fn max_extrinsic_weight() -> Weight { + bp_polkadot::max_extrinsic_weight() + } + + fn encode_message( + _message: encode_message::MessagePayload, + ) -> anyhow::Result { + anyhow::bail!("Sending messages from Polkadot is not yet supported.") + } +} + +/// Storage key and initial value of Kusama -> Polkadot conversion rate. 
+pub(crate) fn kusama_to_polkadot_conversion_rate_params() -> (StorageKey, FixedU128) { + ( + bp_runtime::storage_parameter_key( + bp_polkadot::KUSAMA_TO_POLKADOT_CONVERSION_RATE_PARAMETER_NAME, + ), + // starting relay before this parameter will be set to some value may cause troubles + FixedU128::from_inner(FixedU128::DIV), + ) +} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs b/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs new file mode 100644 index 0000000000000000000000000000000000000000..b1948b234cc31c54cf251ddb524590e195e9d051 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs @@ -0,0 +1,131 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Polkadot-to-Kusama headers sync entrypoint. 
+ +use codec::Encode; +use sp_core::{Bytes, Pair}; + +use bp_header_chain::justification::GrandpaJustification; +use relay_kusama_client::{Kusama, SigningParams as KusamaSigningParams}; +use relay_polkadot_client::{Polkadot, SyncHeader as PolkadotSyncHeader}; +use relay_substrate_client::{Client, TransactionSignScheme, UnsignedTransaction}; +use relay_utils::metrics::MetricsParams; +use substrate_relay_helper::finality_pipeline::{ + SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate, +}; + +/// Maximal saturating difference between `balance(now)` and `balance(now-24h)` to treat +/// relay as gone wild. +/// +/// Actual value, returned by `maximal_balance_decrease_per_day_is_sane` test is approximately 0.001 +/// KSM, but let's round up to 0.1 KSM here. +pub(crate) const MAXIMAL_BALANCE_DECREASE_PER_DAY: bp_polkadot::Balance = 100_000_000_000; + +/// Polkadot-to-Kusama finality sync pipeline. +pub(crate) type FinalityPipelinePolkadotFinalityToKusama = + SubstrateFinalityToSubstrate; + +#[derive(Clone, Debug)] +pub(crate) struct PolkadotFinalityToKusama { + finality_pipeline: FinalityPipelinePolkadotFinalityToKusama, +} + +impl PolkadotFinalityToKusama { + pub fn new(target_client: Client, target_sign: KusamaSigningParams) -> Self { + Self { + finality_pipeline: FinalityPipelinePolkadotFinalityToKusama::new( + target_client, + target_sign, + ), + } + } +} + +impl SubstrateFinalitySyncPipeline for PolkadotFinalityToKusama { + type FinalitySyncPipeline = FinalityPipelinePolkadotFinalityToKusama; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = + bp_polkadot::BEST_FINALIZED_POLKADOT_HEADER_METHOD; + + type TargetChain = Kusama; + + fn customize_metrics(params: MetricsParams) -> anyhow::Result { + crate::chains::add_polkadot_kusama_price_metrics::(params) + } + + fn start_relay_guards(&self) { + relay_substrate_client::guard::abort_on_spec_version_change( + self.finality_pipeline.target_client.clone(), + bp_kusama::VERSION.spec_version, + ); + 
relay_substrate_client::guard::abort_when_account_balance_decreased( + self.finality_pipeline.target_client.clone(), + self.transactions_author(), + MAXIMAL_BALANCE_DECREASE_PER_DAY, + ); + } + + fn transactions_author(&self) -> bp_kusama::AccountId { + (*self.finality_pipeline.target_sign.public().as_array_ref()).into() + } + + fn make_submit_finality_proof_transaction( + &self, + era: bp_runtime::TransactionEraOf, + transaction_nonce: bp_runtime::IndexOf, + header: PolkadotSyncHeader, + proof: GrandpaJustification, + ) -> Bytes { + let call = relay_kusama_client::runtime::Call::BridgePolkadotGrandpa( + relay_kusama_client::runtime::BridgePolkadotGrandpaCall::submit_finality_proof( + Box::new(header.into_inner()), + proof, + ), + ); + let genesis_hash = *self.finality_pipeline.target_client.genesis_hash(); + let transaction = Kusama::sign_transaction( + genesis_hash, + &self.finality_pipeline.target_sign, + era, + UnsignedTransaction::new(call, transaction_nonce), + ); + + Bytes(transaction.encode()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::chains::kusama_headers_to_polkadot::tests::compute_maximal_balance_decrease_per_day; + + #[test] + fn maximal_balance_decrease_per_day_is_sane() { + // we expect Polkadot -> Kusama relay to be running in mandatory-headers-only mode + // => we expect single header for every Polkadot session + let maximal_balance_decrease = compute_maximal_balance_decrease_per_day::< + bp_kusama::Balance, + bp_kusama::WeightToFee, + >(bp_polkadot::DAYS / bp_polkadot::SESSION_LENGTH + 1); + assert!( + MAXIMAL_BALANCE_DECREASE_PER_DAY >= maximal_balance_decrease, + "Maximal expected loss per day {} is larger than hardcoded {}", + maximal_balance_decrease, + MAXIMAL_BALANCE_DECREASE_PER_DAY, + ); + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs b/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs new file mode 100644 index 
0000000000000000000000000000000000000000..bc7f222430922997e08032448780cc4bef36bec9 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs @@ -0,0 +1,330 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Polkadot-to-Kusama messages sync entrypoint. + +use std::ops::RangeInclusive; + +use codec::Encode; +use sp_core::{Bytes, Pair}; + +use bp_messages::MessageNonce; +use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; +use frame_support::weights::Weight; +use messages_relay::{message_lane::MessageLane, relay_strategy::MixStrategy}; +use relay_kusama_client::{ + HeaderId as KusamaHeaderId, Kusama, SigningParams as KusamaSigningParams, +}; +use relay_polkadot_client::{ + HeaderId as PolkadotHeaderId, Polkadot, SigningParams as PolkadotSigningParams, +}; +use relay_substrate_client::{Chain, Client, TransactionSignScheme, UnsignedTransaction}; +use substrate_relay_helper::{ + messages_lane::{ + select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics, + SubstrateMessageLane, SubstrateMessageLaneToSubstrate, + }, + messages_source::SubstrateMessagesSource, + messages_target::SubstrateMessagesTarget, + STALL_TIMEOUT, +}; + +/// Polkadot-to-Kusama message lane. 
+pub type MessageLanePolkadotMessagesToKusama = + SubstrateMessageLaneToSubstrate; + +#[derive(Clone)] +pub struct PolkadotMessagesToKusama { + message_lane: MessageLanePolkadotMessagesToKusama, +} + +impl SubstrateMessageLane for PolkadotMessagesToKusama { + type MessageLane = MessageLanePolkadotMessagesToKusama; + const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = + bp_kusama::TO_KUSAMA_MESSAGE_DETAILS_METHOD; + const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str = + bp_kusama::TO_KUSAMA_LATEST_GENERATED_NONCE_METHOD; + const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = + bp_kusama::TO_KUSAMA_LATEST_RECEIVED_NONCE_METHOD; + + const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = + bp_polkadot::FROM_POLKADOT_LATEST_RECEIVED_NONCE_METHOD; + const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str = + bp_polkadot::FROM_POLKADOT_LATEST_CONFIRMED_NONCE_METHOD; + const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = + bp_polkadot::FROM_POLKADOT_UNREWARDED_RELAYERS_STATE; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = + bp_polkadot::BEST_FINALIZED_POLKADOT_HEADER_METHOD; + const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = + bp_kusama::BEST_FINALIZED_KUSAMA_HEADER_METHOD; + + const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str = + bp_polkadot::WITH_KUSAMA_MESSAGES_PALLET_NAME; + const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = + bp_kusama::WITH_POLKADOT_MESSAGES_PALLET_NAME; + + const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight = + bp_kusama::PAY_INBOUND_DISPATCH_FEE_WEIGHT; + + type SourceChain = Polkadot; + type TargetChain = Kusama; + + fn source_transactions_author(&self) -> bp_polkadot::AccountId { + (*self.message_lane.source_sign.public().as_array_ref()).into() + } + + fn make_messages_receiving_proof_transaction( + &self, + best_block_id: PolkadotHeaderId, + transaction_nonce: bp_runtime::IndexOf, + _generated_at_block: KusamaHeaderId, + proof: 
::MessagesReceivingProof, + ) -> Bytes { + let (relayers_state, proof) = proof; + let call = relay_polkadot_client::runtime::Call::BridgeKusamaMessages( + relay_polkadot_client::runtime::BridgeKusamaMessagesCall::receive_messages_delivery_proof( + proof, + relayers_state, + ), + ); + let genesis_hash = *self.message_lane.source_client.genesis_hash(); + let transaction = Polkadot::sign_transaction( + genesis_hash, + &self.message_lane.source_sign, + relay_substrate_client::TransactionEra::new( + best_block_id, + self.message_lane.source_transactions_mortality, + ), + UnsignedTransaction::new(call, transaction_nonce), + ); + log::trace!( + target: "bridge", + "Prepared Kusama -> Polkadot confirmation transaction. Weight: /{}, size: {}/{}", + bp_polkadot::max_extrinsic_weight(), + transaction.encode().len(), + bp_polkadot::max_extrinsic_size(), + ); + Bytes(transaction.encode()) + } + + fn target_transactions_author(&self) -> bp_kusama::AccountId { + (*self.message_lane.target_sign.public().as_array_ref()).into() + } + + fn make_messages_delivery_transaction( + &self, + best_block_id: KusamaHeaderId, + transaction_nonce: bp_runtime::IndexOf, + _generated_at_header: PolkadotHeaderId, + _nonces: RangeInclusive, + proof: ::MessagesProof, + ) -> Bytes { + let (dispatch_weight, proof) = proof; + let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. 
} = proof; + let messages_count = nonces_end - nonces_start + 1; + + let call = relay_kusama_client::runtime::Call::BridgePolkadotMessages( + relay_kusama_client::runtime::BridgePolkadotMessagesCall::receive_messages_proof( + self.message_lane.relayer_id_at_source.clone(), + proof, + messages_count as _, + dispatch_weight, + ), + ); + let genesis_hash = *self.message_lane.target_client.genesis_hash(); + let transaction = Kusama::sign_transaction( + genesis_hash, + &self.message_lane.target_sign, + relay_substrate_client::TransactionEra::new( + best_block_id, + self.message_lane.target_transactions_mortality, + ), + UnsignedTransaction::new(call, transaction_nonce), + ); + log::trace!( + target: "bridge", + "Prepared Polkadot -> Kusama delivery transaction. Weight: /{}, size: {}/{}", + bp_kusama::max_extrinsic_weight(), + transaction.encode().len(), + bp_kusama::max_extrinsic_size(), + ); + Bytes(transaction.encode()) + } +} + +/// Polkadot node as messages source. +type PolkadotSourceClient = SubstrateMessagesSource; + +/// Kusama node as messages target. +type KusamaTargetClient = SubstrateMessagesTarget; + +/// Run Polkadot-to-Kusama messages sync. 
+pub async fn run( + params: MessagesRelayParams< + Polkadot, + PolkadotSigningParams, + Kusama, + KusamaSigningParams, + MixStrategy, + >, +) -> anyhow::Result<()> { + let stall_timeout = relay_substrate_client::bidirectional_transaction_stall_timeout( + params.source_transactions_mortality, + params.target_transactions_mortality, + Polkadot::AVERAGE_BLOCK_INTERVAL, + Kusama::AVERAGE_BLOCK_INTERVAL, + STALL_TIMEOUT, + ); + let relayer_id_at_polkadot = (*params.source_sign.public().as_array_ref()).into(); + + let lane_id = params.lane_id; + let source_client = params.source_client; + let target_client = params.target_client; + let lane = PolkadotMessagesToKusama { + message_lane: SubstrateMessageLaneToSubstrate { + source_client: source_client.clone(), + source_sign: params.source_sign, + source_transactions_mortality: params.source_transactions_mortality, + target_client: target_client.clone(), + target_sign: params.target_sign, + target_transactions_mortality: params.target_transactions_mortality, + relayer_id_at_source: relayer_id_at_polkadot, + }, + }; + + // 2/3 is reserved for proofs and tx overhead + let max_messages_size_in_single_batch = bp_kusama::max_extrinsic_size() / 3; + // we don't know exact weights of the Kusama runtime. So to guess weights we'll be using + // weights from Rialto and then simply dividing it by x2. 
+ let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = + select_delivery_transaction_limits::< + pallet_bridge_messages::weights::RialtoWeight, + >( + bp_kusama::max_extrinsic_weight(), + bp_kusama::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + ); + let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = + (max_messages_in_single_batch / 2, max_messages_weight_in_single_batch / 2); + + log::info!( + target: "bridge", + "Starting Polkadot -> Kusama messages relay.\n\t\ + Polkadot relayer account id: {:?}\n\t\ + Max messages in single transaction: {}\n\t\ + Max messages size in single transaction: {}\n\t\ + Max messages weight in single transaction: {}\n\t\ + Tx mortality: {:?}/{:?}\n\t\ + Stall timeout: {:?}", + lane.message_lane.relayer_id_at_source, + max_messages_in_single_batch, + max_messages_size_in_single_batch, + max_messages_weight_in_single_batch, + params.source_transactions_mortality, + params.target_transactions_mortality, + stall_timeout, + ); + + let standalone_metrics = params + .standalone_metrics + .map(Ok) + .unwrap_or_else(|| standalone_metrics(source_client.clone(), target_client.clone()))?; + messages_relay::message_lane_loop::run( + messages_relay::message_lane_loop::Params { + lane: lane_id, + source_tick: Polkadot::AVERAGE_BLOCK_INTERVAL, + target_tick: Kusama::AVERAGE_BLOCK_INTERVAL, + reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY, + stall_timeout, + delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams { + max_unrewarded_relayer_entries_at_target: + bp_kusama::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + max_unconfirmed_nonces_at_target: + bp_kusama::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, + max_messages_in_single_batch, + max_messages_weight_in_single_batch, + max_messages_size_in_single_batch, + relay_strategy: params.relay_strategy, + }, + }, + PolkadotSourceClient::new( + source_client.clone(), + lane.clone(), + lane_id, + 
params.target_to_source_headers_relay, + ), + KusamaTargetClient::new( + target_client, + lane, + lane_id, + standalone_metrics.clone(), + params.source_to_target_headers_relay, + ), + standalone_metrics.register_and_spawn(params.metrics_params)?, + futures::future::pending(), + ) + .await + .map_err(Into::into) +} + +/// Create standalone metrics for the Polkadot -> Kusama messages loop. +pub(crate) fn standalone_metrics( + source_client: Client, + target_client: Client, +) -> anyhow::Result> { + substrate_relay_helper::messages_lane::standalone_metrics( + source_client, + target_client, + Some(crate::chains::polkadot::TOKEN_ID), + Some(crate::chains::kusama::TOKEN_ID), + Some(crate::chains::kusama::polkadot_to_kusama_conversion_rate_params()), + Some(crate::chains::polkadot::kusama_to_polkadot_conversion_rate_params()), + ) +} + +/// Update Kusama -> Polkadot conversion rate, stored in Polkadot runtime storage. +pub(crate) async fn update_kusama_to_polkadot_conversion_rate( + client: Client, + signer: ::AccountKeyPair, + updated_rate: f64, +) -> anyhow::Result<()> { + let genesis_hash = *client.genesis_hash(); + let signer_id = (*signer.public().as_array_ref()).into(); + client + .submit_signed_extrinsic(signer_id, move |_, transaction_nonce| { + Bytes( + Polkadot::sign_transaction( + genesis_hash, + &signer, + relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new( + relay_polkadot_client::runtime::Call::BridgeKusamaMessages( + relay_polkadot_client::runtime::BridgeKusamaMessagesCall::update_pallet_parameter( + relay_polkadot_client::runtime::BridgeKusamaMessagesParameter::KusamaToPolkadotConversionRate( + sp_runtime::FixedU128::from_float(updated_rate), + ) + ) + ), + transaction_nonce, + ), + ) + .encode(), + ) + }) + .await + .map(drop) + .map_err(|err| anyhow::format_err!("{:?}", err)) +} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rialto.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rialto.rs index 
9a6185b4fc7d9618e52188ae40a56878161f1e52..2d873a24ba7af3090d499c547ee21f4d1348ef41 100644 --- a/polkadot/bridges/relays/bin-substrate/src/chains/rialto.rs +++ b/polkadot/bridges/relays/bin-substrate/src/chains/rialto.rs @@ -19,14 +19,26 @@ use crate::cli::{ bridge, encode_call::{self, Call, CliEncodeCall}, - encode_message, send_message, CliChain, + encode_message, + send_message::{self, DispatchFeePayment}, + CliChain, }; +use anyhow::anyhow; use bp_message_dispatch::{CallOrigin, MessagePayload}; use codec::Decode; use frame_support::weights::{DispatchInfo, GetDispatchInfo, Weight}; use relay_rialto_client::Rialto; +use sp_core::storage::StorageKey; +use sp_runtime::FixedU128; use sp_version::RuntimeVersion; +// Millau/Rialto tokens have no any real value, so the conversion rate we use is always 1:1. But we +// want to test our code that is intended to work with real-value chains. So to keep it close to +// 1:1, we'll be treating Rialto as BTC and Millau as wBTC (only in relayer). + +/// The identifier of token, which value is associated with Rialto token value by relayer. +pub(crate) const ASSOCIATED_TOKEN_ID: &str = crate::chains::polkadot::TOKEN_ID; + impl CliEncodeCall for Rialto { fn max_extrinsic_size() -> u32 { bp_rialto::max_extrinsic_size() @@ -35,29 +47,32 @@ impl CliEncodeCall for Rialto { fn encode_call(call: &Call) -> anyhow::Result { Ok(match call { Call::Raw { data } => Decode::decode(&mut &*data.0)?, - Call::Remark { remark_payload, .. 
} => rialto_runtime::Call::System(rialto_runtime::SystemCall::remark( - remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), - )), - Call::Transfer { recipient, amount } => { - rialto_runtime::Call::Balances(rialto_runtime::BalancesCall::transfer(recipient.raw_id(), amount.0)) - } - Call::BridgeSendMessage { - lane, - payload, - fee, - bridge_instance_index, - } => match *bridge_instance_index { - bridge::RIALTO_TO_MILLAU_INDEX => { - let payload = Decode::decode(&mut &*payload.0)?; - rialto_runtime::Call::BridgeMillauMessages(rialto_runtime::MessagesCall::send_message( - lane.0, payload, fee.0, - )) - } - _ => anyhow::bail!( - "Unsupported target bridge pallet with instance index: {}", - bridge_instance_index - ), - }, + Call::Remark { remark_payload, .. } => + rialto_runtime::Call::System(rialto_runtime::SystemCall::remark { + remark: remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), + }), + Call::Transfer { recipient, amount } => + rialto_runtime::Call::Balances(rialto_runtime::BalancesCall::transfer { + dest: recipient.raw_id().into(), + value: amount.0, + }), + Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } => + match *bridge_instance_index { + bridge::RIALTO_TO_MILLAU_INDEX => { + let payload = Decode::decode(&mut &*payload.0)?; + rialto_runtime::Call::BridgeMillauMessages( + rialto_runtime::MessagesCall::send_message { + lane_id: lane.0, + payload, + delivery_and_dispatch_fee: fee.0, + }, + ) + }, + _ => anyhow::bail!( + "Unsupported target bridge pallet with instance index: {}", + bridge_instance_index + ), + }, }) } @@ -70,7 +85,12 @@ impl CliChain for Rialto { const RUNTIME_VERSION: RuntimeVersion = rialto_runtime::VERSION; type KeyPair = sp_core::sr25519::Pair; - type MessagePayload = MessagePayload>; + type MessagePayload = MessagePayload< + bp_rialto::AccountId, + bp_millau::AccountSigner, + bp_millau::Signature, + Vec, + >; fn ss58_format() -> u16 { rialto_runtime::SS58Prefix::get() as u16 @@ -80,10 
+100,12 @@ impl CliChain for Rialto { bp_rialto::max_extrinsic_weight() } - fn encode_message(message: encode_message::MessagePayload) -> Result { + fn encode_message( + message: encode_message::MessagePayload, + ) -> anyhow::Result { match message { encode_message::MessagePayload::Raw { data } => MessagePayload::decode(&mut &*data.0) - .map_err(|e| format!("Failed to decode Rialto's MessagePayload: {:?}", e)), + .map_err(|e| anyhow!("Failed to decode Rialto's MessagePayload: {:?}", e)), encode_message::MessagePayload::Call { mut call, mut sender } => { type Source = Rialto; type Target = relay_millau_client::Millau; @@ -91,12 +113,29 @@ impl CliChain for Rialto { sender.enforce_chain::(); let spec_version = Target::RUNTIME_VERSION.spec_version; let origin = CallOrigin::SourceAccount(sender.raw_id()); - encode_call::preprocess_call::(&mut call, bridge::RIALTO_TO_MILLAU_INDEX); - let call = Target::encode_call(&call).map_err(|e| e.to_string())?; + encode_call::preprocess_call::( + &mut call, + bridge::RIALTO_TO_MILLAU_INDEX, + ); + let call = Target::encode_call(&call)?; let weight = call.get_dispatch_info().weight; - Ok(send_message::message_payload(spec_version, weight, origin, &call)) - } + Ok(send_message::message_payload( + spec_version, + weight, + origin, + &call, + DispatchFeePayment::AtSourceChain, + )) + }, } } } + +/// Storage key and initial value of Millau -> Rialto conversion rate. 
+pub(crate) fn millau_to_rialto_conversion_rate_params() -> (StorageKey, FixedU128) { + ( + StorageKey(rialto_runtime::millau_messages::MillauToRialtoConversionRate::key().to_vec()), + rialto_runtime::millau_messages::INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE, + ) +} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs index 39295c89433e84a1d9d50e02534a8ec85c64461f..7e76f403c55aae1a9e26cb91b48e220b5874b6d3 100644 --- a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs +++ b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs @@ -16,41 +16,72 @@ //! Rialto-to-Millau headers sync entrypoint. -use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; +use codec::Encode; +use sp_core::{Bytes, Pair}; use bp_header_chain::justification::GrandpaJustification; -use codec::Encode; use relay_millau_client::{Millau, SigningParams as MillauSigningParams}; use relay_rialto_client::{Rialto, SyncHeader as RialtoSyncHeader}; -use relay_substrate_client::{Chain, TransactionSignScheme}; -use sp_core::{Bytes, Pair}; +use relay_substrate_client::{Client, IndexOf, TransactionSignScheme, UnsignedTransaction}; +use substrate_relay_helper::finality_pipeline::{ + SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate, +}; /// Rialto-to-Millau finality sync pipeline. 
-pub(crate) type RialtoFinalityToMillau = SubstrateFinalityToSubstrate; +pub(crate) type FinalityPipelineRialtoFinalityToMillau = + SubstrateFinalityToSubstrate; + +#[derive(Clone, Debug)] +pub struct RialtoFinalityToMillau { + finality_pipeline: FinalityPipelineRialtoFinalityToMillau, +} + +impl RialtoFinalityToMillau { + pub fn new(target_client: Client, target_sign: MillauSigningParams) -> Self { + Self { + finality_pipeline: FinalityPipelineRialtoFinalityToMillau::new( + target_client, + target_sign, + ), + } + } +} impl SubstrateFinalitySyncPipeline for RialtoFinalityToMillau { - const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD; + type FinalitySyncPipeline = FinalityPipelineRialtoFinalityToMillau; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = + bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD; type TargetChain = Millau; fn transactions_author(&self) -> bp_millau::AccountId { - (*self.target_sign.public().as_array_ref()).into() + (*self.finality_pipeline.target_sign.public().as_array_ref()).into() } fn make_submit_finality_proof_transaction( &self, - transaction_nonce: ::Index, + era: bp_runtime::TransactionEraOf, + transaction_nonce: IndexOf, header: RialtoSyncHeader, proof: GrandpaJustification, ) -> Bytes { - let call = millau_runtime::BridgeGrandpaRialtoCall::< + let call = millau_runtime::BridgeGrandpaCall::< millau_runtime::Runtime, millau_runtime::RialtoGrandpaInstance, - >::submit_finality_proof(header.into_inner(), proof) + >::submit_finality_proof { + finality_target: Box::new(header.into_inner()), + justification: proof, + } .into(); - let genesis_hash = *self.target_client.genesis_hash(); - let transaction = Millau::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + let genesis_hash = *self.finality_pipeline.target_client.genesis_hash(); + let transaction = Millau::sign_transaction( + genesis_hash, + &self.finality_pipeline.target_sign, + 
era, + UnsignedTransaction::new(call, transaction_nonce), + ); Bytes(transaction.encode()) } diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs index 89f9dd7e997edffa57df4f1a48badfb775896531..774da017df0c1263a965ccaebf350ceef5b77d2a 100644 --- a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs +++ b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs @@ -16,64 +16,99 @@ //! Rialto-to-Millau messages sync entrypoint. -use crate::messages_lane::{ - select_delivery_transaction_limits, MessagesRelayParams, SubstrateMessageLane, SubstrateMessageLaneToSubstrate, -}; -use crate::messages_source::SubstrateMessagesSource; -use crate::messages_target::SubstrateMessagesTarget; +use std::ops::RangeInclusive; -use bp_messages::MessageNonce; -use bp_runtime::{MILLAU_CHAIN_ID, RIALTO_CHAIN_ID}; -use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; use codec::Encode; use frame_support::dispatch::GetDispatchInfo; -use messages_relay::message_lane::MessageLane; -use relay_millau_client::{HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams}; -use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{ - metrics::{FloatStorageValueMetric, StorageProofOverheadMetric}, - Chain, TransactionSignScheme, -}; use sp_core::{Bytes, Pair}; -use std::{ops::RangeInclusive, time::Duration}; + +use bp_messages::MessageNonce; +use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; +use frame_support::weights::Weight; +use messages_relay::{message_lane::MessageLane, relay_strategy::MixStrategy}; +use relay_millau_client::{ + HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams, +}; +use relay_rialto_client::{ + HeaderId as RialtoHeaderId, Rialto, SigningParams as 
RialtoSigningParams, +}; +use relay_substrate_client::{Chain, Client, IndexOf, TransactionSignScheme, UnsignedTransaction}; +use substrate_relay_helper::{ + messages_lane::{ + select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics, + SubstrateMessageLane, SubstrateMessageLaneToSubstrate, + }, + messages_source::SubstrateMessagesSource, + messages_target::SubstrateMessagesTarget, + STALL_TIMEOUT, +}; /// Rialto-to-Millau message lane. -pub type RialtoMessagesToMillau = +pub type MessageLaneRialtoMessagesToMillau = SubstrateMessageLaneToSubstrate; +#[derive(Clone)] +pub struct RialtoMessagesToMillau { + message_lane: MessageLaneRialtoMessagesToMillau, +} + impl SubstrateMessageLane for RialtoMessagesToMillau { - const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_millau::TO_MILLAU_MESSAGE_DETAILS_METHOD; + type MessageLane = MessageLaneRialtoMessagesToMillau; + + const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = + bp_millau::TO_MILLAU_MESSAGE_DETAILS_METHOD; const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str = bp_millau::TO_MILLAU_LATEST_GENERATED_NONCE_METHOD; - const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_millau::TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD; + const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = + bp_millau::TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD; - const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rialto::FROM_RIALTO_LATEST_RECEIVED_NONCE_METHOD; + const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = + bp_rialto::FROM_RIALTO_LATEST_RECEIVED_NONCE_METHOD; const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str = bp_rialto::FROM_RIALTO_LATEST_CONFIRMED_NONCE_METHOD; - const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_rialto::FROM_RIALTO_UNREWARDED_RELAYERS_STATE; + const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = + bp_rialto::FROM_RIALTO_UNREWARDED_RELAYERS_STATE; + + const 
BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = + bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD; + const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = + bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD; - const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD; - const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD; + const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str = bp_rialto::WITH_MILLAU_MESSAGES_PALLET_NAME; + const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = bp_millau::WITH_RIALTO_MESSAGES_PALLET_NAME; + + const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight = + bp_millau::PAY_INBOUND_DISPATCH_FEE_WEIGHT; type SourceChain = Rialto; type TargetChain = Millau; fn source_transactions_author(&self) -> bp_rialto::AccountId { - (*self.source_sign.public().as_array_ref()).into() + (*self.message_lane.source_sign.public().as_array_ref()).into() } fn make_messages_receiving_proof_transaction( &self, - transaction_nonce: ::Index, + best_block_id: RialtoHeaderId, + transaction_nonce: IndexOf, _generated_at_block: MillauHeaderId, - proof: ::MessagesReceivingProof, + proof: ::MessagesReceivingProof, ) -> Bytes { let (relayers_state, proof) = proof; let call: rialto_runtime::Call = - rialto_runtime::MessagesCall::receive_messages_delivery_proof(proof, relayers_state).into(); + rialto_runtime::MessagesCall::receive_messages_delivery_proof { proof, relayers_state } + .into(); let call_weight = call.get_dispatch_info().weight; - let genesis_hash = *self.source_client.genesis_hash(); - let transaction = Rialto::sign_transaction(genesis_hash, &self.source_sign, transaction_nonce, call); + let genesis_hash = *self.message_lane.source_client.genesis_hash(); + let transaction = Rialto::sign_transaction( + genesis_hash, + &self.message_lane.source_sign, + relay_substrate_client::TransactionEra::new( + best_block_id, + 
self.message_lane.source_transactions_mortality, + ), + UnsignedTransaction::new(call, transaction_nonce), + ); log::trace!( target: "bridge", "Prepared Millau -> Rialto confirmation transaction. Weight: {}/{}, size: {}/{}", @@ -86,33 +121,38 @@ impl SubstrateMessageLane for RialtoMessagesToMillau { } fn target_transactions_author(&self) -> bp_millau::AccountId { - (*self.target_sign.public().as_array_ref()).into() + (*self.message_lane.target_sign.public().as_array_ref()).into() } fn make_messages_delivery_transaction( &self, - transaction_nonce: ::Index, + best_block_id: MillauHeaderId, + transaction_nonce: IndexOf, _generated_at_header: RialtoHeaderId, _nonces: RangeInclusive, - proof: ::MessagesProof, + proof: ::MessagesProof, ) -> Bytes { let (dispatch_weight, proof) = proof; - let FromBridgedChainMessagesProof { - ref nonces_start, - ref nonces_end, - .. - } = proof; + let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. } = proof; let messages_count = nonces_end - nonces_start + 1; - let call: millau_runtime::Call = millau_runtime::MessagesCall::receive_messages_proof( - self.relayer_id_at_source.clone(), + let call: millau_runtime::Call = millau_runtime::MessagesCall::receive_messages_proof { + relayer_id_at_bridged_chain: self.message_lane.relayer_id_at_source.clone(), proof, - messages_count as _, + messages_count: messages_count as _, dispatch_weight, - ) + } .into(); let call_weight = call.get_dispatch_info().weight; - let genesis_hash = *self.target_client.genesis_hash(); - let transaction = Millau::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + let genesis_hash = *self.message_lane.target_client.genesis_hash(); + let transaction = Millau::sign_transaction( + genesis_hash, + &self.message_lane.target_sign, + relay_substrate_client::TransactionEra::new( + best_block_id, + self.message_lane.target_transactions_mortality, + ), + UnsignedTransaction::new(call, transaction_nonce), + ); log::trace!( target: 
"bridge", "Prepared Rialto -> Millau delivery transaction. Weight: {}/{}, size: {}/{}", @@ -126,34 +166,51 @@ impl SubstrateMessageLane for RialtoMessagesToMillau { } /// Rialto node as messages source. -type RialtoSourceClient = - SubstrateMessagesSource; +type RialtoSourceClient = SubstrateMessagesSource; /// Millau node as messages target. -type MillauTargetClient = - SubstrateMessagesTarget; +type MillauTargetClient = SubstrateMessagesTarget; /// Run Rialto-to-Millau messages sync. pub async fn run( - params: MessagesRelayParams, -) -> Result<(), String> { - let stall_timeout = Duration::from_secs(5 * 60); + params: MessagesRelayParams< + Rialto, + RialtoSigningParams, + Millau, + MillauSigningParams, + MixStrategy, + >, +) -> anyhow::Result<()> { + let stall_timeout = relay_substrate_client::bidirectional_transaction_stall_timeout( + params.source_transactions_mortality, + params.target_transactions_mortality, + Rialto::AVERAGE_BLOCK_INTERVAL, + Millau::AVERAGE_BLOCK_INTERVAL, + STALL_TIMEOUT, + ); let relayer_id_at_rialto = (*params.source_sign.public().as_array_ref()).into(); let lane_id = params.lane_id; let source_client = params.source_client; + let target_client = params.target_client; let lane = RialtoMessagesToMillau { - source_client: source_client.clone(), - source_sign: params.source_sign, - target_client: params.target_client.clone(), - target_sign: params.target_sign, - relayer_id_at_source: relayer_id_at_rialto, + message_lane: SubstrateMessageLaneToSubstrate { + source_client: source_client.clone(), + source_sign: params.source_sign, + source_transactions_mortality: params.source_transactions_mortality, + target_client: target_client.clone(), + target_sign: params.target_sign, + target_transactions_mortality: params.target_transactions_mortality, + relayer_id_at_source: relayer_id_at_rialto, + }, }; // 2/3 is reserved for proofs and tx overhead let max_messages_size_in_single_batch = bp_millau::max_extrinsic_size() / 3; let 
(max_messages_in_single_batch, max_messages_weight_in_single_batch) = - select_delivery_transaction_limits::>( + select_delivery_transaction_limits::< + pallet_bridge_messages::weights::RialtoWeight, + >( bp_millau::max_extrinsic_weight(), bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, ); @@ -164,13 +221,22 @@ pub async fn run( Rialto relayer account id: {:?}\n\t\ Max messages in single transaction: {}\n\t\ Max messages size in single transaction: {}\n\t\ - Max messages weight in single transaction: {}", - lane.relayer_id_at_source, + Max messages weight in single transaction: {}\n\t\ + Tx mortality: {:?}/{:?}\n\t\ + Stall timeout: {:?}", + lane.message_lane.relayer_id_at_source, max_messages_in_single_batch, max_messages_size_in_single_batch, max_messages_weight_in_single_batch, + params.source_transactions_mortality, + params.target_transactions_mortality, + stall_timeout, ); + let standalone_metrics = params + .standalone_metrics + .map(Ok) + .unwrap_or_else(|| standalone_metrics(source_client.clone(), target_client.clone()))?; messages_relay::message_lane_loop::run( messages_relay::message_lane_loop::Params { lane: lane_id, @@ -179,58 +245,80 @@ pub async fn run( reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY, stall_timeout, delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams { - max_unrewarded_relayer_entries_at_target: bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, - max_unconfirmed_nonces_at_target: bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, + max_unrewarded_relayer_entries_at_target: + bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + max_unconfirmed_nonces_at_target: + bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, max_messages_in_single_batch, max_messages_weight_in_single_batch, max_messages_size_in_single_batch, - relayer_mode: messages_relay::message_lane_loop::RelayerMode::Altruistic, + relay_strategy: params.relay_strategy, }, }, RialtoSourceClient::new( 
source_client.clone(), lane.clone(), lane_id, - MILLAU_CHAIN_ID, params.target_to_source_headers_relay, ), MillauTargetClient::new( - params.target_client, + target_client, lane, lane_id, - RIALTO_CHAIN_ID, + standalone_metrics.clone(), params.source_to_target_headers_relay, ), - relay_utils::relay_metrics( - Some(messages_relay::message_lane_loop::metrics_prefix::< - RialtoMessagesToMillau, - >(&lane_id)), - params.metrics_params, - ) - .standalone_metric(|registry, prefix| { - StorageProofOverheadMetric::new( - registry, - prefix, - source_client.clone(), - "rialto_storage_proof_overhead".into(), - "Rialto storage proof overhead".into(), - ) - })? - .standalone_metric(|registry, prefix| { - FloatStorageValueMetric::<_, sp_runtime::FixedU128>::new( - registry, - prefix, - source_client, - sp_core::storage::StorageKey( - rialto_runtime::millau_messages::MillauToRialtoConversionRate::key().to_vec(), - ), - Some(rialto_runtime::millau_messages::INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE), - "rialto_millau_to_rialto_conversion_rate".into(), - "Millau to Rialto tokens conversion rate (used by Millau)".into(), - ) - })? - .into_params(), + standalone_metrics.register_and_spawn(params.metrics_params)?, futures::future::pending(), ) .await + .map_err(Into::into) +} + +/// Create standalone metrics for the Rialto -> Millau messages loop. +pub(crate) fn standalone_metrics( + source_client: Client, + target_client: Client, +) -> anyhow::Result> { + substrate_relay_helper::messages_lane::standalone_metrics( + source_client, + target_client, + Some(crate::chains::rialto::ASSOCIATED_TOKEN_ID), + Some(crate::chains::millau::ASSOCIATED_TOKEN_ID), + Some(crate::chains::millau::rialto_to_millau_conversion_rate_params()), + Some(crate::chains::rialto::millau_to_rialto_conversion_rate_params()), + ) +} + +/// Update Millau -> Rialto conversion rate, stored in Rialto runtime storage. 
+pub(crate) async fn update_millau_to_rialto_conversion_rate( + client: Client, + signer: ::AccountKeyPair, + updated_rate: f64, +) -> anyhow::Result<()> { + let genesis_hash = *client.genesis_hash(); + let signer_id = (*signer.public().as_array_ref()).into(); + client + .submit_signed_extrinsic(signer_id, move |_, transaction_nonce| { + Bytes( + Rialto::sign_transaction( + genesis_hash, + &signer, + relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new( + rialto_runtime::MessagesCall::update_pallet_parameter { + parameter: rialto_runtime::millau_messages::RialtoToMillauMessagesParameter::MillauToRialtoConversionRate( + sp_runtime::FixedU128::from_float(updated_rate), + ), + } + .into(), + transaction_nonce, + ), + ) + .encode(), + ) + }) + .await + .map(drop) + .map_err(|err| anyhow::format_err!("{:?}", err)) } diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_parachain.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_parachain.rs new file mode 100644 index 0000000000000000000000000000000000000000..edd4ca36285406c9b69b8b7d34793f4ae30ea456 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_parachain.rs @@ -0,0 +1,82 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! 
Rialto parachain specification for CLI. + +use crate::cli::{ + encode_call::{Call, CliEncodeCall}, + encode_message, CliChain, +}; +use bp_message_dispatch::MessagePayload; +use codec::Decode; +use frame_support::weights::{DispatchInfo, GetDispatchInfo, Weight}; +use relay_rialto_parachain_client::RialtoParachain; +use sp_version::RuntimeVersion; + +impl CliEncodeCall for RialtoParachain { + fn max_extrinsic_size() -> u32 { + bp_rialto_parachain::max_extrinsic_size() + } + + fn encode_call(call: &Call) -> anyhow::Result { + Ok(match call { + Call::Raw { data } => Decode::decode(&mut &*data.0)?, + Call::Remark { remark_payload, .. } => rialto_parachain_runtime::Call::System( + rialto_parachain_runtime::SystemCall::remark { + remark: remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), + }, + ), + Call::Transfer { recipient, amount } => rialto_parachain_runtime::Call::Balances( + rialto_parachain_runtime::BalancesCall::transfer { + dest: recipient.raw_id().into(), + value: amount.0, + }, + ), + Call::BridgeSendMessage { .. 
} => + anyhow::bail!("Bridge messages are not (yet) supported here",), + }) + } + + fn get_dispatch_info(call: &rialto_parachain_runtime::Call) -> anyhow::Result { + Ok(call.get_dispatch_info()) + } +} + +impl CliChain for RialtoParachain { + const RUNTIME_VERSION: RuntimeVersion = rialto_parachain_runtime::VERSION; + + type KeyPair = sp_core::sr25519::Pair; + type MessagePayload = MessagePayload< + bp_rialto_parachain::AccountId, + bp_millau::AccountSigner, + bp_millau::Signature, + Vec, + >; + + fn ss58_format() -> u16 { + rialto_parachain_runtime::SS58Prefix::get() as u16 + } + + fn max_extrinsic_weight() -> Weight { + bp_rialto_parachain::max_extrinsic_weight() + } + + fn encode_message( + _message: encode_message::MessagePayload, + ) -> anyhow::Result { + anyhow::bail!("Not supported") + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rococo.rs index ec94450a63de5f3acefe85f89af7d52ffa407998..4df60f89faa213679b60613f93279cba2100bb99 100644 --- a/polkadot/bridges/relays/bin-substrate/src/chains/rococo.rs +++ b/polkadot/bridges/relays/bin-substrate/src/chains/rococo.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . +use anyhow::anyhow; use codec::Decode; use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight}; use relay_rococo_client::Rococo; @@ -27,7 +28,7 @@ use crate::cli::{ /// Weight of the `system::remark` call at Rococo. /// -/// This weight is larger (x2) than actual weight at current Rooco runtime to avoid unsuccessful +/// This weight is larger (x2) than actual weight at current Rococo runtime to avoid unsuccessful /// calls in the future. But since it is used only in tests (and on test chains), this is ok. 
pub(crate) const SYSTEM_REMARK_CALL_WEIGHT: Weight = 2 * 1_345_000; @@ -38,41 +39,41 @@ impl CliEncodeCall for Rococo { fn encode_call(call: &Call) -> anyhow::Result { Ok(match call { - Call::Remark { remark_payload, .. } => { - relay_rococo_client::runtime::Call::System(relay_rococo_client::runtime::SystemCall::remark( + Call::Remark { remark_payload, .. } => relay_rococo_client::runtime::Call::System( + relay_rococo_client::runtime::SystemCall::remark( remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), - )) - } - Call::BridgeSendMessage { - lane, - payload, - fee, - bridge_instance_index, - } => match *bridge_instance_index { - bridge::ROCOCO_TO_WOCOCO_INDEX => { - let payload = Decode::decode(&mut &*payload.0)?; - relay_rococo_client::runtime::Call::BridgeMessagesWococo( - relay_rococo_client::runtime::BridgeMessagesWococoCall::send_message(lane.0, payload, fee.0), - ) - } - _ => anyhow::bail!( - "Unsupported target bridge pallet with instance index: {}", - bridge_instance_index ), - }, + ), + Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } => + match *bridge_instance_index { + bridge::ROCOCO_TO_WOCOCO_INDEX => { + let payload = Decode::decode(&mut &*payload.0)?; + relay_rococo_client::runtime::Call::BridgeMessagesWococo( + relay_rococo_client::runtime::BridgeMessagesWococoCall::send_message( + lane.0, payload, fee.0, + ), + ) + }, + _ => anyhow::bail!( + "Unsupported target bridge pallet with instance index: {}", + bridge_instance_index + ), + }, _ => anyhow::bail!("The call is not supported"), }) } - fn get_dispatch_info(call: &relay_rococo_client::runtime::Call) -> anyhow::Result { + fn get_dispatch_info( + call: &relay_rococo_client::runtime::Call, + ) -> anyhow::Result { match *call { - relay_rococo_client::runtime::Call::System(relay_rococo_client::runtime::SystemCall::remark(_)) => { - Ok(DispatchInfo { - weight: SYSTEM_REMARK_CALL_WEIGHT, - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - }) - } + 
relay_rococo_client::runtime::Call::System( + relay_rococo_client::runtime::SystemCall::remark(_), + ) => Ok(DispatchInfo { + weight: SYSTEM_REMARK_CALL_WEIGHT, + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }), _ => anyhow::bail!("Unsupported Rococo call: {:?}", call), } } @@ -92,7 +93,9 @@ impl CliChain for Rococo { bp_wococo::max_extrinsic_weight() } - fn encode_message(_message: encode_message::MessagePayload) -> Result { - Err("Sending messages from Rococo is not yet supported.".into()) + fn encode_message( + _message: encode_message::MessagePayload, + ) -> anyhow::Result { + Err(anyhow!("Sending messages from Rococo is not yet supported.")) } } diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs index c7f60100f13c12f938fc76bc574986343f2849a5..ec98cec1ec1e9e4adf403466e73d71093c3b1933 100644 --- a/polkadot/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs +++ b/polkadot/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs @@ -16,56 +16,88 @@ //! Rococo-to-Wococo headers sync entrypoint. 
-use crate::chains::wococo_headers_to_rococo::MAXIMAL_BALANCE_DECREASE_PER_DAY; -use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; +use codec::Encode; +use sp_core::{Bytes, Pair}; use bp_header_chain::justification::GrandpaJustification; -use codec::Encode; use relay_rococo_client::{Rococo, SyncHeader as RococoSyncHeader}; -use relay_substrate_client::{Chain, TransactionSignScheme}; +use relay_substrate_client::{Client, IndexOf, TransactionSignScheme, UnsignedTransaction}; use relay_utils::metrics::MetricsParams; use relay_wococo_client::{SigningParams as WococoSigningParams, Wococo}; -use sp_core::{Bytes, Pair}; +use substrate_relay_helper::finality_pipeline::{ + SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate, +}; + +use crate::chains::wococo_headers_to_rococo::MAXIMAL_BALANCE_DECREASE_PER_DAY; /// Rococo-to-Wococo finality sync pipeline. -pub(crate) type RococoFinalityToWococo = SubstrateFinalityToSubstrate; +pub(crate) type FinalityPipelineRococoFinalityToWococo = + SubstrateFinalityToSubstrate; + +#[derive(Clone, Debug)] +pub(crate) struct RococoFinalityToWococo { + finality_pipeline: FinalityPipelineRococoFinalityToWococo, +} + +impl RococoFinalityToWococo { + pub fn new(target_client: Client, target_sign: WococoSigningParams) -> Self { + Self { + finality_pipeline: FinalityPipelineRococoFinalityToWococo::new( + target_client, + target_sign, + ), + } + } +} impl SubstrateFinalitySyncPipeline for RococoFinalityToWococo { - const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD; + type FinalitySyncPipeline = FinalityPipelineRococoFinalityToWococo; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = + bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD; type TargetChain = Wococo; fn customize_metrics(params: MetricsParams) -> anyhow::Result { - crate::chains::add_polkadot_kusama_price_metrics::(params) + 
crate::chains::add_polkadot_kusama_price_metrics::(params) } fn start_relay_guards(&self) { relay_substrate_client::guard::abort_on_spec_version_change( - self.target_client.clone(), + self.finality_pipeline.target_client.clone(), bp_wococo::VERSION.spec_version, ); relay_substrate_client::guard::abort_when_account_balance_decreased( - self.target_client.clone(), + self.finality_pipeline.target_client.clone(), self.transactions_author(), MAXIMAL_BALANCE_DECREASE_PER_DAY, ); } fn transactions_author(&self) -> bp_wococo::AccountId { - (*self.target_sign.public().as_array_ref()).into() + (*self.finality_pipeline.target_sign.public().as_array_ref()).into() } fn make_submit_finality_proof_transaction( &self, - transaction_nonce: ::Index, + era: bp_runtime::TransactionEraOf, + transaction_nonce: IndexOf, header: RococoSyncHeader, proof: GrandpaJustification, ) -> Bytes { let call = relay_wococo_client::runtime::Call::BridgeGrandpaRococo( - relay_wococo_client::runtime::BridgeGrandpaRococoCall::submit_finality_proof(header.into_inner(), proof), + relay_wococo_client::runtime::BridgeGrandpaRococoCall::submit_finality_proof( + Box::new(header.into_inner()), + proof, + ), + ); + let genesis_hash = *self.finality_pipeline.target_client.genesis_hash(); + let transaction = Wococo::sign_transaction( + genesis_hash, + &self.finality_pipeline.target_sign, + era, + UnsignedTransaction::new(call, transaction_nonce), ); - let genesis_hash = *self.target_client.genesis_hash(); - let transaction = Wococo::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); Bytes(transaction.encode()) } diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs index be5f91116ec337daef2534da453312b2adb9a1dc..d6c9040e1277bc1238cc970b17cf7753c5d9c43b 100644 --- a/polkadot/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs +++ 
b/polkadot/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs @@ -16,53 +16,82 @@ //! Rococo-to-Wococo messages sync entrypoint. -use crate::messages_lane::{ - select_delivery_transaction_limits, MessagesRelayParams, SubstrateMessageLane, SubstrateMessageLaneToSubstrate, -}; -use crate::messages_source::SubstrateMessagesSource; -use crate::messages_target::SubstrateMessagesTarget; +use std::ops::RangeInclusive; -use bp_messages::MessageNonce; -use bp_runtime::{ROCOCO_CHAIN_ID, WOCOCO_CHAIN_ID}; -use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; use codec::Encode; -use messages_relay::message_lane::MessageLane; -use relay_rococo_client::{HeaderId as RococoHeaderId, Rococo, SigningParams as RococoSigningParams}; -use relay_substrate_client::{metrics::StorageProofOverheadMetric, Chain, TransactionSignScheme}; -use relay_wococo_client::{HeaderId as WococoHeaderId, SigningParams as WococoSigningParams, Wococo}; use sp_core::{Bytes, Pair}; -use std::{ops::RangeInclusive, time::Duration}; + +use bp_messages::MessageNonce; +use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; +use frame_support::weights::Weight; +use messages_relay::{message_lane::MessageLane, relay_strategy::MixStrategy}; +use relay_rococo_client::{ + HeaderId as RococoHeaderId, Rococo, SigningParams as RococoSigningParams, +}; +use relay_substrate_client::{Chain, Client, IndexOf, TransactionSignScheme, UnsignedTransaction}; +use relay_wococo_client::{ + HeaderId as WococoHeaderId, SigningParams as WococoSigningParams, Wococo, +}; +use substrate_relay_helper::{ + messages_lane::{ + select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics, + SubstrateMessageLane, SubstrateMessageLaneToSubstrate, + }, + messages_source::SubstrateMessagesSource, + messages_target::SubstrateMessagesTarget, + STALL_TIMEOUT, +}; /// Rococo-to-Wococo message lane. 
-pub type RococoMessagesToWococo = +pub type MessageLaneRococoMessagesToWococo = SubstrateMessageLaneToSubstrate; +#[derive(Clone)] +pub struct RococoMessagesToWococo { + message_lane: MessageLaneRococoMessagesToWococo, +} + impl SubstrateMessageLane for RococoMessagesToWococo { - const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_wococo::TO_WOCOCO_MESSAGE_DETAILS_METHOD; + type MessageLane = MessageLaneRococoMessagesToWococo; + + const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = + bp_wococo::TO_WOCOCO_MESSAGE_DETAILS_METHOD; const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str = bp_wococo::TO_WOCOCO_LATEST_GENERATED_NONCE_METHOD; - const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_wococo::TO_WOCOCO_LATEST_RECEIVED_NONCE_METHOD; + const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = + bp_wococo::TO_WOCOCO_LATEST_RECEIVED_NONCE_METHOD; - const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rococo::FROM_ROCOCO_LATEST_RECEIVED_NONCE_METHOD; + const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = + bp_rococo::FROM_ROCOCO_LATEST_RECEIVED_NONCE_METHOD; const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str = bp_rococo::FROM_ROCOCO_LATEST_CONFIRMED_NONCE_METHOD; - const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_rococo::FROM_ROCOCO_UNREWARDED_RELAYERS_STATE; + const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = + bp_rococo::FROM_ROCOCO_UNREWARDED_RELAYERS_STATE; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = + bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD; + const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = + bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD; - const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD; - const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD; + const MESSAGE_PALLET_NAME_AT_SOURCE: 
&'static str = bp_rococo::WITH_WOCOCO_MESSAGES_PALLET_NAME; + const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = bp_wococo::WITH_ROCOCO_MESSAGES_PALLET_NAME; + + const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight = + bp_wococo::PAY_INBOUND_DISPATCH_FEE_WEIGHT; type SourceChain = Rococo; type TargetChain = Wococo; fn source_transactions_author(&self) -> bp_rococo::AccountId { - (*self.source_sign.public().as_array_ref()).into() + (*self.message_lane.source_sign.public().as_array_ref()).into() } fn make_messages_receiving_proof_transaction( &self, - transaction_nonce: ::Index, + best_block_id: RococoHeaderId, + transaction_nonce: IndexOf, _generated_at_block: WococoHeaderId, - proof: ::MessagesReceivingProof, + proof: ::MessagesReceivingProof, ) -> Bytes { let (relayers_state, proof) = proof; let call = relay_rococo_client::runtime::Call::BridgeMessagesWococo( @@ -71,8 +100,16 @@ impl SubstrateMessageLane for RococoMessagesToWococo { relayers_state, ), ); - let genesis_hash = *self.source_client.genesis_hash(); - let transaction = Rococo::sign_transaction(genesis_hash, &self.source_sign, transaction_nonce, call); + let genesis_hash = *self.message_lane.source_client.genesis_hash(); + let transaction = Rococo::sign_transaction( + genesis_hash, + &self.message_lane.source_sign, + relay_substrate_client::TransactionEra::new( + best_block_id, + self.message_lane.source_transactions_mortality, + ), + UnsignedTransaction::new(call, transaction_nonce), + ); log::trace!( target: "bridge", "Prepared Wococo -> Rococo confirmation transaction. 
Weight: /{}, size: {}/{}", @@ -84,34 +121,39 @@ impl SubstrateMessageLane for RococoMessagesToWococo { } fn target_transactions_author(&self) -> bp_wococo::AccountId { - (*self.target_sign.public().as_array_ref()).into() + (*self.message_lane.target_sign.public().as_array_ref()).into() } fn make_messages_delivery_transaction( &self, - transaction_nonce: ::Index, + best_block_id: WococoHeaderId, + transaction_nonce: IndexOf, _generated_at_header: RococoHeaderId, _nonces: RangeInclusive, - proof: ::MessagesProof, + proof: ::MessagesProof, ) -> Bytes { let (dispatch_weight, proof) = proof; - let FromBridgedChainMessagesProof { - ref nonces_start, - ref nonces_end, - .. - } = proof; + let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. } = proof; let messages_count = nonces_end - nonces_start + 1; let call = relay_wococo_client::runtime::Call::BridgeMessagesRococo( relay_wococo_client::runtime::BridgeMessagesRococoCall::receive_messages_proof( - self.relayer_id_at_source.clone(), + self.message_lane.relayer_id_at_source.clone(), proof, messages_count as _, dispatch_weight, ), ); - let genesis_hash = *self.target_client.genesis_hash(); - let transaction = Wococo::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + let genesis_hash = *self.message_lane.target_client.genesis_hash(); + let transaction = Wococo::sign_transaction( + genesis_hash, + &self.message_lane.target_sign, + relay_substrate_client::TransactionEra::new( + best_block_id, + self.message_lane.target_transactions_mortality, + ), + UnsignedTransaction::new(call, transaction_nonce), + ); log::trace!( target: "bridge", "Prepared Rococo -> Wococo delivery transaction. Weight: /{}, size: {}/{}", @@ -124,28 +166,43 @@ impl SubstrateMessageLane for RococoMessagesToWococo { } /// Rococo node as messages source. -type RococoSourceClient = - SubstrateMessagesSource; +type RococoSourceClient = SubstrateMessagesSource; /// Wococo node as messages target. 
-type WococoTargetClient = - SubstrateMessagesTarget; +type WococoTargetClient = SubstrateMessagesTarget; /// Run Rococo-to-Wococo messages sync. pub async fn run( - params: MessagesRelayParams, -) -> Result<(), String> { - let stall_timeout = Duration::from_secs(5 * 60); + params: MessagesRelayParams< + Rococo, + RococoSigningParams, + Wococo, + WococoSigningParams, + MixStrategy, + >, +) -> anyhow::Result<()> { + let stall_timeout = relay_substrate_client::bidirectional_transaction_stall_timeout( + params.source_transactions_mortality, + params.target_transactions_mortality, + Rococo::AVERAGE_BLOCK_INTERVAL, + Wococo::AVERAGE_BLOCK_INTERVAL, + STALL_TIMEOUT, + ); let relayer_id_at_rococo = (*params.source_sign.public().as_array_ref()).into(); let lane_id = params.lane_id; let source_client = params.source_client; + let target_client = params.target_client; let lane = RococoMessagesToWococo { - source_client: source_client.clone(), - source_sign: params.source_sign, - target_client: params.target_client.clone(), - target_sign: params.target_sign, - relayer_id_at_source: relayer_id_at_rococo, + message_lane: SubstrateMessageLaneToSubstrate { + source_client: source_client.clone(), + source_sign: params.source_sign, + source_transactions_mortality: params.source_transactions_mortality, + target_client: target_client.clone(), + target_sign: params.target_sign, + target_transactions_mortality: params.target_transactions_mortality, + relayer_id_at_source: relayer_id_at_rococo, + }, }; // 2/3 is reserved for proofs and tx overhead @@ -153,14 +210,14 @@ pub async fn run( // we don't know exact weights of the Wococo runtime. So to guess weights we'll be using // weights from Rialto and then simply dividing it by x2. 
let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = - select_delivery_transaction_limits::>( + select_delivery_transaction_limits::< + pallet_bridge_messages::weights::RialtoWeight, + >( bp_wococo::max_extrinsic_weight(), bp_wococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, ); - let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = ( - max_messages_in_single_batch / 2, - max_messages_weight_in_single_batch / 2, - ); + let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = + (max_messages_in_single_batch / 2, max_messages_weight_in_single_batch / 2); log::info!( target: "bridge", @@ -168,13 +225,22 @@ pub async fn run( Rococo relayer account id: {:?}\n\t\ Max messages in single transaction: {}\n\t\ Max messages size in single transaction: {}\n\t\ - Max messages weight in single transaction: {}", - lane.relayer_id_at_source, + Max messages weight in single transaction: {}\n\t\ + Tx mortality: {:?}/{:?}\n\t\ + Stall timeout: {:?}", + lane.message_lane.relayer_id_at_source, max_messages_in_single_batch, max_messages_size_in_single_batch, max_messages_weight_in_single_batch, + params.source_transactions_mortality, + params.target_transactions_mortality, + stall_timeout, ); + let standalone_metrics = params + .standalone_metrics + .map(Ok) + .unwrap_or_else(|| standalone_metrics(source_client.clone(), target_client.clone()))?; messages_relay::message_lane_loop::run( messages_relay::message_lane_loop::Params { lane: lane_id, @@ -183,45 +249,47 @@ pub async fn run( reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY, stall_timeout, delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams { - max_unrewarded_relayer_entries_at_target: bp_wococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, - max_unconfirmed_nonces_at_target: bp_wococo::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, + max_unrewarded_relayer_entries_at_target: + bp_wococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + 
max_unconfirmed_nonces_at_target: + bp_wococo::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, max_messages_in_single_batch, max_messages_weight_in_single_batch, max_messages_size_in_single_batch, - relayer_mode: messages_relay::message_lane_loop::RelayerMode::Altruistic, + relay_strategy: params.relay_strategy, }, }, RococoSourceClient::new( source_client.clone(), lane.clone(), lane_id, - WOCOCO_CHAIN_ID, params.target_to_source_headers_relay, ), WococoTargetClient::new( - params.target_client, + target_client, lane, lane_id, - ROCOCO_CHAIN_ID, + standalone_metrics.clone(), params.source_to_target_headers_relay, ), - relay_utils::relay_metrics( - Some(messages_relay::message_lane_loop::metrics_prefix::< - RococoMessagesToWococo, - >(&lane_id)), - params.metrics_params, - ) - .standalone_metric(|registry, prefix| { - StorageProofOverheadMetric::new( - registry, - prefix, - source_client.clone(), - "rococo_storage_proof_overhead".into(), - "Rococo storage proof overhead".into(), - ) - })? - .into_params(), + standalone_metrics.register_and_spawn(params.metrics_params)?, futures::future::pending(), ) .await + .map_err(Into::into) +} + +/// Create standalone metrics for the Rococo -> Wococo messages loop. +pub(crate) fn standalone_metrics( + source_client: Client, + target_client: Client, +) -> anyhow::Result> { + substrate_relay_helper::messages_lane::standalone_metrics( + source_client, + target_client, + None, + None, + None, + None, + ) } diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/westend.rs b/polkadot/bridges/relays/bin-substrate/src/chains/westend.rs index 27621472d6d999e68bc3b6e6499880f70334a3d5..a42e4805512ca326802e8d4ad8c519f760f55091 100644 --- a/polkadot/bridges/relays/bin-substrate/src/chains/westend.rs +++ b/polkadot/bridges/relays/bin-substrate/src/chains/westend.rs @@ -17,6 +17,7 @@ //! Westend chain specification for CLI. 
use crate::cli::{encode_message, CliChain}; +use anyhow::anyhow; use frame_support::weights::Weight; use relay_westend_client::Westend; use sp_version::RuntimeVersion; @@ -35,7 +36,9 @@ impl CliChain for Westend { 0 } - fn encode_message(_message: encode_message::MessagePayload) -> Result { - Err("Sending messages from Westend is not yet supported.".into()) + fn encode_message( + _message: encode_message::MessagePayload, + ) -> anyhow::Result { + Err(anyhow!("Sending messages from Westend is not yet supported.")) } } diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs b/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs index 1523dc1be584cfafd77e158be71e024cf393091a..211aa9da9bfe3af72ee86593b8230981e402a9b1 100644 --- a/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs +++ b/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs @@ -16,46 +16,77 @@ //! Westend-to-Millau headers sync entrypoint. -use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; +use codec::Encode; +use sp_core::{Bytes, Pair}; use bp_header_chain::justification::GrandpaJustification; -use codec::Encode; use relay_millau_client::{Millau, SigningParams as MillauSigningParams}; -use relay_substrate_client::{Chain, TransactionSignScheme}; +use relay_substrate_client::{Client, IndexOf, TransactionSignScheme, UnsignedTransaction}; use relay_utils::metrics::MetricsParams; use relay_westend_client::{SyncHeader as WestendSyncHeader, Westend}; -use sp_core::{Bytes, Pair}; +use substrate_relay_helper::finality_pipeline::{ + SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate, +}; /// Westend-to-Millau finality sync pipeline. 
-pub(crate) type WestendFinalityToMillau = SubstrateFinalityToSubstrate; +pub(crate) type FinalityPipelineWestendFinalityToMillau = + SubstrateFinalityToSubstrate; + +#[derive(Clone, Debug)] +pub(crate) struct WestendFinalityToMillau { + finality_pipeline: FinalityPipelineWestendFinalityToMillau, +} + +impl WestendFinalityToMillau { + pub fn new(target_client: Client, target_sign: MillauSigningParams) -> Self { + Self { + finality_pipeline: FinalityPipelineWestendFinalityToMillau::new( + target_client, + target_sign, + ), + } + } +} impl SubstrateFinalitySyncPipeline for WestendFinalityToMillau { - const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_westend::BEST_FINALIZED_WESTEND_HEADER_METHOD; + type FinalitySyncPipeline = FinalityPipelineWestendFinalityToMillau; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = + bp_westend::BEST_FINALIZED_WESTEND_HEADER_METHOD; type TargetChain = Millau; fn customize_metrics(params: MetricsParams) -> anyhow::Result { - crate::chains::add_polkadot_kusama_price_metrics::(params) + crate::chains::add_polkadot_kusama_price_metrics::(params) } fn transactions_author(&self) -> bp_millau::AccountId { - (*self.target_sign.public().as_array_ref()).into() + (*self.finality_pipeline.target_sign.public().as_array_ref()).into() } fn make_submit_finality_proof_transaction( &self, - transaction_nonce: ::Index, + era: bp_runtime::TransactionEraOf, + transaction_nonce: IndexOf, header: WestendSyncHeader, proof: GrandpaJustification, ) -> Bytes { - let call = millau_runtime::BridgeGrandpaWestendCall::< + let call = millau_runtime::BridgeGrandpaCall::< millau_runtime::Runtime, millau_runtime::WestendGrandpaInstance, - >::submit_finality_proof(header.into_inner(), proof) + >::submit_finality_proof { + finality_target: Box::new(header.into_inner()), + justification: proof, + } .into(); - let genesis_hash = *self.target_client.genesis_hash(); - let transaction = Millau::sign_transaction(genesis_hash, 
&self.target_sign, transaction_nonce, call); + let genesis_hash = *self.finality_pipeline.target_client.genesis_hash(); + let transaction = Millau::sign_transaction( + genesis_hash, + &self.finality_pipeline.target_sign, + era, + UnsignedTransaction::new(call, transaction_nonce), + ); Bytes(transaction.encode()) } diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/wococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/wococo.rs index 9b944d781685db098e2d41cf900990eada89c056..328397d14ba7c8cc771f8be21df1ba9cf03f7767 100644 --- a/polkadot/bridges/relays/bin-substrate/src/chains/wococo.rs +++ b/polkadot/bridges/relays/bin-substrate/src/chains/wococo.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . +use anyhow::anyhow; use codec::Decode; use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight}; use relay_wococo_client::Wococo; @@ -32,41 +33,41 @@ impl CliEncodeCall for Wococo { fn encode_call(call: &Call) -> anyhow::Result { Ok(match call { - Call::Remark { remark_payload, .. } => { - relay_wococo_client::runtime::Call::System(relay_wococo_client::runtime::SystemCall::remark( + Call::Remark { remark_payload, .. 
} => relay_wococo_client::runtime::Call::System( + relay_wococo_client::runtime::SystemCall::remark( remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), - )) - } - Call::BridgeSendMessage { - lane, - payload, - fee, - bridge_instance_index, - } => match *bridge_instance_index { - bridge::WOCOCO_TO_ROCOCO_INDEX => { - let payload = Decode::decode(&mut &*payload.0)?; - relay_wococo_client::runtime::Call::BridgeMessagesRococo( - relay_wococo_client::runtime::BridgeMessagesRococoCall::send_message(lane.0, payload, fee.0), - ) - } - _ => anyhow::bail!( - "Unsupported target bridge pallet with instance index: {}", - bridge_instance_index ), - }, + ), + Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } => + match *bridge_instance_index { + bridge::WOCOCO_TO_ROCOCO_INDEX => { + let payload = Decode::decode(&mut &*payload.0)?; + relay_wococo_client::runtime::Call::BridgeMessagesRococo( + relay_wococo_client::runtime::BridgeMessagesRococoCall::send_message( + lane.0, payload, fee.0, + ), + ) + }, + _ => anyhow::bail!( + "Unsupported target bridge pallet with instance index: {}", + bridge_instance_index + ), + }, _ => anyhow::bail!("The call is not supported"), }) } - fn get_dispatch_info(call: &relay_wococo_client::runtime::Call) -> anyhow::Result { + fn get_dispatch_info( + call: &relay_wococo_client::runtime::Call, + ) -> anyhow::Result { match *call { - relay_wococo_client::runtime::Call::System(relay_wococo_client::runtime::SystemCall::remark(_)) => { - Ok(DispatchInfo { - weight: crate::chains::rococo::SYSTEM_REMARK_CALL_WEIGHT, - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - }) - } + relay_wococo_client::runtime::Call::System( + relay_wococo_client::runtime::SystemCall::remark(_), + ) => Ok(DispatchInfo { + weight: crate::chains::rococo::SYSTEM_REMARK_CALL_WEIGHT, + class: DispatchClass::Normal, + pays_fee: Pays::Yes, + }), _ => anyhow::bail!("Unsupported Rococo call: {:?}", call), } } @@ -86,7 +87,9 @@ impl CliChain for 
Wococo { bp_wococo::max_extrinsic_weight() } - fn encode_message(_message: encode_message::MessagePayload) -> Result { - Err("Sending messages from Wococo is not yet supported.".into()) + fn encode_message( + _message: encode_message::MessagePayload, + ) -> anyhow::Result { + Err(anyhow!("Sending messages from Wococo is not yet supported.")) } } diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs index 8ee30d3ff492581c08ea95d61684d34d234686e0..fe17976d06a86d53e143413e20ce29cf6c93cb2a 100644 --- a/polkadot/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs +++ b/polkadot/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs @@ -16,15 +16,17 @@ //! Wococo-to-Rococo headers sync entrypoint. -use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; +use codec::Encode; +use sp_core::{Bytes, Pair}; use bp_header_chain::justification::GrandpaJustification; -use codec::Encode; use relay_rococo_client::{Rococo, SigningParams as RococoSigningParams}; -use relay_substrate_client::{Chain, TransactionSignScheme}; +use relay_substrate_client::{Client, IndexOf, TransactionSignScheme, UnsignedTransaction}; use relay_utils::metrics::MetricsParams; use relay_wococo_client::{SyncHeader as WococoSyncHeader, Wococo}; -use sp_core::{Bytes, Pair}; +use substrate_relay_helper::finality_pipeline::{ + SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate, +}; /// Maximal saturating difference between `balance(now)` and `balance(now-24h)` to treat /// relay as gone wild. @@ -34,44 +36,73 @@ use sp_core::{Bytes, Pair}; pub(crate) const MAXIMAL_BALANCE_DECREASE_PER_DAY: bp_rococo::Balance = 1_500_000_000_000_000; /// Wococo-to-Rococo finality sync pipeline. 
-pub(crate) type WococoFinalityToRococo = SubstrateFinalityToSubstrate; +pub(crate) type FinalityPipelineWococoFinalityToRococo = + SubstrateFinalityToSubstrate; + +#[derive(Clone, Debug)] +pub(crate) struct WococoFinalityToRococo { + finality_pipeline: FinalityPipelineWococoFinalityToRococo, +} + +impl WococoFinalityToRococo { + pub fn new(target_client: Client, target_sign: RococoSigningParams) -> Self { + Self { + finality_pipeline: FinalityPipelineWococoFinalityToRococo::new( + target_client, + target_sign, + ), + } + } +} impl SubstrateFinalitySyncPipeline for WococoFinalityToRococo { - const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD; + type FinalitySyncPipeline = FinalityPipelineWococoFinalityToRococo; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = + bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD; type TargetChain = Rococo; fn customize_metrics(params: MetricsParams) -> anyhow::Result { - crate::chains::add_polkadot_kusama_price_metrics::(params) + crate::chains::add_polkadot_kusama_price_metrics::(params) } fn start_relay_guards(&self) { relay_substrate_client::guard::abort_on_spec_version_change( - self.target_client.clone(), + self.finality_pipeline.target_client.clone(), bp_rococo::VERSION.spec_version, ); relay_substrate_client::guard::abort_when_account_balance_decreased( - self.target_client.clone(), + self.finality_pipeline.target_client.clone(), self.transactions_author(), MAXIMAL_BALANCE_DECREASE_PER_DAY, ); } fn transactions_author(&self) -> bp_rococo::AccountId { - (*self.target_sign.public().as_array_ref()).into() + (*self.finality_pipeline.target_sign.public().as_array_ref()).into() } fn make_submit_finality_proof_transaction( &self, - transaction_nonce: ::Index, + era: bp_runtime::TransactionEraOf, + transaction_nonce: IndexOf, header: WococoSyncHeader, proof: GrandpaJustification, ) -> Bytes { let call = relay_rococo_client::runtime::Call::BridgeGrandpaWococo( 
- relay_rococo_client::runtime::BridgeGrandpaWococoCall::submit_finality_proof(header.into_inner(), proof), + relay_rococo_client::runtime::BridgeGrandpaWococoCall::submit_finality_proof( + Box::new(header.into_inner()), + proof, + ), + ); + let genesis_hash = *self.finality_pipeline.target_client.genesis_hash(); + let transaction = Rococo::sign_transaction( + genesis_hash, + &self.finality_pipeline.target_sign, + era, + UnsignedTransaction::new(call, transaction_nonce), ); - let genesis_hash = *self.target_client.genesis_hash(); - let transaction = Rococo::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); Bytes(transaction.encode()) } @@ -80,36 +111,19 @@ impl SubstrateFinalitySyncPipeline for WococoFinalityToRococo { #[cfg(test)] mod tests { use super::*; - use frame_support::weights::WeightToFeePolynomial; - use pallet_bridge_grandpa::weights::WeightInfo; + use crate::chains::kusama_headers_to_polkadot::tests::compute_maximal_balance_decrease_per_day; #[test] fn maximal_balance_decrease_per_day_is_sane() { - // Rococo/Wococo GRANDPA pallet weights. They're now using Rialto weights => using `RialtoWeight` is justified. - // - // Using Rialto runtime this is slightly incorrect, because `DbWeight` of Rococo/Wococo runtime may differ - // from the `DbWeight` of Rialto runtime. But now (and most probably forever) it is the same. - type RococoGrandpaPalletWeights = pallet_bridge_grandpa::weights::RialtoWeight; - - // The following formula shall not be treated as super-accurate - guard is to protect from mad relays, - // not to protect from over-average loses. - // - // Worst case: we're submitting proof for every source header. Since we submit every header, the number of - // headers in ancestry proof is near to 0 (let's round up to 2). And the number of authorities is 1024, - // which is (now) larger than on any existing chain => normally there'll be ~1024*2/3+1 commits. 
- const AVG_VOTES_ANCESTRIES_LEN: u32 = 2; - const AVG_PRECOMMITS_LEN: u32 = 1024 * 2 / 3 + 1; - let number_of_source_headers_per_day: bp_wococo::Balance = bp_wococo::DAYS as _; - let single_source_header_submit_call_weight = - RococoGrandpaPalletWeights::submit_finality_proof(AVG_VOTES_ANCESTRIES_LEN, AVG_PRECOMMITS_LEN); - // for simplicity - add extra weight for base tx fee + fee that is paid for the tx size + adjusted fee - let single_source_header_submit_tx_weight = single_source_header_submit_call_weight * 3 / 2; - let single_source_header_tx_cost = bp_rococo::WeightToFee::calc(&single_source_header_submit_tx_weight); - let maximal_expected_decrease = single_source_header_tx_cost * number_of_source_headers_per_day; + // we expect Wococo -> Rococo relay to be running in all-headers mode + let maximal_balance_decrease = compute_maximal_balance_decrease_per_day::< + bp_kusama::Balance, + bp_kusama::WeightToFee, + >(bp_wococo::DAYS); assert!( - MAXIMAL_BALANCE_DECREASE_PER_DAY >= maximal_expected_decrease, + MAXIMAL_BALANCE_DECREASE_PER_DAY >= maximal_balance_decrease, "Maximal expected loss per day {} is larger than hardcoded {}", - maximal_expected_decrease, + maximal_balance_decrease, MAXIMAL_BALANCE_DECREASE_PER_DAY, ); } diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs index b696801569e87f2c01c6ce0dd2cb46293bf123c4..dcba89e43f05ce90302fa03b41afc40f6085cfb5 100644 --- a/polkadot/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs +++ b/polkadot/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs @@ -16,53 +16,81 @@ //! Wococo-to-Rococo messages sync entrypoint. 
-use crate::messages_lane::{ - select_delivery_transaction_limits, MessagesRelayParams, SubstrateMessageLane, SubstrateMessageLaneToSubstrate, -}; -use crate::messages_source::SubstrateMessagesSource; -use crate::messages_target::SubstrateMessagesTarget; +use std::ops::RangeInclusive; -use bp_messages::MessageNonce; -use bp_runtime::{ROCOCO_CHAIN_ID, WOCOCO_CHAIN_ID}; -use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; use codec::Encode; -use messages_relay::message_lane::MessageLane; -use relay_rococo_client::{HeaderId as RococoHeaderId, Rococo, SigningParams as RococoSigningParams}; -use relay_substrate_client::{metrics::StorageProofOverheadMetric, Chain, TransactionSignScheme}; -use relay_wococo_client::{HeaderId as WococoHeaderId, SigningParams as WococoSigningParams, Wococo}; use sp_core::{Bytes, Pair}; -use std::{ops::RangeInclusive, time::Duration}; + +use bp_messages::MessageNonce; +use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; +use frame_support::weights::Weight; +use messages_relay::{message_lane::MessageLane, relay_strategy::MixStrategy}; +use relay_rococo_client::{ + HeaderId as RococoHeaderId, Rococo, SigningParams as RococoSigningParams, +}; +use relay_substrate_client::{Chain, Client, IndexOf, TransactionSignScheme, UnsignedTransaction}; +use relay_wococo_client::{ + HeaderId as WococoHeaderId, SigningParams as WococoSigningParams, Wococo, +}; +use substrate_relay_helper::{ + messages_lane::{ + select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics, + SubstrateMessageLane, SubstrateMessageLaneToSubstrate, + }, + messages_source::SubstrateMessagesSource, + messages_target::SubstrateMessagesTarget, + STALL_TIMEOUT, +}; /// Wococo-to-Rococo message lane. 
-pub type WococoMessagesToRococo = +pub type MessageLaneWococoMessagesToRococo = SubstrateMessageLaneToSubstrate; +#[derive(Clone)] +pub struct WococoMessagesToRococo { + message_lane: MessageLaneWococoMessagesToRococo, +} + impl SubstrateMessageLane for WococoMessagesToRococo { - const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_rococo::TO_ROCOCO_MESSAGE_DETAILS_METHOD; + type MessageLane = MessageLaneWococoMessagesToRococo; + const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = + bp_rococo::TO_ROCOCO_MESSAGE_DETAILS_METHOD; const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str = bp_rococo::TO_ROCOCO_LATEST_GENERATED_NONCE_METHOD; - const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rococo::TO_ROCOCO_LATEST_RECEIVED_NONCE_METHOD; + const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = + bp_rococo::TO_ROCOCO_LATEST_RECEIVED_NONCE_METHOD; - const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_wococo::FROM_WOCOCO_LATEST_RECEIVED_NONCE_METHOD; + const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = + bp_wococo::FROM_WOCOCO_LATEST_RECEIVED_NONCE_METHOD; const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str = bp_wococo::FROM_WOCOCO_LATEST_CONFIRMED_NONCE_METHOD; - const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_wococo::FROM_WOCOCO_UNREWARDED_RELAYERS_STATE; + const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = + bp_wococo::FROM_WOCOCO_UNREWARDED_RELAYERS_STATE; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = + bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD; + const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = + bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD; - const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD; - const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD; + const MESSAGE_PALLET_NAME_AT_SOURCE: 
&'static str = bp_wococo::WITH_ROCOCO_MESSAGES_PALLET_NAME; + const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = bp_rococo::WITH_WOCOCO_MESSAGES_PALLET_NAME; + + const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight = + bp_rococo::PAY_INBOUND_DISPATCH_FEE_WEIGHT; type SourceChain = Wococo; type TargetChain = Rococo; fn source_transactions_author(&self) -> bp_wococo::AccountId { - (*self.source_sign.public().as_array_ref()).into() + (*self.message_lane.source_sign.public().as_array_ref()).into() } fn make_messages_receiving_proof_transaction( &self, - transaction_nonce: ::Index, + best_block_id: WococoHeaderId, + transaction_nonce: IndexOf, _generated_at_block: RococoHeaderId, - proof: ::MessagesReceivingProof, + proof: ::MessagesReceivingProof, ) -> Bytes { let (relayers_state, proof) = proof; let call = relay_wococo_client::runtime::Call::BridgeMessagesRococo( @@ -71,8 +99,16 @@ impl SubstrateMessageLane for WococoMessagesToRococo { relayers_state, ), ); - let genesis_hash = *self.source_client.genesis_hash(); - let transaction = Wococo::sign_transaction(genesis_hash, &self.source_sign, transaction_nonce, call); + let genesis_hash = *self.message_lane.source_client.genesis_hash(); + let transaction = Wococo::sign_transaction( + genesis_hash, + &self.message_lane.source_sign, + relay_substrate_client::TransactionEra::new( + best_block_id, + self.message_lane.source_transactions_mortality, + ), + UnsignedTransaction::new(call, transaction_nonce), + ); log::trace!( target: "bridge", "Prepared Rococo -> Wococo confirmation transaction. 
Weight: /{}, size: {}/{}", @@ -84,34 +120,39 @@ impl SubstrateMessageLane for WococoMessagesToRococo { } fn target_transactions_author(&self) -> bp_rococo::AccountId { - (*self.target_sign.public().as_array_ref()).into() + (*self.message_lane.target_sign.public().as_array_ref()).into() } fn make_messages_delivery_transaction( &self, - transaction_nonce: ::Index, + best_block_id: WococoHeaderId, + transaction_nonce: IndexOf, _generated_at_header: WococoHeaderId, _nonces: RangeInclusive, - proof: ::MessagesProof, + proof: ::MessagesProof, ) -> Bytes { let (dispatch_weight, proof) = proof; - let FromBridgedChainMessagesProof { - ref nonces_start, - ref nonces_end, - .. - } = proof; + let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. } = proof; let messages_count = nonces_end - nonces_start + 1; let call = relay_rococo_client::runtime::Call::BridgeMessagesWococo( relay_rococo_client::runtime::BridgeMessagesWococoCall::receive_messages_proof( - self.relayer_id_at_source.clone(), + self.message_lane.relayer_id_at_source.clone(), proof, messages_count as _, dispatch_weight, ), ); - let genesis_hash = *self.target_client.genesis_hash(); - let transaction = Rococo::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + let genesis_hash = *self.message_lane.target_client.genesis_hash(); + let transaction = Rococo::sign_transaction( + genesis_hash, + &self.message_lane.target_sign, + relay_substrate_client::TransactionEra::new( + best_block_id, + self.message_lane.target_transactions_mortality, + ), + UnsignedTransaction::new(call, transaction_nonce), + ); log::trace!( target: "bridge", "Prepared Wococo -> Rococo delivery transaction. Weight: /{}, size: {}/{}", @@ -124,28 +165,43 @@ impl SubstrateMessageLane for WococoMessagesToRococo { } /// Wococo node as messages source. -type WococoSourceClient = - SubstrateMessagesSource; +type WococoSourceClient = SubstrateMessagesSource; /// Rococo node as messages target. 
-type RococoTargetClient = - SubstrateMessagesTarget; +type RococoTargetClient = SubstrateMessagesTarget; /// Run Wococo-to-Rococo messages sync. pub async fn run( - params: MessagesRelayParams, -) -> Result<(), String> { - let stall_timeout = Duration::from_secs(5 * 60); + params: MessagesRelayParams< + Wococo, + WococoSigningParams, + Rococo, + RococoSigningParams, + MixStrategy, + >, +) -> anyhow::Result<()> { + let stall_timeout = relay_substrate_client::bidirectional_transaction_stall_timeout( + params.source_transactions_mortality, + params.target_transactions_mortality, + Wococo::AVERAGE_BLOCK_INTERVAL, + Rococo::AVERAGE_BLOCK_INTERVAL, + STALL_TIMEOUT, + ); let relayer_id_at_wococo = (*params.source_sign.public().as_array_ref()).into(); let lane_id = params.lane_id; let source_client = params.source_client; + let target_client = params.target_client; let lane = WococoMessagesToRococo { - source_client: source_client.clone(), - source_sign: params.source_sign, - target_client: params.target_client.clone(), - target_sign: params.target_sign, - relayer_id_at_source: relayer_id_at_wococo, + message_lane: SubstrateMessageLaneToSubstrate { + source_client: source_client.clone(), + source_sign: params.source_sign, + source_transactions_mortality: params.source_transactions_mortality, + target_client: target_client.clone(), + target_sign: params.target_sign, + target_transactions_mortality: params.target_transactions_mortality, + relayer_id_at_source: relayer_id_at_wococo, + }, }; // 2/3 is reserved for proofs and tx overhead @@ -153,14 +209,14 @@ pub async fn run( // we don't know exact weights of the Rococo runtime. So to guess weights we'll be using // weights from Rialto and then simply dividing it by x2. 
let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = - select_delivery_transaction_limits::>( + select_delivery_transaction_limits::< + pallet_bridge_messages::weights::RialtoWeight, + >( bp_rococo::max_extrinsic_weight(), bp_rococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, ); - let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = ( - max_messages_in_single_batch / 2, - max_messages_weight_in_single_batch / 2, - ); + let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = + (max_messages_in_single_batch / 2, max_messages_weight_in_single_batch / 2); log::info!( target: "bridge", @@ -168,13 +224,22 @@ pub async fn run( Wococo relayer account id: {:?}\n\t\ Max messages in single transaction: {}\n\t\ Max messages size in single transaction: {}\n\t\ - Max messages weight in single transaction: {}", - lane.relayer_id_at_source, + Max messages weight in single transaction: {}\n\t\ + Tx mortality: {:?}/{:?}\n\t\ + Stall timeout: {:?}", + lane.message_lane.relayer_id_at_source, max_messages_in_single_batch, max_messages_size_in_single_batch, max_messages_weight_in_single_batch, + params.source_transactions_mortality, + params.target_transactions_mortality, + stall_timeout, ); + let standalone_metrics = params + .standalone_metrics + .map(Ok) + .unwrap_or_else(|| standalone_metrics(source_client.clone(), target_client.clone()))?; messages_relay::message_lane_loop::run( messages_relay::message_lane_loop::Params { lane: lane_id, @@ -183,45 +248,47 @@ pub async fn run( reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY, stall_timeout, delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams { - max_unrewarded_relayer_entries_at_target: bp_rococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, - max_unconfirmed_nonces_at_target: bp_rococo::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, + max_unrewarded_relayer_entries_at_target: + bp_rococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + 
max_unconfirmed_nonces_at_target: + bp_rococo::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, max_messages_in_single_batch, max_messages_weight_in_single_batch, max_messages_size_in_single_batch, - relayer_mode: messages_relay::message_lane_loop::RelayerMode::Altruistic, + relay_strategy: params.relay_strategy, }, }, WococoSourceClient::new( source_client.clone(), lane.clone(), lane_id, - ROCOCO_CHAIN_ID, params.target_to_source_headers_relay, ), RococoTargetClient::new( - params.target_client, + target_client, lane, lane_id, - WOCOCO_CHAIN_ID, + standalone_metrics.clone(), params.source_to_target_headers_relay, ), - relay_utils::relay_metrics( - Some(messages_relay::message_lane_loop::metrics_prefix::< - WococoMessagesToRococo, - >(&lane_id)), - params.metrics_params, - ) - .standalone_metric(|registry, prefix| { - StorageProofOverheadMetric::new( - registry, - prefix, - source_client.clone(), - "wococo_storage_proof_overhead".into(), - "Wococo storage proof overhead".into(), - ) - })? - .into_params(), + standalone_metrics.register_and_spawn(params.metrics_params)?, futures::future::pending(), ) .await + .map_err(Into::into) +} + +/// Create standalone metrics for the Wococo -> Rococo messages loop. +pub(crate) fn standalone_metrics( + source_client: Client, + target_client: Client, +) -> anyhow::Result> { + substrate_relay_helper::messages_lane::standalone_metrics( + source_client, + target_client, + None, + None, + None, + None, + ) } diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/bridge.rs b/polkadot/bridges/relays/bin-substrate/src/cli/bridge.rs index 1feb3dcb1a46d25c82dcf6c80159c5b3de5bb396..1af6142c53eca9e1eff7768b8a6083b6c2685921 100644 --- a/polkadot/bridges/relays/bin-substrate/src/cli/bridge.rs +++ b/polkadot/bridges/relays/bin-substrate/src/cli/bridge.rs @@ -14,17 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use structopt::clap::arg_enum; - -arg_enum! 
{ - #[derive(Debug, PartialEq, Eq)] - /// Supported full bridges (headers + messages). - pub enum FullBridge { - MillauToRialto, - RialtoToMillau, - RococoToWococo, - WococoToRococo, - } +use strum::{EnumString, EnumVariantNames}; + +#[derive(Debug, PartialEq, Eq, EnumString, EnumVariantNames)] +#[strum(serialize_all = "kebab_case")] +/// Supported full bridges (headers + messages). +pub enum FullBridge { + MillauToRialto, + RialtoToMillau, + RococoToWococo, + WococoToRococo, + KusamaToPolkadot, + PolkadotToKusama, } impl FullBridge { @@ -35,6 +36,8 @@ impl FullBridge { Self::RialtoToMillau => RIALTO_TO_MILLAU_INDEX, Self::RococoToWococo => ROCOCO_TO_WOCOCO_INDEX, Self::WococoToRococo => WOCOCO_TO_ROCOCO_INDEX, + Self::KusamaToPolkadot => KUSAMA_TO_POLKADOT_INDEX, + Self::PolkadotToKusama => POLKADOT_TO_KUSAMA_INDEX, } } } @@ -43,6 +46,8 @@ pub const RIALTO_TO_MILLAU_INDEX: u8 = 0; pub const MILLAU_TO_RIALTO_INDEX: u8 = 0; pub const ROCOCO_TO_WOCOCO_INDEX: u8 = 0; pub const WOCOCO_TO_ROCOCO_INDEX: u8 = 0; +pub const KUSAMA_TO_POLKADOT_INDEX: u8 = 0; +pub const POLKADOT_TO_KUSAMA_INDEX: u8 = 0; /// The macro allows executing bridge-specific code without going fully generic. /// @@ -139,6 +144,50 @@ macro_rules! 
select_full_bridge { #[allow(unused_imports)] use relay_wococo_client::runtime::wococo_to_rococo_account_ownership_digest as account_ownership_digest; + $generic + } + FullBridge::KusamaToPolkadot => { + type Source = relay_kusama_client::Kusama; + #[allow(dead_code)] + type Target = relay_polkadot_client::Polkadot; + + // Derive-account + #[allow(unused_imports)] + use bp_polkadot::derive_account_from_kusama_id as derive_account; + + // Relay-messages + #[allow(unused_imports)] + use crate::chains::kusama_messages_to_polkadot::run as relay_messages; + + // Send-message / Estimate-fee + #[allow(unused_imports)] + use bp_polkadot::TO_POLKADOT_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD; + // Send-message + #[allow(unused_imports)] + use relay_kusama_client::runtime::kusama_to_polkadot_account_ownership_digest as account_ownership_digest; + + $generic + } + FullBridge::PolkadotToKusama => { + type Source = relay_polkadot_client::Polkadot; + #[allow(dead_code)] + type Target = relay_kusama_client::Kusama; + + // Derive-account + #[allow(unused_imports)] + use bp_kusama::derive_account_from_polkadot_id as derive_account; + + // Relay-messages + #[allow(unused_imports)] + use crate::chains::polkadot_messages_to_kusama::run as relay_messages; + + // Send-message / Estimate-fee + #[allow(unused_imports)] + use bp_kusama::TO_KUSAMA_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD; + // Send-message + #[allow(unused_imports)] + use relay_polkadot_client::runtime::polkadot_to_kusama_account_ownership_digest as account_ownership_digest; + $generic } } diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/derive_account.rs b/polkadot/bridges/relays/bin-substrate/src/cli/derive_account.rs index c7ac8761f133bba5b9c722041c54bf1a24d2f264..5b809eb69f22237ebf0505861e2ffbbcf73eba9b 100644 --- a/polkadot/bridges/relays/bin-substrate/src/cli/derive_account.rs +++ b/polkadot/bridges/relays/bin-substrate/src/cli/derive_account.rs @@ -14,10 +14,13 @@ // You 
should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use crate::cli::{bridge::FullBridge, AccountId}; -use crate::select_full_bridge; +use crate::{ + cli::{bridge::FullBridge, AccountId}, + select_full_bridge, +}; use relay_substrate_client::Chain; use structopt::StructOpt; +use strum::VariantNames; /// Given a source chain `AccountId`, derive the corresponding `AccountId` for the target chain. /// @@ -28,7 +31,7 @@ use structopt::StructOpt; #[derive(StructOpt)] pub struct DeriveAccount { /// A bridge instance to initialize. - #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + #[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)] bridge: FullBridge, /// Source-chain address to derive Target-chain address from. account: AccountId, @@ -54,11 +57,7 @@ impl DeriveAccount { select_full_bridge!(self.bridge, { let (account, derived_account) = self.derive_account(); println!("Source address:\n{} ({})", account, Source::NAME); - println!( - "->Corresponding (derived) address:\n{} ({})", - derived_account, - Target::NAME, - ); + println!("->Corresponding (derived) address:\n{} ({})", derived_account, Target::NAME,); Ok(()) }) @@ -80,9 +79,9 @@ mod tests { let millau = "752paRyW1EGfq9YLTSSqcSJ5hqnBDidBmaftGhBo8fy6ypW9"; // when - let (rialto_parsed, rialto_derived) = derive_account_cli("RialtoToMillau", rialto); - let (millau_parsed, millau_derived) = derive_account_cli("MillauToRialto", millau); - let (millau2_parsed, millau2_derived) = derive_account_cli("MillauToRialto", rialto); + let (rialto_parsed, rialto_derived) = derive_account_cli("rialto-to-millau", rialto); + let (millau_parsed, millau_derived) = derive_account_cli("millau-to-rialto", millau); + let (millau2_parsed, millau2_derived) = derive_account_cli("millau-to-rialto", rialto); // then assert_eq!(format!("{}", rialto_parsed), rialto); diff --git 
a/polkadot/bridges/relays/bin-substrate/src/cli/encode_call.rs b/polkadot/bridges/relays/bin-substrate/src/cli/encode_call.rs index cfe6d99a4eb9f76988845b34342b66368e6d45cd..e17854662e5c4696f3b3cfb62b2758d6e6e5348b 100644 --- a/polkadot/bridges/relays/bin-substrate/src/cli/encode_call.rs +++ b/polkadot/bridges/relays/bin-substrate/src/cli/encode_call.rs @@ -14,18 +14,22 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use crate::cli::bridge::FullBridge; -use crate::cli::{AccountId, Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId}; -use crate::select_full_bridge; +use crate::{ + cli::{ + bridge::FullBridge, AccountId, Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId, + }, + select_full_bridge, +}; use frame_support::weights::DispatchInfo; use relay_substrate_client::Chain; use structopt::StructOpt; +use strum::VariantNames; /// Encode source chain runtime call. #[derive(StructOpt, Debug)] pub struct EncodeCall { /// A bridge instance to encode call for. - #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + #[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)] bridge: FullBridge, #[structopt(flatten)] call: Call, @@ -125,31 +129,30 @@ pub(crate) fn preprocess_call {} - Call::Remark { - ref remark_size, - ref mut remark_payload, - } => { + Call::Raw { .. } => {}, + Call::Remark { ref remark_size, ref mut remark_payload } => if remark_payload.is_none() { *remark_payload = Some(HexBytes(generate_remark_payload( remark_size, - compute_maximal_message_arguments_size(Source::max_extrinsic_size(), Target::max_extrinsic_size()), + compute_maximal_message_arguments_size( + Source::max_extrinsic_size(), + Target::max_extrinsic_size(), + ), ))); - } - } + }, Call::Transfer { ref mut recipient, .. } => { recipient.enforce_chain::(); - } - Call::BridgeSendMessage { - ref mut bridge_instance_index, - .. 
- } => { + }, + Call::BridgeSendMessage { ref mut bridge_instance_index, .. } => { *bridge_instance_index = bridge_instance; - } + }, }; } -fn generate_remark_payload(remark_size: &Option>, maximal_allowed_size: u32) -> Vec { +fn generate_remark_payload( + remark_size: &Option>, + maximal_allowed_size: u32, +) -> Vec { match remark_size { Some(ExplicitOrMaximal::Explicit(remark_size)) => vec![0; *remark_size], Some(ExplicitOrMaximal::Maximal) => vec![0; maximal_allowed_size as _], @@ -171,9 +174,11 @@ pub(crate) fn compute_maximal_message_arguments_size( ) -> u32 { // assume that both signed extensions and other arguments fit 1KB let service_tx_bytes_on_source_chain = 1024; - let maximal_source_extrinsic_size = maximal_source_extrinsic_size - service_tx_bytes_on_source_chain; - let maximal_call_size = - bridge_runtime_common::messages::target::maximal_incoming_message_size(maximal_target_extrinsic_size); + let maximal_source_extrinsic_size = + maximal_source_extrinsic_size - service_tx_bytes_on_source_chain; + let maximal_call_size = bridge_runtime_common::messages::target::maximal_incoming_message_size( + maximal_target_extrinsic_size, + ); let maximal_call_size = if maximal_call_size > maximal_source_extrinsic_size { maximal_source_extrinsic_size } else { @@ -188,13 +193,14 @@ pub(crate) fn compute_maximal_message_arguments_size( #[cfg(test)] mod tests { use super::*; + use crate::cli::send_message::SendMessage; #[test] fn should_encode_transfer_call() { // given let mut encode_call = EncodeCall::from_iter(vec![ "encode-call", - "RialtoToMillau", + "rialto-to-millau", "transfer", "--amount", "12345", @@ -208,20 +214,21 @@ mod tests { // then assert_eq!( format!("{:?}", hex), - "0x0c00d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27de5c0" + "0x040000d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27de5c0" ); } #[test] fn should_encode_remark_with_default_payload() { // given - let mut encode_call = 
EncodeCall::from_iter(vec!["encode-call", "RialtoToMillau", "remark"]); + let mut encode_call = + EncodeCall::from_iter(vec!["encode-call", "rialto-to-millau", "remark"]); // when let hex = encode_call.encode().unwrap(); // then - assert!(format!("{:?}", hex).starts_with("0x070154556e69782074696d653a")); + assert!(format!("{:?}", hex).starts_with("0x000154556e69782074696d653a")); } #[test] @@ -229,7 +236,7 @@ mod tests { // given let mut encode_call = EncodeCall::from_iter(vec![ "encode-call", - "RialtoToMillau", + "rialto-to-millau", "remark", "--remark-payload", "1234", @@ -239,20 +246,25 @@ mod tests { let hex = encode_call.encode().unwrap(); // then - assert_eq!(format!("{:?}", hex), "0x0701081234"); + assert_eq!(format!("{:?}", hex), "0x0001081234"); } #[test] fn should_encode_remark_with_size() { // given - let mut encode_call = - EncodeCall::from_iter(vec!["encode-call", "RialtoToMillau", "remark", "--remark-size", "12"]); + let mut encode_call = EncodeCall::from_iter(vec![ + "encode-call", + "rialto-to-millau", + "remark", + "--remark-size", + "12", + ]); // when let hex = encode_call.encode().unwrap(); // then - assert_eq!(format!("{:?}", hex), "0x070130000000000000000000000000"); + assert_eq!(format!("{:?}", hex), "0x000130000000000000000000000000"); } #[test] @@ -260,7 +272,7 @@ mod tests { // when let err = EncodeCall::from_iter_safe(vec![ "encode-call", - "RialtoToMillau", + "rialto-to-millau", "remark", "--remark-payload", "1234", @@ -273,6 +285,68 @@ mod tests { assert_eq!(err.kind, structopt::clap::ErrorKind::ArgumentConflict); let info = err.info.unwrap(); - assert!(info.contains(&"remark-payload".to_string()) | info.contains(&"remark-size".to_string())) + assert!( + info.contains(&"remark-payload".to_string()) | + info.contains(&"remark-size".to_string()) + ) + } + + #[test] + fn should_encode_raw_call() { + // given + let mut encode_call = EncodeCall::from_iter(vec![ + "encode-call", + "rialto-to-millau", + "raw", + 
"040000d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27de5c0", + ]); + + // when + let hex = encode_call.encode().unwrap(); + + // then + assert_eq!( + format!("{:?}", hex), + "0x040000d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27de5c0" + ); + } + + #[test] + fn should_encode_bridge_send_message_call() { + // given + let encode_message = SendMessage::from_iter(vec![ + "send-message", + "millau-to-rialto", + "--source-port", + "10946", + "--source-signer", + "//Alice", + "--target-signer", + "//Alice", + "--origin", + "Target", + "remark", + ]) + .encode_payload() + .unwrap(); + + let mut encode_call = EncodeCall::from_iter(vec![ + "encode-call", + "rialto-to-millau", + "bridge-send-message", + "--fee", + "12345", + "--payload", + format!("{:}", &HexBytes::encode(&encode_message)).as_str(), + ]); + + // when + let call_hex = encode_call.encode().unwrap(); + + // then + assert!(format!("{:?}", call_hex).starts_with( + "0x0f030000000001000000381409000000000001d43593c715fdd31c61141abd04a99fd6822c8558854cc\ + de39a5684e7a56da27d01d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d01" + )) } } diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/encode_message.rs b/polkadot/bridges/relays/bin-substrate/src/cli/encode_message.rs index 79b7b9a42cd37913feaa87ffa4c9eeea53c5c55d..98e1269aa68e6002ba463503e2b960cdf14c4814 100644 --- a/polkadot/bridges/relays/bin-substrate/src/cli/encode_message.rs +++ b/polkadot/bridges/relays/bin-substrate/src/cli/encode_message.rs @@ -14,9 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use crate::cli::{bridge::FullBridge, AccountId, CliChain, HexBytes}; -use crate::select_full_bridge; +use crate::{ + cli::{bridge::FullBridge, AccountId, CliChain, HexBytes}, + select_full_bridge, +}; use structopt::StructOpt; +use strum::VariantNames; /// Generic message payload. 
#[derive(StructOpt, Debug, PartialEq, Eq)] @@ -41,7 +44,7 @@ pub enum MessagePayload { #[derive(StructOpt)] pub struct EncodeMessage { /// A bridge instance to initialize. - #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + #[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)] bridge: FullBridge, #[structopt(flatten)] payload: MessagePayload, @@ -51,7 +54,8 @@ impl EncodeMessage { /// Run the command. pub fn encode(self) -> anyhow::Result { select_full_bridge!(self.bridge, { - let payload = Source::encode_message(self.payload).map_err(|e| anyhow::format_err!("{}", e))?; + let payload = + Source::encode_message(self.payload).map_err(|e| anyhow::format_err!("{}", e))?; Ok(HexBytes::encode(&payload)) }) } @@ -73,7 +77,8 @@ mod tests { fn should_encode_raw_message() { // given let msg = "01000000e88514000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d003c040130000000000000000000000000"; - let encode_message = EncodeMessage::from_iter(vec!["encode-message", "MillauToRialto", "raw", msg]); + let encode_message = + EncodeMessage::from_iter(vec!["encode-message", "rialto-to-millau", "raw", msg]); // when let hex = encode_message.encode().unwrap(); @@ -88,7 +93,7 @@ mod tests { let sender = sp_keyring::AccountKeyring::Alice.to_account_id().to_ss58check(); let encode_message = EncodeMessage::from_iter(vec![ "encode-message", - "RialtoToMillau", + "rialto-to-millau", "call", "--sender", &sender, @@ -101,6 +106,6 @@ mod tests { let hex = encode_message.encode().unwrap(); // then - assert_eq!(format!("{:?}", hex), "0x01000000b0d60f000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d003c040130000000000000000000000000"); + assert_eq!(format!("{:?}", hex), "0x0100000010f108000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d003c000130000000000000000000000000"); } } diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/estimate_fee.rs 
b/polkadot/bridges/relays/bin-substrate/src/cli/estimate_fee.rs index 129699c26917c17b826c5f346a93b3234268f3cd..d063ce544cd243099711c9c0b5b048c5b8af90e7 100644 --- a/polkadot/bridges/relays/bin-substrate/src/cli/estimate_fee.rs +++ b/polkadot/bridges/relays/bin-substrate/src/cli/estimate_fee.rs @@ -14,18 +14,21 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use crate::cli::bridge::FullBridge; -use crate::cli::{Balance, CliChain, HexBytes, HexLaneId, SourceConnectionParams}; -use crate::select_full_bridge; +use crate::{ + cli::{bridge::FullBridge, Balance, CliChain, HexBytes, HexLaneId, SourceConnectionParams}, + select_full_bridge, +}; +use bp_runtime::BalanceOf; use codec::{Decode, Encode}; use relay_substrate_client::Chain; use structopt::StructOpt; +use strum::VariantNames; /// Estimate Delivery & Dispatch Fee command. #[derive(StructOpt, Debug, PartialEq, Eq)] pub struct EstimateFee { /// A bridge instance to encode call for. - #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + #[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)] bridge: FullBridge, #[structopt(flatten)] source: SourceConnectionParams, @@ -40,21 +43,21 @@ pub struct EstimateFee { impl EstimateFee { /// Run the command. 
pub async fn run(self) -> anyhow::Result<()> { - let Self { - source, - bridge, - lane, - payload, - } = self; + let Self { source, bridge, lane, payload } = self; select_full_bridge!(bridge, { let source_client = source.to_client::().await?; let lane = lane.into(); - let payload = Source::encode_message(payload).map_err(|e| anyhow::format_err!("{:?}", e))?; + let payload = + Source::encode_message(payload).map_err(|e| anyhow::format_err!("{:?}", e))?; - let fee: ::Balance = - estimate_message_delivery_and_dispatch_fee(&source_client, ESTIMATE_MESSAGE_FEE_METHOD, lane, payload) - .await?; + let fee: BalanceOf = estimate_message_delivery_and_dispatch_fee( + &source_client, + ESTIMATE_MESSAGE_FEE_METHOD, + lane, + payload, + ) + .await?; log::info!(target: "bridge", "Fee: {:?}", Balance(fee as _)); println!("{}", fee); @@ -72,10 +75,11 @@ pub(crate) async fn estimate_message_delivery_and_dispatch_fee = - Decode::decode(&mut &encoded_response.0[..]).map_err(relay_substrate_client::Error::ResponseParseFailed)?; - let fee = decoded_response - .ok_or_else(|| anyhow::format_err!("Unable to decode fee from: {:?}", HexBytes(encoded_response.to_vec())))?; + let decoded_response: Option = Decode::decode(&mut &encoded_response.0[..]) + .map_err(relay_substrate_client::Error::ResponseParseFailed)?; + let fee = decoded_response.ok_or_else(|| { + anyhow::format_err!("Unable to decode fee from: {:?}", HexBytes(encoded_response.to_vec())) + })?; Ok(fee) } @@ -93,7 +97,7 @@ mod tests { // when let res = EstimateFee::from_iter(vec![ "estimate_fee", - "RialtoToMillau", + "rialto-to-millau", "--source-port", "1234", "call", diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/init_bridge.rs b/polkadot/bridges/relays/bin-substrate/src/cli/init_bridge.rs index 25f220d7f6cebc6bd51f67054c82b1486263d5e1..ffda0b1200884bac7924baddbb220af5c8e3ab72 100644 --- a/polkadot/bridges/relays/bin-substrate/src/cli/init_bridge.rs +++ b/polkadot/bridges/relays/bin-substrate/src/cli/init_bridge.rs 
@@ -18,15 +18,16 @@ use crate::cli::{SourceConnectionParams, TargetConnectionParams, TargetSigningPa use bp_header_chain::InitializationData; use bp_runtime::Chain as ChainBase; use codec::Encode; -use relay_substrate_client::{Chain, TransactionSignScheme}; +use relay_substrate_client::{Chain, TransactionSignScheme, UnsignedTransaction}; use sp_core::{Bytes, Pair}; -use structopt::{clap::arg_enum, StructOpt}; +use structopt::StructOpt; +use strum::{EnumString, EnumVariantNames, VariantNames}; /// Initialize bridge pallet. #[derive(StructOpt)] pub struct InitBridge { /// A bridge instance to initialize. - #[structopt(possible_values = &InitBridgeName::variants(), case_insensitive = true)] + #[structopt(possible_values = InitBridgeName::VARIANTS, case_insensitive = true)] bridge: InitBridgeName, #[structopt(flatten)] source: SourceConnectionParams, @@ -36,17 +37,17 @@ pub struct InitBridge { target_sign: TargetSigningParams, } -// TODO [#851] Use kebab-case. -arg_enum! { - #[derive(Debug)] - /// Bridge to initialize. - pub enum InitBridgeName { - MillauToRialto, - RialtoToMillau, - WestendToMillau, - RococoToWococo, - WococoToRococo, - } +#[derive(Debug, EnumString, EnumVariantNames)] +#[strum(serialize_all = "kebab_case")] +/// Bridge to initialize. +pub enum InitBridgeName { + MillauToRialto, + RialtoToMillau, + WestendToMillau, + RococoToWococo, + WococoToRococo, + KusamaToPolkadot, + PolkadotToKusama, } macro_rules! select_bridge { @@ -59,14 +60,17 @@ macro_rules! 
select_bridge { fn encode_init_bridge( init_data: InitializationData<::Header>, ) -> ::Call { - rialto_runtime::SudoCall::sudo(Box::new( - rialto_runtime::BridgeGrandpaMillauCall::initialize(init_data).into(), - )) + rialto_runtime::SudoCall::sudo { + call: Box::new( + rialto_runtime::BridgeGrandpaMillauCall::initialize { init_data } + .into(), + ), + } .into() } $generic - } + }, InitBridgeName::RialtoToMillau => { type Source = relay_rialto_client::Rialto; type Target = relay_millau_client::Millau; @@ -74,15 +78,17 @@ macro_rules! select_bridge { fn encode_init_bridge( init_data: InitializationData<::Header>, ) -> ::Call { - let initialize_call = millau_runtime::BridgeGrandpaRialtoCall::< + let initialize_call = millau_runtime::BridgeGrandpaCall::< millau_runtime::Runtime, millau_runtime::RialtoGrandpaInstance, - >::initialize(init_data); - millau_runtime::SudoCall::sudo(Box::new(initialize_call.into())).into() + >::initialize { + init_data, + }; + millau_runtime::SudoCall::sudo { call: Box::new(initialize_call.into()) }.into() } $generic - } + }, InitBridgeName::WestendToMillau => { type Source = relay_westend_client::Westend; type Target = relay_millau_client::Millau; @@ -90,18 +96,21 @@ macro_rules! select_bridge { fn encode_init_bridge( init_data: InitializationData<::Header>, ) -> ::Call { - // at Westend -> Millau initialization we're not using sudo, because otherwise our deployments - // may fail, because we need to initialize both Rialto -> Millau and Westend -> Millau bridge. - // => since there's single possible sudo account, one of transaction may fail with duplicate nonce error - millau_runtime::BridgeGrandpaWestendCall::< + // at Westend -> Millau initialization we're not using sudo, because otherwise + // our deployments may fail, because we need to initialize both Rialto -> Millau + // and Westend -> Millau bridge. 
=> since there's single possible sudo account, + // one of transaction may fail with duplicate nonce error + millau_runtime::BridgeGrandpaCall::< millau_runtime::Runtime, millau_runtime::WestendGrandpaInstance, - >::initialize(init_data) + >::initialize { + init_data, + } .into() } $generic - } + }, InitBridgeName::RococoToWococo => { type Source = relay_rococo_client::Rococo; type Target = relay_wococo_client::Wococo; @@ -110,12 +119,14 @@ macro_rules! select_bridge { init_data: InitializationData<::Header>, ) -> ::Call { relay_wococo_client::runtime::Call::BridgeGrandpaRococo( - relay_wococo_client::runtime::BridgeGrandpaRococoCall::initialize(init_data), + relay_wococo_client::runtime::BridgeGrandpaRococoCall::initialize( + init_data, + ), ) } $generic - } + }, InitBridgeName::WococoToRococo => { type Source = relay_wococo_client::Wococo; type Target = relay_rococo_client::Rococo; @@ -124,12 +135,46 @@ macro_rules! select_bridge { init_data: InitializationData<::Header>, ) -> ::Call { relay_rococo_client::runtime::Call::BridgeGrandpaWococo( - relay_rococo_client::runtime::BridgeGrandpaWococoCall::initialize(init_data), + relay_rococo_client::runtime::BridgeGrandpaWococoCall::initialize( + init_data, + ), + ) + } + + $generic + }, + InitBridgeName::KusamaToPolkadot => { + type Source = relay_kusama_client::Kusama; + type Target = relay_polkadot_client::Polkadot; + + fn encode_init_bridge( + init_data: InitializationData<::Header>, + ) -> ::Call { + relay_polkadot_client::runtime::Call::BridgeKusamaGrandpa( + relay_polkadot_client::runtime::BridgeKusamaGrandpaCall::initialize( + init_data, + ), + ) + } + + $generic + }, + InitBridgeName::PolkadotToKusama => { + type Source = relay_polkadot_client::Polkadot; + type Target = relay_kusama_client::Kusama; + + fn encode_init_bridge( + init_data: InitializationData<::Header>, + ) -> ::Call { + relay_kusama_client::runtime::Call::BridgePolkadotGrandpa( + 
relay_kusama_client::runtime::BridgePolkadotGrandpaCall::initialize( + init_data, + ), ) } $generic - } + }, } }; } @@ -142,7 +187,7 @@ impl InitBridge { let target_client = self.target.to_client::().await?; let target_sign = self.target_sign.to_keypair::()?; - crate::headers_initialize::initialize( + substrate_relay_helper::headers_initialize::initialize( source_client, target_client.clone(), target_sign.public().into(), @@ -151,8 +196,11 @@ impl InitBridge { Target::sign_transaction( *target_client.genesis_hash(), &target_sign, - transaction_nonce, - encode_init_bridge(initialization_data), + relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new( + encode_init_bridge(initialization_data), + transaction_nonce, + ), ) .encode(), ) diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/mod.rs b/polkadot/bridges/relays/bin-substrate/src/cli/mod.rs index 042ae320d502f0da1cc0376a7a1673afdde287ec..d98e8af0af084d297a4610fd2cbd72ca6593a5ff 100644 --- a/polkadot/bridges/relays/bin-substrate/src/cli/mod.rs +++ b/polkadot/bridges/relays/bin-substrate/src/cli/mod.rs @@ -32,9 +32,12 @@ pub(crate) mod send_message; mod derive_account; mod init_bridge; +mod register_parachain; mod relay_headers; mod relay_headers_and_messages; mod relay_messages; +mod resubmit_transactions; +mod swap_tokens; /// Parse relay CLI args. pub fn parse_args() -> Command { @@ -84,8 +87,15 @@ pub enum Command { EncodeMessage(encode_message::EncodeMessage), /// Estimate Delivery and Dispatch Fee required for message submission to messages pallet. EstimateFee(estimate_fee::EstimateFee), - /// Given a source chain `AccountId`, derive the corresponding `AccountId` for the target chain. + /// Given a source chain `AccountId`, derive the corresponding `AccountId` for the target + /// chain. DeriveAccount(derive_account::DeriveAccount), + /// Resubmit transactions with increased tip if they are stalled. 
+ ResubmitTransactions(resubmit_transactions::ResubmitTransactions), + /// Swap tokens using token-swap bridge. + SwapTokens(swap_tokens::SwapTokens), + /// Register parachain. + RegisterParachain(register_parachain::RegisterParachain), } impl Command { @@ -94,12 +104,15 @@ impl Command { use relay_utils::initialize::{initialize_logger, initialize_relay}; match self { - Self::RelayHeaders(_) | Self::RelayMessages(_) | Self::RelayHeadersAndMessages(_) | Self::InitBridge(_) => { + Self::RelayHeaders(_) | + Self::RelayMessages(_) | + Self::RelayHeadersAndMessages(_) | + Self::InitBridge(_) => { initialize_relay(); - } + }, _ => { initialize_logger(false); - } + }, } } @@ -116,6 +129,9 @@ impl Command { Self::EncodeMessage(arg) => arg.run().await?, Self::EstimateFee(arg) => arg.run().await?, Self::DeriveAccount(arg) => arg.run().await?, + Self::ResubmitTransactions(arg) => arg.run().await?, + Self::SwapTokens(arg) => arg.run().await?, + Self::RegisterParachain(arg) => arg.run().await?, } Ok(()) } @@ -187,10 +203,7 @@ const SS58_FORMAT_PROOF: &str = "u16 -> Ss58Format is infallible; qed"; impl AccountId { /// Create new SS58-formatted address from raw account id. pub fn from_raw(account: sp_runtime::AccountId32) -> Self { - Self { - account, - ss58_format: T::ss58_format().try_into().expect(SS58_FORMAT_PROOF), - } + Self { account, ss58_format: T::ss58_format().try_into().expect(SS58_FORMAT_PROOF) } } /// Enforces formatting account to be for given [`CliChain`] type. @@ -228,7 +241,7 @@ pub trait CliChain: relay_substrate_client::Chain { /// Chain's current version of the runtime. const RUNTIME_VERSION: sp_version::RuntimeVersion; - /// Crypto keypair type used to send messages. + /// Crypto KeyPair type used to send messages. /// /// In case of chains supporting multiple cryptos, pick one used by the CLI. 
type KeyPair: sp_core::crypto::Pair; @@ -242,7 +255,9 @@ pub trait CliChain: relay_substrate_client::Chain { fn ss58_format() -> u16; /// Construct message payload to be sent over the bridge. - fn encode_message(message: crate::cli::encode_message::MessagePayload) -> Result; + fn encode_message( + message: crate::cli::encode_message::MessagePayload, + ) -> anyhow::Result; /// Maximal extrinsic weight (from the runtime). fn max_extrinsic_weight() -> Weight; @@ -344,7 +359,7 @@ where fn from_str(s: &str) -> Result { if s.to_lowercase() == "max" { - return Ok(ExplicitOrMaximal::Maximal); + return Ok(ExplicitOrMaximal::Maximal) } V::from_str(s) @@ -360,7 +375,7 @@ macro_rules! declare_chain_options { ($chain:ident, $chain_prefix:ident) => { paste::item! { #[doc = $chain " connection params."] - #[derive(StructOpt, Debug, PartialEq, Eq)] + #[derive(StructOpt, Debug, PartialEq, Eq, Clone)] pub struct [<$chain ConnectionParams>] { #[doc = "Connect to " $chain " node at given host."] #[structopt(long, default_value = "127.0.0.1")] @@ -374,28 +389,117 @@ macro_rules! declare_chain_options { } #[doc = $chain " signing params."] - #[derive(StructOpt, Debug, PartialEq, Eq)] + #[derive(StructOpt, Debug, PartialEq, Eq, Clone)] pub struct [<$chain SigningParams>] { #[doc = "The SURI of secret key to use when transactions are submitted to the " $chain " node."] #[structopt(long)] - pub [<$chain_prefix _signer>]: String, + pub [<$chain_prefix _signer>]: Option, #[doc = "The password for the SURI of secret key to use when transactions are submitted to the " $chain " node."] #[structopt(long)] pub [<$chain_prefix _signer_password>]: Option, + + #[doc = "Path to the file, that contains SURI of secret key to use when transactions are submitted to the " $chain " node. 
Can be overridden with " $chain_prefix "_signer option."] + #[structopt(long)] + pub [<$chain_prefix _signer_file>]: Option, + #[doc = "Path to the file, that password for the SURI of secret key to use when transactions are submitted to the " $chain " node. Can be overridden with " $chain_prefix "_signer_password option."] + #[structopt(long)] + pub [<$chain_prefix _signer_password_file>]: Option, + + #[doc = "Transactions mortality period, in blocks. MUST be a power of two in [4; 65536] range. MAY NOT be larger than `BlockHashCount` parameter of the chain system module."] + #[structopt(long)] + pub [<$chain_prefix _transactions_mortality>]: Option, + } + + #[doc = "Parameters required to sign transaction on behalf of owner of the messages pallet at " $chain "."] + #[derive(StructOpt, Debug, PartialEq, Eq)] + pub struct [<$chain MessagesPalletOwnerSigningParams>] { + #[doc = "The SURI of secret key to use when transactions are submitted to the " $chain " node."] + #[structopt(long)] + pub [<$chain_prefix _messages_pallet_owner>]: Option, + #[doc = "The password for the SURI of secret key to use when transactions are submitted to the " $chain " node."] + #[structopt(long)] + pub [<$chain_prefix _messages_pallet_owner_password>]: Option, } impl [<$chain SigningParams>] { - /// Parse signing params into chain-specific `KeyPair`. + /// Return transactions mortality. + #[allow(dead_code)] + pub fn transactions_mortality(&self) -> anyhow::Result> { + self.[<$chain_prefix _transactions_mortality>] + .map(|transactions_mortality| { + if !(4..=65536).contains(&transactions_mortality) + || !transactions_mortality.is_power_of_two() + { + Err(anyhow::format_err!( + "Transactions mortality {} is not a power of two in a [4; 65536] range", + transactions_mortality, + )) + } else { + Ok(transactions_mortality) + } + }) + .transpose() + } + + /// Parse signing params into chain-specific KeyPair. 
+ #[allow(dead_code)] pub fn to_keypair(&self) -> anyhow::Result { + let suri = match (self.[<$chain_prefix _signer>].as_ref(), self.[<$chain_prefix _signer_file>].as_ref()) { + (Some(suri), _) => suri.to_owned(), + (None, Some(suri_file)) => std::fs::read_to_string(suri_file) + .map_err(|err| anyhow::format_err!( + "Failed to read SURI from file {:?}: {}", + suri_file, + err, + ))?, + (None, None) => return Err(anyhow::format_err!( + "One of options must be specified: '{}' or '{}'", + stringify!([<$chain_prefix _signer>]), + stringify!([<$chain_prefix _signer_file>]), + )), + }; + + let suri_password = match ( + self.[<$chain_prefix _signer_password>].as_ref(), + self.[<$chain_prefix _signer_password_file>].as_ref(), + ) { + (Some(suri_password), _) => Some(suri_password.to_owned()), + (None, Some(suri_password_file)) => std::fs::read_to_string(suri_password_file) + .map(Some) + .map_err(|err| anyhow::format_err!( + "Failed to read SURI password from file {:?}: {}", + suri_password_file, + err, + ))?, + _ => None, + }; + use sp_core::crypto::Pair; Chain::KeyPair::from_string( - &self.[<$chain_prefix _signer>], - self.[<$chain_prefix _signer_password>].as_deref() + &suri, + suri_password.as_deref() ).map_err(|e| anyhow::format_err!("{:?}", e)) } } + #[allow(dead_code)] + impl [<$chain MessagesPalletOwnerSigningParams>] { + /// Parse signing params into chain-specific KeyPair. + pub fn to_keypair(&self) -> anyhow::Result> { + use sp_core::crypto::Pair; + + let [<$chain_prefix _messages_pallet_owner>] = match self.[<$chain_prefix _messages_pallet_owner>] { + Some(ref messages_pallet_owner) => messages_pallet_owner, + None => return Ok(None), + }; + Chain::KeyPair::from_string( + [<$chain_prefix _messages_pallet_owner>], + self.[<$chain_prefix _messages_pallet_owner_password>].as_deref() + ).map_err(|e| anyhow::format_err!("{:?}", e)).map(Some) + } + } + impl [<$chain ConnectionParams>] { /// Convert connection params into Substrate client. 
pub async fn to_client( @@ -416,9 +520,12 @@ macro_rules! declare_chain_options { declare_chain_options!(Source, source); declare_chain_options!(Target, target); +declare_chain_options!(Relaychain, relaychain); +declare_chain_options!(Parachain, parachain); #[cfg(test)] mod tests { + use sp_core::Pair; use std::str::FromStr; use super::*; @@ -434,10 +541,7 @@ mod tests { let expected = vec![rialto1, rialto2, millau1, millau2]; // when - let parsed = expected - .iter() - .map(|s| AccountId::from_str(s).unwrap()) - .collect::>(); + let parsed = expected.iter().map(|s| AccountId::from_str(s).unwrap()).collect::>(); let actual = parsed.iter().map(|a| format!("{}", a)).collect::>(); @@ -456,4 +560,93 @@ mod tests { // then assert_eq!(hex.0, hex2.0); } + + #[test] + fn reads_suri_from_file() { + const ALICE: &str = "//Alice"; + const BOB: &str = "//Bob"; + const ALICE_PASSWORD: &str = "alice_password"; + const BOB_PASSWORD: &str = "bob_password"; + + let alice = sp_core::sr25519::Pair::from_string(ALICE, Some(ALICE_PASSWORD)).unwrap(); + let bob = sp_core::sr25519::Pair::from_string(BOB, Some(BOB_PASSWORD)).unwrap(); + let bob_with_alice_password = + sp_core::sr25519::Pair::from_string(BOB, Some(ALICE_PASSWORD)).unwrap(); + + let temp_dir = tempfile::tempdir().unwrap(); + let mut suri_file_path = temp_dir.path().to_path_buf(); + let mut password_file_path = temp_dir.path().to_path_buf(); + suri_file_path.push("suri"); + password_file_path.push("password"); + std::fs::write(&suri_file_path, BOB.as_bytes()).unwrap(); + std::fs::write(&password_file_path, BOB_PASSWORD.as_bytes()).unwrap(); + + // when both seed and password are read from file + assert_eq!( + TargetSigningParams { + target_signer: Some(ALICE.into()), + target_signer_password: Some(ALICE_PASSWORD.into()), + + target_signer_file: None, + target_signer_password_file: None, + + target_transactions_mortality: None, + } + .to_keypair::() + .map(|p| p.public()) + .map_err(drop), + Ok(alice.public()), + ); + + // 
when both seed and password are read from file + assert_eq!( + TargetSigningParams { + target_signer: None, + target_signer_password: None, + + target_signer_file: Some(suri_file_path.clone()), + target_signer_password_file: Some(password_file_path.clone()), + + target_transactions_mortality: None, + } + .to_keypair::() + .map(|p| p.public()) + .map_err(drop), + Ok(bob.public()), + ); + + // when password are is overriden by cli option + assert_eq!( + TargetSigningParams { + target_signer: None, + target_signer_password: Some(ALICE_PASSWORD.into()), + + target_signer_file: Some(suri_file_path.clone()), + target_signer_password_file: Some(password_file_path.clone()), + + target_transactions_mortality: None, + } + .to_keypair::() + .map(|p| p.public()) + .map_err(drop), + Ok(bob_with_alice_password.public()), + ); + + // when both seed and password are overriden by cli options + assert_eq!( + TargetSigningParams { + target_signer: Some(ALICE.into()), + target_signer_password: Some(ALICE_PASSWORD.into()), + + target_signer_file: Some(suri_file_path), + target_signer_password_file: Some(password_file_path), + + target_transactions_mortality: None, + } + .to_keypair::() + .map(|p| p.public()) + .map_err(drop), + Ok(alice.public()), + ); + } } diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/register_parachain.rs b/polkadot/bridges/relays/bin-substrate/src/cli/register_parachain.rs new file mode 100644 index 0000000000000000000000000000000000000000..fecc431148ebde0b338eef997666081b11bee708 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/cli/register_parachain.rs @@ -0,0 +1,346 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::{ + swap_tokens::wait_until_transaction_is_finalized, Balance, ParachainConnectionParams, + RelaychainConnectionParams, RelaychainSigningParams, +}; + +use codec::Encode; +use num_traits::Zero; +use polkadot_parachain::primitives::{ + HeadData as ParaHeadData, Id as ParaId, ValidationCode as ParaValidationCode, +}; +use polkadot_runtime_common::{ + paras_registrar::Call as ParaRegistrarCall, slots::Call as ParaSlotsCall, +}; +use polkadot_runtime_parachains::paras::ParaLifecycle; +use relay_substrate_client::{ + AccountIdOf, CallOf, Chain, Client, TransactionSignScheme, UnsignedTransaction, +}; +use rialto_runtime::SudoCall; +use sp_core::{ + storage::{well_known_keys::CODE, StorageKey}, + Bytes, Pair, +}; +use structopt::StructOpt; +use strum::{EnumString, EnumVariantNames, VariantNames}; + +/// Name of the `NextFreeParaId` value in the `polkadot_runtime_common::paras_registrar` pallet. +const NEXT_FREE_PARA_ID_STORAGE_NAME: &str = "NextFreeParaId"; +/// Name of the `ParaLifecycles` map in the `polkadot_runtime_parachains::paras` pallet. +const PARAS_LIFECYCLES_STORAGE_NAME: &str = "ParaLifecycles"; + +/// Register parachain. +#[derive(StructOpt, Debug, PartialEq)] +pub struct RegisterParachain { + /// A parachain to register. + #[structopt(possible_values = Parachain::VARIANTS, case_insensitive = true)] + parachain: Parachain, + /// Parachain deposit. + #[structopt(long, default_value = "0")] + deposit: Balance, + /// Lease begin. + #[structopt(long, default_value = "0")] + lease_begin: u32, + /// Lease end. 
+ #[structopt(long, default_value = "256")] + lease_end: u32, + #[structopt(flatten)] + relay_connection: RelaychainConnectionParams, + #[structopt(flatten)] + relay_sign: RelaychainSigningParams, + #[structopt(flatten)] + para_connection: ParachainConnectionParams, +} + +/// Parachain to register. +#[derive(Debug, EnumString, EnumVariantNames, PartialEq)] +#[strum(serialize_all = "kebab_case")] +pub enum Parachain { + RialtoParachain, +} + +macro_rules! select_bridge { + ($bridge: expr, $generic: tt) => { + match $bridge { + Parachain::RialtoParachain => { + type Relaychain = relay_rialto_client::Rialto; + type Parachain = relay_rialto_parachain_client::RialtoParachain; + + use bp_rialto::{PARAS_PALLET_NAME, PARAS_REGISTRAR_PALLET_NAME}; + + $generic + }, + } + }; +} + +impl RegisterParachain { + /// Run the command. + pub async fn run(self) -> anyhow::Result<()> { + select_bridge!(self.parachain, { + let relay_client = self.relay_connection.to_client::().await?; + let relay_sign = self.relay_sign.to_keypair::()?; + let para_client = self.para_connection.to_client::().await?; + + // hopefully we're the only actor that is registering parachain right now + // => read next parachain id + let para_id_key = bp_runtime::storage_value_final_key( + PARAS_REGISTRAR_PALLET_NAME.as_bytes(), + NEXT_FREE_PARA_ID_STORAGE_NAME.as_bytes(), + ); + let para_id: ParaId = relay_client + .storage_value(StorageKey(para_id_key.to_vec()), None) + .await? 
+ .unwrap_or(polkadot_primitives::v1::LOWEST_PUBLIC_ID) + .max(polkadot_primitives::v1::LOWEST_PUBLIC_ID); + log::info!(target: "bridge", "Going to reserve parachain id: {:?}", para_id); + + // step 1: reserve a parachain id + let relay_genesis_hash = *relay_client.genesis_hash(); + let relay_sudo_account: AccountIdOf = relay_sign.public().into(); + let reserve_parachain_id_call: CallOf = + ParaRegistrarCall::reserve {}.into(); + let reserve_parachain_signer = relay_sign.clone(); + wait_until_transaction_is_finalized::( + relay_client + .submit_and_watch_signed_extrinsic( + relay_sudo_account.clone(), + move |_, transaction_nonce| { + Bytes( + Relaychain::sign_transaction( + relay_genesis_hash, + &reserve_parachain_signer, + relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new( + reserve_parachain_id_call, + transaction_nonce, + ), + ) + .encode(), + ) + }, + ) + .await?, + ) + .await?; + log::info!(target: "bridge", "Reserved parachain id: {:?}", para_id); + + // step 2: register parathread + let para_genesis_header = para_client.header_by_number(Zero::zero()).await?; + let para_code = para_client + .raw_storage_value(StorageKey(CODE.to_vec()), Some(para_genesis_header.hash())) + .await? + .ok_or_else(|| { + anyhow::format_err!("Cannot fetch validation code of {}", Parachain::NAME) + })? 
+ .0; + log::info!( + target: "bridge", + "Going to register parachain {:?}: genesis len = {} code len = {}", + para_id, + para_genesis_header.encode().len(), + para_code.len(), + ); + let register_parathread_call: CallOf = ParaRegistrarCall::register { + id: para_id, + genesis_head: ParaHeadData(para_genesis_header.encode()), + validation_code: ParaValidationCode(para_code), + } + .into(); + let register_parathread_signer = relay_sign.clone(); + wait_until_transaction_is_finalized::( + relay_client + .submit_and_watch_signed_extrinsic( + relay_sudo_account.clone(), + move |_, transaction_nonce| { + Bytes( + Relaychain::sign_transaction( + relay_genesis_hash, + ®ister_parathread_signer, + relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new( + register_parathread_call, + transaction_nonce, + ), + ) + .encode(), + ) + }, + ) + .await?, + ) + .await?; + log::info!(target: "bridge", "Registered parachain: {:?}. Waiting for onboarding", para_id); + + // wait until parathread is onboarded + let para_state_key = bp_runtime::storage_map_final_key_twox64_concat( + PARAS_PALLET_NAME, + PARAS_LIFECYCLES_STORAGE_NAME, + ¶_id.encode(), + ); + wait_para_state( + &relay_client, + ¶_state_key.0, + &[ParaLifecycle::Onboarding, ParaLifecycle::Parathread], + ParaLifecycle::Parathread, + ) + .await?; + + // step 3: force parachain leases + let lease_begin = self.lease_begin; + let lease_end = self.lease_end; + let para_deposit = self.deposit.cast().into(); + log::info!( + target: "bridge", + "Going to force leases of parachain {:?}: [{}; {}]", + para_id, + lease_begin, + lease_end, + ); + let force_lease_call: CallOf = SudoCall::sudo { + call: Box::new( + ParaSlotsCall::force_lease { + para: para_id, + leaser: relay_sudo_account.clone(), + amount: para_deposit, + period_begin: lease_begin, + period_count: lease_end.saturating_sub(lease_begin).saturating_add(1), + } + .into(), + ), + } + .into(); + let force_lease_signer = relay_sign.clone(); + relay_client + 
.submit_signed_extrinsic(relay_sudo_account.clone(), move |_, transaction_nonce| { + Bytes( + Relaychain::sign_transaction( + relay_genesis_hash, + &force_lease_signer, + relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new(force_lease_call, transaction_nonce), + ) + .encode(), + ) + }) + .await?; + log::info!(target: "bridge", "Registered parachain leases: {:?}. Waiting for onboarding", para_id); + + // wait until parachain is onboarded + wait_para_state( + &relay_client, + ¶_state_key.0, + &[ + ParaLifecycle::Onboarding, + ParaLifecycle::UpgradingParathread, + ParaLifecycle::Parathread, + ], + ParaLifecycle::Parachain, + ) + .await?; + + Ok(()) + }) + } +} + +/// Wait until parachain state is changed. +async fn wait_para_state( + relay_client: &Client, + para_state_key: &[u8], + from_states: &[ParaLifecycle], + to_state: ParaLifecycle, +) -> anyhow::Result<()> { + loop { + let para_state: ParaLifecycle = relay_client + .storage_value(StorageKey(para_state_key.to_vec()), None) + .await? + .ok_or_else(|| { + anyhow::format_err!( + "Cannot fetch next free parachain lifecycle from the runtime storage of {}", + Relaychain::NAME, + ) + })?; + if para_state == to_state { + log::info!(target: "bridge", "Parachain state is now: {:?}", to_state); + return Ok(()) + } + if !from_states.contains(¶_state) { + return Err(anyhow::format_err!("Invalid parachain lifecycle: {:?}", para_state)) + } + + log::info!(target: "bridge", "Parachain state: {:?}. 
Waiting for {:?}", para_state, to_state); + async_std::task::sleep(Relaychain::AVERAGE_BLOCK_INTERVAL).await; + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn register_rialto_parachain() { + let register_parachain = RegisterParachain::from_iter(vec![ + "register-parachain", + "rialto-parachain", + "--parachain-host", + "127.0.0.1", + "--parachain-port", + "11949", + "--relaychain-host", + "127.0.0.1", + "--relaychain-port", + "9944", + "--relaychain-signer", + "//Alice", + "--deposit", + "42", + "--lease-begin", + "100", + "--lease-end", + "200", + ]); + + assert_eq!( + register_parachain, + RegisterParachain { + parachain: Parachain::RialtoParachain, + deposit: Balance(42), + lease_begin: 100, + lease_end: 200, + relay_connection: RelaychainConnectionParams { + relaychain_host: "127.0.0.1".into(), + relaychain_port: 9944, + relaychain_secure: false, + }, + relay_sign: RelaychainSigningParams { + relaychain_signer: Some("//Alice".into()), + relaychain_signer_password: None, + relaychain_signer_file: None, + relaychain_signer_password_file: None, + relaychain_transactions_mortality: None, + }, + para_connection: ParachainConnectionParams { + parachain_host: "127.0.0.1".into(), + parachain_port: 11949, + parachain_secure: false, + }, + } + ); + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers.rs b/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers.rs index ec521c2918d85ae078b3b3e928dec7e624d73b4a..82c55965a991aa7870fca627bbd61993b6aa29b0 100644 --- a/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers.rs +++ b/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers.rs @@ -14,17 +14,24 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . 
-use crate::cli::{PrometheusParams, SourceConnectionParams, TargetConnectionParams, TargetSigningParams}; -use crate::finality_pipeline::SubstrateFinalitySyncPipeline; -use structopt::{clap::arg_enum, StructOpt}; +use structopt::StructOpt; +use strum::{EnumString, EnumVariantNames, VariantNames}; + +use relay_utils::metrics::{GlobalMetrics, StandaloneMetric}; +use substrate_relay_helper::finality_pipeline::SubstrateFinalitySyncPipeline; + +use crate::cli::{ + PrometheusParams, SourceConnectionParams, TargetConnectionParams, TargetSigningParams, +}; /// Start headers relayer process. #[derive(StructOpt)] pub struct RelayHeaders { /// A bridge instance to relay headers for. - #[structopt(possible_values = &RelayHeadersBridge::variants(), case_insensitive = true)] + #[structopt(possible_values = RelayHeadersBridge::VARIANTS, case_insensitive = true)] bridge: RelayHeadersBridge, - /// If passed, only mandatory headers (headers that are changing the GRANDPA authorities set) are relayed. + /// If passed, only mandatory headers (headers that are changing the GRANDPA authorities set) + /// are relayed. #[structopt(long)] only_mandatory_headers: bool, #[structopt(flatten)] @@ -37,17 +44,17 @@ pub struct RelayHeaders { prometheus_params: PrometheusParams, } -// TODO [#851] Use kebab-case. -arg_enum! { - #[derive(Debug)] - /// Headers relay bridge. - pub enum RelayHeadersBridge { - MillauToRialto, - RialtoToMillau, - WestendToMillau, - RococoToWococo, - WococoToRococo, - } +#[derive(Debug, EnumString, EnumVariantNames)] +#[strum(serialize_all = "kebab_case")] +/// Headers relay bridge. +pub enum RelayHeadersBridge { + MillauToRialto, + RialtoToMillau, + WestendToMillau, + RococoToWococo, + WococoToRococo, + KusamaToPolkadot, + PolkadotToKusama, } macro_rules! select_bridge { @@ -59,35 +66,49 @@ macro_rules! 
select_bridge { type Finality = crate::chains::millau_headers_to_rialto::MillauFinalityToRialto; $generic - } + }, RelayHeadersBridge::RialtoToMillau => { type Source = relay_rialto_client::Rialto; type Target = relay_millau_client::Millau; type Finality = crate::chains::rialto_headers_to_millau::RialtoFinalityToMillau; $generic - } + }, RelayHeadersBridge::WestendToMillau => { type Source = relay_westend_client::Westend; type Target = relay_millau_client::Millau; type Finality = crate::chains::westend_headers_to_millau::WestendFinalityToMillau; $generic - } + }, RelayHeadersBridge::RococoToWococo => { type Source = relay_rococo_client::Rococo; type Target = relay_wococo_client::Wococo; type Finality = crate::chains::rococo_headers_to_wococo::RococoFinalityToWococo; $generic - } + }, RelayHeadersBridge::WococoToRococo => { type Source = relay_wococo_client::Wococo; type Target = relay_rococo_client::Rococo; type Finality = crate::chains::wococo_headers_to_rococo::WococoFinalityToRococo; $generic - } + }, + RelayHeadersBridge::KusamaToPolkadot => { + type Source = relay_kusama_client::Kusama; + type Target = relay_polkadot_client::Polkadot; + type Finality = crate::chains::kusama_headers_to_polkadot::KusamaFinalityToPolkadot; + + $generic + }, + RelayHeadersBridge::PolkadotToKusama => { + type Source = relay_polkadot_client::Polkadot; + type Target = relay_kusama_client::Kusama; + type Finality = crate::chains::polkadot_headers_to_kusama::PolkadotFinalityToKusama; + + $generic + }, } }; } @@ -98,16 +119,20 @@ impl RelayHeaders { select_bridge!(self.bridge, { let source_client = self.source.to_client::().await?; let target_client = self.target.to_client::().await?; + let target_transactions_mortality = self.target_sign.target_transactions_mortality; let target_sign = self.target_sign.to_keypair::()?; let metrics_params = Finality::customize_metrics(self.prometheus_params.into())?; + GlobalMetrics::new()?.register_and_spawn(&metrics_params.registry)?; + let finality = 
Finality::new(target_client.clone(), target_sign); finality.start_relay_guards(); - crate::finality_pipeline::run( + substrate_relay_helper::finality_pipeline::run( finality, source_client, target_client, self.only_mandatory_headers, + target_transactions_mortality, metrics_params, ) .await diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs b/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs index e71ea6aeaa2f8aeef17353abea7f02f62b2e59a2..9d76a0296fb2cc432cd9046e2e230162de3cd4fc 100644 --- a/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs +++ b/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs @@ -22,20 +22,40 @@ //! 2) add `declare_bridge_options!(...)` for the bridge; //! 3) add bridge support to the `select_bridge! { ... }` macro. -use crate::cli::{CliChain, HexLaneId, PrometheusParams}; -use crate::declare_chain_options; -use crate::messages_lane::MessagesRelayParams; -use crate::on_demand_headers::OnDemandHeadersRelay; - use futures::{FutureExt, TryFutureExt}; -use relay_utils::metrics::MetricsParams; use structopt::StructOpt; +use strum::VariantNames; + +use codec::Encode; +use messages_relay::relay_strategy::MixStrategy; +use relay_substrate_client::{ + AccountIdOf, Chain, Client, TransactionSignScheme, UnsignedTransaction, +}; +use relay_utils::metrics::MetricsParams; +use sp_core::{Bytes, Pair}; +use substrate_relay_helper::{ + messages_lane::MessagesRelayParams, on_demand_headers::OnDemandHeadersRelay, +}; + +use crate::{ + cli::{relay_messages::RelayerMode, CliChain, HexLaneId, PrometheusParams}, + declare_chain_options, +}; + +/// Maximal allowed conversion rate error ratio (abs(real - stored) / stored) that we allow. +/// +/// If it is zero, then transaction will be submitted every time we see difference between +/// stored and real conversion rates. If it is large enough (e.g. 
> than 10 percents, which is 0.1), +/// then rational relayers may stop relaying messages because they were submitted using +/// lesser conversion rate. +const CONVERSION_RATE_ALLOWED_DIFFERENCE_RATIO: f64 = 0.05; /// Start headers+messages relayer process. #[derive(StructOpt)] pub enum RelayHeadersAndMessages { MillauRialto(MillauRialtoHeadersAndMessages), RococoWococo(RococoWococoHeadersAndMessages), + KusamaPolkadot(KusamaPolkadotHeadersAndMessages), } /// Parameters that have the same names across all bridges. @@ -44,13 +64,22 @@ pub struct HeadersAndMessagesSharedParams { /// Hex-encoded lane identifiers that should be served by the complex relay. #[structopt(long, default_value = "00000000")] lane: Vec, + #[structopt(long, possible_values = RelayerMode::VARIANTS, case_insensitive = true, default_value = "rational")] + relayer_mode: RelayerMode, + /// Create relayers fund accounts on both chains, if it does not exists yet. + #[structopt(long)] + create_relayers_fund_accounts: bool, + /// If passed, only mandatory headers (headers that are changing the GRANDPA authorities set) + /// are relayed. + #[structopt(long)] + only_mandatory_headers: bool, #[structopt(flatten)] prometheus_params: PrometheusParams, } -// The reason behind this macro is that 'normal' relays are using source and target chains terminology, -// which is unusable for both-way relays (if you're relaying headers from Rialto to Millau and from -// Millau to Rialto, then which chain is source?). +// The reason behind this macro is that 'normal' relays are using source and target chains +// terminology, which is unusable for both-way relays (if you're relaying headers from Rialto to +// Millau and from Millau to Rialto, then which chain is source?). macro_rules! declare_bridge_options { ($chain1:ident, $chain2:ident) => { paste::item! { @@ -64,12 +93,15 @@ macro_rules! 
declare_bridge_options { #[structopt(flatten)] left_sign: [<$chain1 SigningParams>], #[structopt(flatten)] + left_messages_pallet_owner: [<$chain1 MessagesPalletOwnerSigningParams>], + #[structopt(flatten)] right: [<$chain2 ConnectionParams>], #[structopt(flatten)] right_sign: [<$chain2 SigningParams>], + #[structopt(flatten)] + right_messages_pallet_owner: [<$chain2 MessagesPalletOwnerSigningParams>], } - #[allow(unreachable_patterns)] impl From for [<$chain1 $chain2 HeadersAndMessages>] { fn from(relay_params: RelayHeadersAndMessages) -> [<$chain1 $chain2 HeadersAndMessages>] { match relay_params { @@ -91,40 +123,205 @@ macro_rules! select_bridge { type Left = relay_millau_client::Millau; type Right = relay_rialto_client::Rialto; - type LeftToRightFinality = crate::chains::millau_headers_to_rialto::MillauFinalityToRialto; - type RightToLeftFinality = crate::chains::rialto_headers_to_millau::RialtoFinalityToMillau; - - type LeftToRightMessages = crate::chains::millau_messages_to_rialto::MillauMessagesToRialto; - type RightToLeftMessages = crate::chains::rialto_messages_to_millau::RialtoMessagesToMillau; - - const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_millau::BlockNumber = bp_millau::SESSION_LENGTH; - const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_rialto::BlockNumber = bp_rialto::SESSION_LENGTH; + type LeftToRightFinality = + crate::chains::millau_headers_to_rialto::MillauFinalityToRialto; + type RightToLeftFinality = + crate::chains::rialto_headers_to_millau::RialtoFinalityToMillau; + + type LeftAccountIdConverter = bp_millau::AccountIdConverter; + type RightAccountIdConverter = bp_rialto::AccountIdConverter; + + const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_millau::BlockNumber = + bp_millau::SESSION_LENGTH; + const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_rialto::BlockNumber = + bp_rialto::SESSION_LENGTH; + + use crate::chains::{ + millau_messages_to_rialto::{ + standalone_metrics as left_to_right_standalone_metrics, + run as left_to_right_messages, + 
update_rialto_to_millau_conversion_rate as update_right_to_left_conversion_rate, + }, + rialto_messages_to_millau::{ + run as right_to_left_messages, + update_millau_to_rialto_conversion_rate as update_left_to_right_conversion_rate, + }, + }; + + async fn left_create_account( + _left_client: Client, + _left_sign: ::AccountKeyPair, + _account_id: AccountIdOf, + ) -> anyhow::Result<()> { + Err(anyhow::format_err!("Account creation is not supported by this bridge")) + } - use crate::chains::millau_messages_to_rialto::run as left_to_right_messages; - use crate::chains::rialto_messages_to_millau::run as right_to_left_messages; + async fn right_create_account( + _right_client: Client, + _right_sign: ::AccountKeyPair, + _account_id: AccountIdOf, + ) -> anyhow::Result<()> { + Err(anyhow::format_err!("Account creation is not supported by this bridge")) + } $generic - } + }, RelayHeadersAndMessages::RococoWococo(_) => { type Params = RococoWococoHeadersAndMessages; type Left = relay_rococo_client::Rococo; type Right = relay_wococo_client::Wococo; - type LeftToRightFinality = crate::chains::rococo_headers_to_wococo::RococoFinalityToWococo; - type RightToLeftFinality = crate::chains::wococo_headers_to_rococo::WococoFinalityToRococo; + type LeftToRightFinality = + crate::chains::rococo_headers_to_wococo::RococoFinalityToWococo; + type RightToLeftFinality = + crate::chains::wococo_headers_to_rococo::WococoFinalityToRococo; + + type LeftAccountIdConverter = bp_rococo::AccountIdConverter; + type RightAccountIdConverter = bp_wococo::AccountIdConverter; + + const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_rococo::BlockNumber = + bp_rococo::SESSION_LENGTH; + const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_wococo::BlockNumber = + bp_wococo::SESSION_LENGTH; + + use crate::chains::{ + rococo_messages_to_wococo::{ + standalone_metrics as left_to_right_standalone_metrics, + run as left_to_right_messages, + }, + wococo_messages_to_rococo::{ + run as right_to_left_messages, + }, + }; + + async fn 
update_right_to_left_conversion_rate( + _client: Client, + _signer: ::AccountKeyPair, + _updated_rate: f64, + ) -> anyhow::Result<()> { + Err(anyhow::format_err!("Conversion rate is not supported by this bridge")) + } + + async fn update_left_to_right_conversion_rate( + _client: Client, + _signer: ::AccountKeyPair, + _updated_rate: f64, + ) -> anyhow::Result<()> { + Err(anyhow::format_err!("Conversion rate is not supported by this bridge")) + } + + async fn left_create_account( + _left_client: Client, + _left_sign: ::AccountKeyPair, + _account_id: AccountIdOf, + ) -> anyhow::Result<()> { + Err(anyhow::format_err!("Account creation is not supported by this bridge")) + } - type LeftToRightMessages = crate::chains::rococo_messages_to_wococo::RococoMessagesToWococo; - type RightToLeftMessages = crate::chains::wococo_messages_to_rococo::WococoMessagesToRococo; + async fn right_create_account( + _right_client: Client, + _right_sign: ::AccountKeyPair, + _account_id: AccountIdOf, + ) -> anyhow::Result<()> { + Err(anyhow::format_err!("Account creation is not supported by this bridge")) + } - const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_rococo::BlockNumber = bp_rococo::SESSION_LENGTH; - const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_wococo::BlockNumber = bp_wococo::SESSION_LENGTH; + $generic + }, + RelayHeadersAndMessages::KusamaPolkadot(_) => { + type Params = KusamaPolkadotHeadersAndMessages; + + type Left = relay_kusama_client::Kusama; + type Right = relay_polkadot_client::Polkadot; + + type LeftToRightFinality = + crate::chains::kusama_headers_to_polkadot::KusamaFinalityToPolkadot; + type RightToLeftFinality = + crate::chains::polkadot_headers_to_kusama::PolkadotFinalityToKusama; + + type LeftAccountIdConverter = bp_kusama::AccountIdConverter; + type RightAccountIdConverter = bp_polkadot::AccountIdConverter; + + const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_kusama::BlockNumber = + bp_kusama::SESSION_LENGTH; + const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_polkadot::BlockNumber = 
+ bp_polkadot::SESSION_LENGTH; + + use crate::chains::{ + kusama_messages_to_polkadot::{ + standalone_metrics as left_to_right_standalone_metrics, + run as left_to_right_messages, + update_polkadot_to_kusama_conversion_rate as update_right_to_left_conversion_rate, + }, + polkadot_messages_to_kusama::{ + run as right_to_left_messages, + update_kusama_to_polkadot_conversion_rate as update_left_to_right_conversion_rate, + }, + }; + + async fn left_create_account( + left_client: Client, + left_sign: ::AccountKeyPair, + account_id: AccountIdOf, + ) -> anyhow::Result<()> { + let left_genesis_hash = *left_client.genesis_hash(); + left_client + .submit_signed_extrinsic( + left_sign.public().into(), + move |_, transaction_nonce| { + Bytes( + Left::sign_transaction(left_genesis_hash, &left_sign, relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new( + relay_kusama_client::runtime::Call::Balances( + relay_kusama_client::runtime::BalancesCall::transfer( + bp_kusama::AccountAddress::Id(account_id), + bp_kusama::EXISTENTIAL_DEPOSIT.into(), + ), + ), + transaction_nonce, + ), + ).encode() + ) + }, + ) + .await + .map(drop) + .map_err(|e| anyhow::format_err!("{}", e)) + } - use crate::chains::rococo_messages_to_wococo::run as left_to_right_messages; - use crate::chains::wococo_messages_to_rococo::run as right_to_left_messages; + async fn right_create_account( + right_client: Client, + right_sign: ::AccountKeyPair, + account_id: AccountIdOf, + ) -> anyhow::Result<()> { + let right_genesis_hash = *right_client.genesis_hash(); + right_client + .submit_signed_extrinsic( + right_sign.public().into(), + move |_, transaction_nonce| { + Bytes( + Right::sign_transaction(right_genesis_hash, &right_sign, relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new( + relay_polkadot_client::runtime::Call::Balances( + relay_polkadot_client::runtime::BalancesCall::transfer( + bp_polkadot::AccountAddress::Id(account_id), + 
bp_polkadot::EXISTENTIAL_DEPOSIT.into(), + ), + ), + transaction_nonce, + ), + ).encode() + ) + }, + ) + .await + .map(drop) + .map_err(|e| anyhow::format_err!("{}", e)) + } $generic - } + }, } }; } @@ -134,9 +331,12 @@ declare_chain_options!(Millau, millau); declare_chain_options!(Rialto, rialto); declare_chain_options!(Rococo, rococo); declare_chain_options!(Wococo, wococo); +declare_chain_options!(Kusama, kusama); +declare_chain_options!(Polkadot, polkadot); // All supported bridges. declare_bridge_options!(Millau, Rialto); declare_bridge_options!(Rococo, Wococo); +declare_bridge_options!(Kusama, Polkadot); impl RelayHeadersAndMessages { /// Run the command. @@ -145,26 +345,170 @@ impl RelayHeadersAndMessages { let params: Params = self.into(); let left_client = params.left.to_client::().await?; + let left_transactions_mortality = params.left_sign.transactions_mortality()?; let left_sign = params.left_sign.to_keypair::()?; + let left_messages_pallet_owner = + params.left_messages_pallet_owner.to_keypair::()?; let right_client = params.right.to_client::().await?; + let right_transactions_mortality = params.right_sign.transactions_mortality()?; let right_sign = params.right_sign.to_keypair::()?; + let right_messages_pallet_owner = + params.right_messages_pallet_owner.to_keypair::()?; let lanes = params.shared.lane; + let relayer_mode = params.shared.relayer_mode.into(); + let relay_strategy = MixStrategy::new(relayer_mode); + // create metrics registry and register standalone metrics let metrics_params: MetricsParams = params.shared.prometheus_params.into(); - let metrics_params = relay_utils::relay_metrics(None, metrics_params).into_params(); + let metrics_params = relay_utils::relay_metrics(metrics_params).into_params(); + let left_to_right_metrics = + left_to_right_standalone_metrics(left_client.clone(), right_client.clone())?; + let right_to_left_metrics = left_to_right_metrics.clone().reverse(); + + // start conversion rate update loops for left/right chains 
+ if let Some(left_messages_pallet_owner) = left_messages_pallet_owner { + let left_client = left_client.clone(); + let format_err = || { + anyhow::format_err!( + "Cannon run conversion rate updater: {} -> {}", + Right::NAME, + Left::NAME + ) + }; + substrate_relay_helper::conversion_rate_update::run_conversion_rate_update_loop( + left_to_right_metrics + .target_to_source_conversion_rate + .as_ref() + .ok_or_else(format_err)? + .shared_value_ref(), + left_to_right_metrics + .target_to_base_conversion_rate + .as_ref() + .ok_or_else(format_err)? + .shared_value_ref(), + left_to_right_metrics + .source_to_base_conversion_rate + .as_ref() + .ok_or_else(format_err)? + .shared_value_ref(), + CONVERSION_RATE_ALLOWED_DIFFERENCE_RATIO, + move |new_rate| { + log::info!( + target: "bridge", + "Going to update {} -> {} (on {}) conversion rate to {}.", + Right::NAME, + Left::NAME, + Left::NAME, + new_rate, + ); + update_right_to_left_conversion_rate( + left_client.clone(), + left_messages_pallet_owner.clone(), + new_rate, + ) + }, + ); + } + if let Some(right_messages_pallet_owner) = right_messages_pallet_owner { + let right_client = right_client.clone(); + let format_err = || { + anyhow::format_err!( + "Cannon run conversion rate updater: {} -> {}", + Left::NAME, + Right::NAME + ) + }; + substrate_relay_helper::conversion_rate_update::run_conversion_rate_update_loop( + right_to_left_metrics + .target_to_source_conversion_rate + .as_ref() + .ok_or_else(format_err)? + .shared_value_ref(), + left_to_right_metrics + .source_to_base_conversion_rate + .as_ref() + .ok_or_else(format_err)? + .shared_value_ref(), + left_to_right_metrics + .target_to_base_conversion_rate + .as_ref() + .ok_or_else(format_err)? 
+ .shared_value_ref(), + CONVERSION_RATE_ALLOWED_DIFFERENCE_RATIO, + move |new_rate| { + log::info!( + target: "bridge", + "Going to update {} -> {} (on {}) conversion rate to {}.", + Left::NAME, + Right::NAME, + Right::NAME, + new_rate, + ); + update_left_to_right_conversion_rate( + right_client.clone(), + right_messages_pallet_owner.clone(), + new_rate, + ) + }, + ); + } + + // optionally, create relayers fund account + if params.shared.create_relayers_fund_accounts { + let relayer_fund_acount_id = pallet_bridge_messages::relayer_fund_account_id::< + AccountIdOf, + LeftAccountIdConverter, + >(); + let relayers_fund_account_balance = + left_client.free_native_balance(relayer_fund_acount_id.clone()).await; + if let Err(relay_substrate_client::Error::AccountDoesNotExist) = + relayers_fund_account_balance + { + log::info!(target: "bridge", "Going to create relayers fund account at {}.", Left::NAME); + left_create_account( + left_client.clone(), + left_sign.clone(), + relayer_fund_acount_id, + ) + .await?; + } + + let relayer_fund_acount_id = pallet_bridge_messages::relayer_fund_account_id::< + AccountIdOf, + RightAccountIdConverter, + >(); + let relayers_fund_account_balance = + right_client.free_native_balance(relayer_fund_acount_id.clone()).await; + if let Err(relay_substrate_client::Error::AccountDoesNotExist) = + relayers_fund_account_balance + { + log::info!(target: "bridge", "Going to create relayers fund account at {}.", Right::NAME); + right_create_account( + right_client.clone(), + right_sign.clone(), + relayer_fund_acount_id, + ) + .await?; + } + } + // start on-demand header relays let left_to_right_on_demand_headers = OnDemandHeadersRelay::new( left_client.clone(), right_client.clone(), + right_transactions_mortality, LeftToRightFinality::new(right_client.clone(), right_sign.clone()), MAX_MISSING_LEFT_HEADERS_AT_RIGHT, + params.shared.only_mandatory_headers, ); let right_to_left_on_demand_headers = OnDemandHeadersRelay::new( right_client.clone(), 
left_client.clone(), + left_transactions_mortality, RightToLeftFinality::new(left_client.clone(), left_sign.clone()), MAX_MISSING_RIGHT_HEADERS_AT_LEFT, + params.shared.only_mandatory_headers, ); // Need 2x capacity since we consider both directions for each lane @@ -174,28 +518,32 @@ impl RelayHeadersAndMessages { let left_to_right_messages = left_to_right_messages(MessagesRelayParams { source_client: left_client.clone(), source_sign: left_sign.clone(), + source_transactions_mortality: left_transactions_mortality, target_client: right_client.clone(), target_sign: right_sign.clone(), + target_transactions_mortality: right_transactions_mortality, source_to_target_headers_relay: Some(left_to_right_on_demand_headers.clone()), target_to_source_headers_relay: Some(right_to_left_on_demand_headers.clone()), lane_id: lane, - metrics_params: metrics_params.clone().disable().metrics_prefix( - messages_relay::message_lane_loop::metrics_prefix::(&lane), - ), + metrics_params: metrics_params.clone().disable(), + standalone_metrics: Some(left_to_right_metrics.clone()), + relay_strategy: relay_strategy.clone(), }) .map_err(|e| anyhow::format_err!("{}", e)) .boxed(); let right_to_left_messages = right_to_left_messages(MessagesRelayParams { source_client: right_client.clone(), source_sign: right_sign.clone(), + source_transactions_mortality: right_transactions_mortality, target_client: left_client.clone(), target_sign: left_sign.clone(), + target_transactions_mortality: left_transactions_mortality, source_to_target_headers_relay: Some(right_to_left_on_demand_headers.clone()), target_to_source_headers_relay: Some(left_to_right_on_demand_headers.clone()), lane_id: lane, - metrics_params: metrics_params.clone().disable().metrics_prefix( - messages_relay::message_lane_loop::metrics_prefix::(&lane), - ), + metrics_params: metrics_params.clone().disable(), + standalone_metrics: Some(right_to_left_metrics.clone()), + relay_strategy: relay_strategy.clone(), }) .map_err(|e| 
anyhow::format_err!("{}", e)) .boxed(); @@ -204,7 +552,7 @@ impl RelayHeadersAndMessages { message_relays.push(right_to_left_messages); } - relay_utils::relay_metrics(None, metrics_params) + relay_utils::relay_metrics(metrics_params) .expose() .await .map_err(|e| anyhow::format_err!("{}", e))?; diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/relay_messages.rs b/polkadot/bridges/relays/bin-substrate/src/cli/relay_messages.rs index 94630886ca38e984ebd172373be7b74ffa8eb194..e47abfc5d94e34389456659b7a3ab94dc439daaa 100644 --- a/polkadot/bridges/relays/bin-substrate/src/cli/relay_messages.rs +++ b/polkadot/bridges/relays/bin-substrate/src/cli/relay_messages.rs @@ -14,25 +14,51 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use crate::cli::bridge::FullBridge; -use crate::cli::{ - HexLaneId, PrometheusParams, SourceConnectionParams, SourceSigningParams, TargetConnectionParams, - TargetSigningParams, +use structopt::StructOpt; +use strum::{EnumString, EnumVariantNames, VariantNames}; + +use messages_relay::relay_strategy::MixStrategy; +use substrate_relay_helper::messages_lane::MessagesRelayParams; + +use crate::{ + cli::{ + bridge::FullBridge, HexLaneId, PrometheusParams, SourceConnectionParams, + SourceSigningParams, TargetConnectionParams, TargetSigningParams, + }, + select_full_bridge, }; -use crate::messages_lane::MessagesRelayParams; -use crate::select_full_bridge; -use structopt::StructOpt; +/// Relayer operating mode. +#[derive(Debug, EnumString, EnumVariantNames, Clone, Copy, PartialEq)] +#[strum(serialize_all = "kebab_case")] +pub enum RelayerMode { + /// The relayer doesn't care about rewards. + Altruistic, + /// The relayer will deliver all messages and confirmations as long as he's not losing any + /// funds. 
+ Rational, +} + +impl From for messages_relay::message_lane_loop::RelayerMode { + fn from(mode: RelayerMode) -> Self { + match mode { + RelayerMode::Altruistic => Self::Altruistic, + RelayerMode::Rational => Self::Rational, + } + } +} /// Start messages relayer process. #[derive(StructOpt)] pub struct RelayMessages { /// A bridge instance to relay messages for. - #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + #[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)] bridge: FullBridge, /// Hex-encoded lane id that should be served by the relay. Defaults to `00000000`. #[structopt(long, default_value = "00000000")] lane: HexLaneId, + #[structopt(long, possible_values = RelayerMode::VARIANTS, case_insensitive = true, default_value = "rational")] + relayer_mode: RelayerMode, #[structopt(flatten)] source: SourceConnectionParams, #[structopt(flatten)] @@ -51,21 +77,69 @@ impl RelayMessages { select_full_bridge!(self.bridge, { let source_client = self.source.to_client::().await?; let source_sign = self.source_sign.to_keypair::()?; + let source_transactions_mortality = self.source_sign.transactions_mortality()?; let target_client = self.target.to_client::().await?; let target_sign = self.target_sign.to_keypair::()?; + let target_transactions_mortality = self.target_sign.transactions_mortality()?; + let relayer_mode = self.relayer_mode.into(); + let relay_strategy = MixStrategy::new(relayer_mode); relay_messages(MessagesRelayParams { source_client, source_sign, + source_transactions_mortality, target_client, target_sign, + target_transactions_mortality, source_to_target_headers_relay: None, target_to_source_headers_relay: None, lane_id: self.lane.into(), metrics_params: self.prometheus_params.into(), + standalone_metrics: None, + relay_strategy, }) .await .map_err(|e| anyhow::format_err!("{}", e)) }) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_use_rational_relayer_mode_by_default() { + 
assert_eq!( + RelayMessages::from_iter(vec![ + "relay-messages", + "rialto-to-millau", + "--source-port=0", + "--source-signer=//Alice", + "--target-port=0", + "--target-signer=//Alice", + "--lane=00000000", + ]) + .relayer_mode, + RelayerMode::Rational, + ); + } + + #[test] + fn should_accept_altruistic_relayer_mode() { + assert_eq!( + RelayMessages::from_iter(vec![ + "relay-messages", + "rialto-to-millau", + "--source-port=0", + "--source-signer=//Alice", + "--target-port=0", + "--target-signer=//Alice", + "--lane=00000000", + "--relayer-mode=altruistic", + ]) + .relayer_mode, + RelayerMode::Altruistic, + ); + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/resubmit_transactions.rs b/polkadot/bridges/relays/bin-substrate/src/cli/resubmit_transactions.rs new file mode 100644 index 0000000000000000000000000000000000000000..64663d7e8ec026032cdb67c26fbf96508cdf3b39 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/cli/resubmit_transactions.rs @@ -0,0 +1,559 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +use crate::cli::{Balance, TargetConnectionParams, TargetSigningParams}; + +use codec::{Decode, Encode}; +use num_traits::{One, Zero}; +use relay_substrate_client::{ + BlockWithJustification, Chain, Client, Error as SubstrateError, HeaderOf, TransactionSignScheme, +}; +use relay_utils::FailedClient; +use sp_core::Bytes; +use sp_runtime::{ + traits::{Hash, Header as HeaderT}, + transaction_validity::TransactionPriority, +}; +use structopt::StructOpt; +use strum::{EnumString, EnumVariantNames, VariantNames}; + +/// Start resubmit transactions process. +#[derive(StructOpt)] +pub struct ResubmitTransactions { + /// A bridge instance to relay headers for. + #[structopt(possible_values = RelayChain::VARIANTS, case_insensitive = true)] + chain: RelayChain, + #[structopt(flatten)] + target: TargetConnectionParams, + #[structopt(flatten)] + target_sign: TargetSigningParams, + /// Number of blocks we see before considering queued transaction as stalled. + #[structopt(long, default_value = "5")] + stalled_blocks: u32, + /// Tip limit. We'll never submit transaction with larger tip. + #[structopt(long)] + tip_limit: Balance, + /// Tip increase step. We'll be checking updated transaction priority by increasing its tip by + /// this step. + #[structopt(long)] + tip_step: Balance, + /// Priority selection strategy. + #[structopt(subcommand)] + strategy: PrioritySelectionStrategy, +} + +/// Chain, which transactions we're going to track && resubmit. +#[derive(Debug, EnumString, EnumVariantNames)] +#[strum(serialize_all = "kebab_case")] +pub enum RelayChain { + Millau, + Kusama, + Polkadot, +} + +/// Strategy to use for priority selection. +#[derive(StructOpt, Debug, PartialEq, Eq, Clone, Copy)] +pub enum PrioritySelectionStrategy { + /// Strategy selects tip that changes transaction priority to be better than priority of + /// the first transaction of previous block. + /// + /// It only makes sense to use this strategy for Millau transactions. 
Millau has transactions + /// that are close to block limits, so if there are any other queued transactions, 'large' + /// transaction won't fit the block && will be postponed. To avoid this, we change its priority + /// to some large value, making it best transaction => it'll be 'mined' first. + MakeItBestTransaction, + /// Strategy selects tip that changes transaction priority to be better than priority of + /// selected queued transaction. + /// + /// When we first see stalled transaction, we make it better than worst 1/4 of queued + /// transactions. If it is still stalled, we'll make it better than 1/3 of queued transactions, + /// ... + MakeItBetterThanQueuedTransaction, +} + +macro_rules! select_bridge { + ($bridge: expr, $generic: tt) => { + match $bridge { + RelayChain::Millau => { + type Target = relay_millau_client::Millau; + type TargetSign = relay_millau_client::Millau; + + $generic + }, + RelayChain::Kusama => { + type Target = relay_kusama_client::Kusama; + type TargetSign = relay_kusama_client::Kusama; + + $generic + }, + RelayChain::Polkadot => { + type Target = relay_polkadot_client::Polkadot; + type TargetSign = relay_polkadot_client::Polkadot; + + $generic + }, + } + }; +} + +impl ResubmitTransactions { + /// Run the command. 
+ pub async fn run(self) -> anyhow::Result<()> { + select_bridge!(self.chain, { + let relay_loop_name = format!("ResubmitTransactions{}", Target::NAME); + let client = self.target.to_client::().await?; + let key_pair = self.target_sign.to_keypair::()?; + + relay_utils::relay_loop((), client) + .run(relay_loop_name, move |_, client, _| { + run_until_connection_lost::( + client, + key_pair.clone(), + Context { + strategy: self.strategy, + best_header: HeaderOf::::new( + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ), + transaction: None, + resubmitted: 0, + stalled_for: Zero::zero(), + stalled_for_limit: self.stalled_blocks as _, + tip_step: self.tip_step.cast() as _, + tip_limit: self.tip_limit.cast() as _, + }, + ) + }) + .await + .map_err(Into::into) + }) + } +} + +impl PrioritySelectionStrategy { + /// Select target priority. + async fn select_target_priority>( + &self, + client: &Client, + context: &Context, + ) -> Result, SubstrateError> { + match *self { + PrioritySelectionStrategy::MakeItBestTransaction => + read_previous_block_best_priority::(client, context).await, + PrioritySelectionStrategy::MakeItBetterThanQueuedTransaction => + select_priority_from_queue::(client, context).await, + } + } +} + +#[derive(Debug)] +struct Context { + /// Priority selection strategy. + strategy: PrioritySelectionStrategy, + /// Best known block header. + best_header: C::Header, + /// Hash of the (potentially) stalled transaction. + transaction: Option, + /// How many times we have resubmitted this `transaction`? + resubmitted: u32, + /// This transaction is in pool for `stalled_for` wakeup intervals. + stalled_for: C::BlockNumber, + /// When `stalled_for` reaching this limit, transaction is considered stalled. + stalled_for_limit: C::BlockNumber, + /// Tip step interval. + tip_step: C::Balance, + /// Maximal tip. + tip_limit: C::Balance, +} + +impl Context { + /// Return true if transaction has stalled. 
+ fn is_stalled(&self) -> bool { + self.stalled_for >= self.stalled_for_limit + } + + /// Notice resubmitted transaction. + fn notice_resubmitted_transaction(mut self, transaction: C::Hash) -> Self { + self.transaction = Some(transaction); + self.stalled_for = Zero::zero(); + self.resubmitted += 1; + self + } + + /// Notice transaction from the transaction pool. + fn notice_transaction(mut self, transaction: C::Hash) -> Self { + if self.transaction == Some(transaction) { + self.stalled_for += One::one(); + } else { + self.transaction = Some(transaction); + self.stalled_for = One::one(); + self.resubmitted = 0; + } + self + } +} + +/// Run resubmit transactions loop. +async fn run_until_connection_lost>( + client: Client, + key_pair: S::AccountKeyPair, + mut context: Context, +) -> Result<(), FailedClient> { + loop { + async_std::task::sleep(C::AVERAGE_BLOCK_INTERVAL).await; + + let result = run_loop_iteration::(client.clone(), key_pair.clone(), context).await; + context = match result { + Ok(context) => context, + Err(error) => { + log::error!( + target: "bridge", + "Resubmit {} transactions loop has failed with error: {:?}", + C::NAME, + error, + ); + return Err(FailedClient::Target) + }, + }; + } +} + +/// Run single loop iteration. +async fn run_loop_iteration>( + client: Client, + key_pair: S::AccountKeyPair, + mut context: Context, +) -> Result, SubstrateError> { + // correct best header is required for all other actions + context.best_header = client.best_header().await?; + + // check if there's queued transaction, signed by given author + let original_transaction = match lookup_signer_transaction::(&client, &key_pair).await? 
{ + Some(original_transaction) => original_transaction, + None => { + log::trace!(target: "bridge", "No {} transactions from required signer in the txpool", C::NAME); + return Ok(context) + }, + }; + let original_transaction_hash = C::Hasher::hash(&original_transaction.encode()); + let context = context.notice_transaction(original_transaction_hash); + + // if transaction hasn't been mined for `stalled_blocks`, we'll need to resubmit it + if !context.is_stalled() { + log::trace!( + target: "bridge", + "{} transaction {:?} is not yet stalled ({:?}/{:?})", + C::NAME, + context.transaction, + context.stalled_for, + context.stalled_for_limit, + ); + return Ok(context) + } + + // select priority for updated transaction + let target_priority = + match context.strategy.select_target_priority::(&client, &context).await? { + Some(target_priority) => target_priority, + None => { + log::trace!(target: "bridge", "Failed to select target priority"); + return Ok(context) + }, + }; + + // update transaction tip + let (is_updated, updated_transaction) = update_transaction_tip::( + &client, + &key_pair, + context.best_header.hash(), + original_transaction, + context.tip_step, + context.tip_limit, + target_priority, + ) + .await?; + + if !is_updated { + log::trace!(target: "bridge", "{} transaction tip can not be updated. Reached limit?", C::NAME); + return Ok(context) + } + + let updated_transaction = updated_transaction.encode(); + let updated_transaction_hash = C::Hasher::hash(&updated_transaction); + client.submit_unsigned_extrinsic(Bytes(updated_transaction)).await?; + + log::info!( + target: "bridge", + "Replaced {} transaction {} with {} in txpool", + C::NAME, + original_transaction_hash, + updated_transaction_hash, + ); + + Ok(context.notice_resubmitted_transaction(updated_transaction_hash)) +} + +/// Search transaction pool for transaction, signed by given key pair. 
+async fn lookup_signer_transaction>( + client: &Client, + key_pair: &S::AccountKeyPair, +) -> Result, SubstrateError> { + let pending_transactions = client.pending_extrinsics().await?; + for pending_transaction in pending_transactions { + let pending_transaction = S::SignedTransaction::decode(&mut &pending_transaction.0[..]) + .map_err(SubstrateError::ResponseParseFailed)?; + if !S::is_signed_by(key_pair, &pending_transaction) { + continue + } + + return Ok(Some(pending_transaction)) + } + + Ok(None) +} + +/// Read priority of best signed transaction of previous block. +async fn read_previous_block_best_priority>( + client: &Client, + context: &Context, +) -> Result, SubstrateError> { + let best_block = client.get_block(Some(context.best_header.hash())).await?; + let best_transaction = best_block + .extrinsics() + .iter() + .filter_map(|xt| S::SignedTransaction::decode(&mut &xt[..]).ok()) + .find(|xt| S::is_signed(xt)); + match best_transaction { + Some(best_transaction) => Ok(Some( + client + .validate_transaction(*context.best_header.parent_hash(), best_transaction) + .await?? + .priority, + )), + None => Ok(None), + } +} + +/// Select priority of some queued transaction. +async fn select_priority_from_queue>( + client: &Client, + context: &Context, +) -> Result, SubstrateError> { + // select transaction from the queue + let queued_transactions = client.pending_extrinsics().await?; + let selected_transaction = match select_transaction_from_queue(queued_transactions, context) { + Some(selected_transaction) => selected_transaction, + None => return Ok(None), + }; + + let selected_transaction = S::SignedTransaction::decode(&mut &selected_transaction[..]) + .map_err(SubstrateError::ResponseParseFailed)?; + let target_priority = client + .validate_transaction(context.best_header.hash(), selected_transaction) + .await?? + .priority; + Ok(Some(target_priority)) +} + +/// Select transaction with target priority from the vec of queued transactions. 
+fn select_transaction_from_queue( + mut queued_transactions: Vec, + context: &Context, +) -> Option { + if queued_transactions.is_empty() { + return None + } + + // the more times we resubmit transaction (`context.resubmitted`), the closer we move + // to the front of the transaction queue + let total_transactions = queued_transactions.len(); + let resubmitted_factor = context.resubmitted; + let divisor = + 1usize.saturating_add(1usize.checked_shl(resubmitted_factor).unwrap_or(usize::MAX)); + let transactions_to_skip = total_transactions / divisor; + + Some( + queued_transactions + .swap_remove(std::cmp::min(total_transactions - 1, transactions_to_skip)), + ) +} + +/// Try to find appropriate tip for transaction so that its priority is larger than given. +async fn update_transaction_tip>( + client: &Client, + key_pair: &S::AccountKeyPair, + at_block: C::Hash, + tx: S::SignedTransaction, + tip_step: C::Balance, + tip_limit: C::Balance, + target_priority: TransactionPriority, +) -> Result<(bool, S::SignedTransaction), SubstrateError> { + let stx = format!("{:?}", tx); + let mut current_priority = client.validate_transaction(at_block, tx.clone()).await??.priority; + let mut unsigned_tx = S::parse_transaction(tx).ok_or_else(|| { + SubstrateError::Custom(format!("Failed to parse {} transaction {}", C::NAME, stx,)) + })?; + let old_tip = unsigned_tx.tip; + + while current_priority < target_priority { + let next_tip = unsigned_tx.tip + tip_step; + if next_tip > tip_limit { + break + } + + log::trace!( + target: "bridge", + "{} transaction priority with tip={:?}: {}. Target priority: {}", + C::NAME, + unsigned_tx.tip, + current_priority, + target_priority, + ); + + unsigned_tx.tip = next_tip; + current_priority = client + .validate_transaction( + at_block, + S::sign_transaction( + *client.genesis_hash(), + key_pair, + relay_substrate_client::TransactionEra::immortal(), + unsigned_tx.clone(), + ), + ) + .await?? 
+ .priority; + } + + log::debug!( + target: "bridge", + "{} transaction tip has changed from {:?} to {:?}", + C::NAME, + old_tip, + unsigned_tx.tip, + ); + + Ok(( + old_tip != unsigned_tx.tip, + S::sign_transaction( + *client.genesis_hash(), + key_pair, + relay_substrate_client::TransactionEra::immortal(), + unsigned_tx, + ), + )) +} + +#[cfg(test)] +mod tests { + use super::*; + use bp_rialto::Hash; + use relay_rialto_client::Rialto; + + fn context() -> Context { + Context { + strategy: PrioritySelectionStrategy::MakeItBestTransaction, + best_header: HeaderOf::::new( + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ), + transaction: None, + resubmitted: 0, + stalled_for: Zero::zero(), + stalled_for_limit: 3, + tip_step: 100, + tip_limit: 1000, + } + } + + #[test] + fn context_works() { + let mut context = context(); + + // when transaction is noticed 2/3 times, it isn't stalled + context = context.notice_transaction(Default::default()); + assert!(!context.is_stalled()); + assert_eq!(context.stalled_for, 1); + assert_eq!(context.resubmitted, 0); + context = context.notice_transaction(Default::default()); + assert!(!context.is_stalled()); + assert_eq!(context.stalled_for, 2); + assert_eq!(context.resubmitted, 0); + + // when transaction is noticed for 3rd time in a row, it is considered stalled + context = context.notice_transaction(Default::default()); + assert!(context.is_stalled()); + assert_eq!(context.stalled_for, 3); + assert_eq!(context.resubmitted, 0); + + // and after we resubmit it, we forget previous transaction + context = context.notice_resubmitted_transaction(Hash::from([1; 32])); + assert_eq!(context.transaction, Some(Hash::from([1; 32]))); + assert_eq!(context.resubmitted, 1); + assert_eq!(context.stalled_for, 0); + } + + #[test] + fn select_transaction_from_queue_works_with_empty_queue() { + assert_eq!(select_transaction_from_queue(vec![], &context()), None); + } + + #[test] + fn 
select_transaction_from_queue_works() { + let mut context = context(); + let queued_transactions = vec![ + Bytes(vec![1]), + Bytes(vec![2]), + Bytes(vec![3]), + Bytes(vec![4]), + Bytes(vec![5]), + Bytes(vec![6]), + ]; + + // when we resubmit tx for the first time, 1/2 of queue is skipped + assert_eq!( + select_transaction_from_queue(queued_transactions.clone(), &context), + Some(Bytes(vec![4])), + ); + + // when we resubmit tx for the second time, 1/3 of queue is skipped + context = context.notice_resubmitted_transaction(Hash::from([1; 32])); + assert_eq!( + select_transaction_from_queue(queued_transactions.clone(), &context), + Some(Bytes(vec![3])), + ); + + // when we resubmit tx for the third time, 1/5 of queue is skipped + context = context.notice_resubmitted_transaction(Hash::from([2; 32])); + assert_eq!( + select_transaction_from_queue(queued_transactions.clone(), &context), + Some(Bytes(vec![2])), + ); + + // when we resubmit tx for the second time, 1/9 of queue is skipped + context = context.notice_resubmitted_transaction(Hash::from([3; 32])); + assert_eq!( + select_transaction_from_queue(queued_transactions, &context), + Some(Bytes(vec![1])), + ); + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/send_message.rs b/polkadot/bridges/relays/bin-substrate/src/cli/send_message.rs index f710f814e41d981756555a9f305cdf6a25a48bf6..3e77ad8342927bdfc5a16f85b72098d22ec246a4 100644 --- a/polkadot/bridges/relays/bin-substrate/src/cli/send_message.rs +++ b/polkadot/bridges/relays/bin-substrate/src/cli/send_message.rs @@ -14,46 +14,71 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . 
-use crate::cli::bridge::FullBridge; -use crate::cli::encode_call::{self, CliEncodeCall}; -use crate::cli::estimate_fee::estimate_message_delivery_and_dispatch_fee; use crate::cli::{ - Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId, Origins, SourceConnectionParams, SourceSigningParams, - TargetSigningParams, + bridge::FullBridge, + encode_call::{self, CliEncodeCall}, + estimate_fee::estimate_message_delivery_and_dispatch_fee, + Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId, Origins, SourceConnectionParams, + SourceSigningParams, TargetSigningParams, }; use bp_message_dispatch::{CallOrigin, MessagePayload}; -use bp_runtime::messages::DispatchFeePayment; +use bp_runtime::BalanceOf; use codec::Encode; use frame_support::weights::Weight; -use relay_substrate_client::{Chain, TransactionSignScheme}; +use relay_substrate_client::{Chain, TransactionSignScheme, UnsignedTransaction}; use sp_core::{Bytes, Pair}; use sp_runtime::{traits::IdentifyAccount, AccountId32, MultiSignature, MultiSigner}; use std::fmt::Debug; use structopt::StructOpt; +use strum::{EnumString, EnumVariantNames, VariantNames}; + +/// Relayer operating mode. +#[derive(Debug, EnumString, EnumVariantNames, Clone, Copy, PartialEq, Eq)] +#[strum(serialize_all = "kebab_case")] +pub enum DispatchFeePayment { + /// The dispatch fee is paid at the source chain. + AtSourceChain, + /// The dispatch fee is paid at the target chain. + AtTargetChain, +} + +impl From for bp_runtime::messages::DispatchFeePayment { + fn from(dispatch_fee_payment: DispatchFeePayment) -> Self { + match dispatch_fee_payment { + DispatchFeePayment::AtSourceChain => Self::AtSourceChain, + DispatchFeePayment::AtTargetChain => Self::AtTargetChain, + } + } +} /// Send bridge message. #[derive(StructOpt)] pub struct SendMessage { /// A bridge instance to encode call for. 
- #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + #[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)] bridge: FullBridge, #[structopt(flatten)] source: SourceConnectionParams, #[structopt(flatten)] source_sign: SourceSigningParams, - /// The SURI of secret key to use when transactions are submitted to the Target node. - #[structopt(long, required_if("origin", "Target"))] - target_signer: Option, - /// The password for the SURI of secret key to use when transactions are submitted to the Target node. - #[structopt(long)] - target_signer_password: Option, + #[structopt(flatten)] + target_sign: TargetSigningParams, /// Hex-encoded lane id. Defaults to `00000000`. #[structopt(long, default_value = "00000000")] lane: HexLaneId, + /// Where dispatch fee is paid? + #[structopt( + long, + possible_values = DispatchFeePayment::VARIANTS, + case_insensitive = true, + default_value = "at-source-chain", + )] + dispatch_fee_payment: DispatchFeePayment, /// Dispatch weight of the message. If not passed, determined automatically. #[structopt(long)] dispatch_weight: Option>, - /// Delivery and dispatch fee in source chain base currency units. If not passed, determined automatically. + /// Delivery and dispatch fee in source chain base currency units. If not passed, determined + /// automatically. #[structopt(long)] fee: Option, /// Message type. 
@@ -72,9 +97,9 @@ impl SendMessage { crate::select_full_bridge!(self.bridge, { let SendMessage { source_sign, - target_signer, - target_signer_password, + target_sign, ref mut message, + dispatch_fee_payment, dispatch_weight, origin, bridge, @@ -101,12 +126,6 @@ impl SendMessage { match origin { Origins::Source => CallOrigin::SourceAccount(source_account_id), Origins::Target => { - let target_sign = TargetSigningParams { - target_signer: target_signer.clone().ok_or_else(|| { - anyhow::format_err!("The argument target_signer is not available") - })?, - target_signer_password: target_signer_password.clone(), - }; let target_sign = target_sign.to_keypair::()?; let digest = account_ownership_digest( &target_call, @@ -120,9 +139,10 @@ impl SendMessage { target_origin_public.into(), digest_signature.into(), ) - } + }, }, &target_call, + *dispatch_fee_payment, ) }; Ok(payload) @@ -141,7 +161,7 @@ impl SendMessage { let fee = match self.fee { Some(fee) => fee, None => Balance( - estimate_message_delivery_and_dispatch_fee::<::Balance, _, _>( + estimate_message_delivery_and_dispatch_fee::, _, _>( &source_client, ESTIMATE_MESSAGE_FEE_METHOD, lane, @@ -158,24 +178,46 @@ impl SendMessage { fee, })?; + let source_genesis_hash = *source_client.genesis_hash(); + let estimated_transaction_fee = source_client + .estimate_extrinsic_fee(Bytes( + Source::sign_transaction( + source_genesis_hash, + &source_sign, + relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new(send_message_call.clone(), 0), + ) + .encode(), + )) + .await?; source_client - .submit_signed_extrinsic(source_sign.public().into(), |transaction_nonce| { + .submit_signed_extrinsic(source_sign.public().into(), move |_, transaction_nonce| { let signed_source_call = Source::sign_transaction( - *source_client.genesis_hash(), + source_genesis_hash, &source_sign, - transaction_nonce, - send_message_call, + relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new(send_message_call, 
transaction_nonce), ) .encode(); log::info!( target: "bridge", - "Sending message to {}. Size: {}. Dispatch weight: {}. Fee: {}", + "Sending message to {}. Lane: {:?}. Size: {}. Dispatch weight: {}. Fee: {}", Target::NAME, + lane, signed_source_call.len(), dispatch_weight, fee, ); + log::info!( + target: "bridge", + "The source account ({:?}) balance will be reduced by (at most) {} (message fee) + {} (tx fee ) = {} {} tokens", + AccountId32::from(source_sign.public()), + fee.0, + estimated_transaction_fee.inclusion_fee(), + fee.0.saturating_add(estimated_transaction_fee.inclusion_fee() as _), + Source::NAME, + ); log::info!( target: "bridge", "Signed {} Call: {:?}", @@ -197,10 +239,7 @@ fn prepare_call_dispatch_weight( weight_from_pre_dispatch_call: ExplicitOrMaximal, maximal_allowed_weight: Weight, ) -> Weight { - match user_specified_dispatch_weight - .clone() - .unwrap_or(weight_from_pre_dispatch_call) - { + match user_specified_dispatch_weight.clone().unwrap_or(weight_from_pre_dispatch_call) { ExplicitOrMaximal::Explicit(weight) => weight, ExplicitOrMaximal::Maximal => maximal_allowed_weight, } @@ -211,6 +250,7 @@ pub(crate) fn message_payload( weight: Weight, origin: CallOrigin, call: &impl Encode, + dispatch_fee_payment: DispatchFeePayment, ) -> MessagePayload> where SAccountId: Encode + Debug, @@ -222,7 +262,7 @@ where spec_version, weight, origin, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, + dispatch_fee_payment: dispatch_fee_payment.into(), call: HexBytes::encode(call), }; @@ -230,24 +270,14 @@ where log::info!(target: "bridge", "Encoded Message Payload: {:?}", HexBytes::encode(&payload)); // re-pack to return `Vec` - let MessagePayload { - spec_version, - weight, - origin, - dispatch_fee_payment, - call, - } = payload; - MessagePayload { - spec_version, - weight, - origin, - dispatch_fee_payment, - call: call.0, - } + let MessagePayload { spec_version, weight, origin, dispatch_fee_payment, call } = payload; + MessagePayload { spec_version, 
weight, origin, dispatch_fee_payment, call: call.0 } } pub(crate) fn compute_maximal_message_dispatch_weight(maximal_extrinsic_weight: Weight) -> Weight { - bridge_runtime_common::messages::target::maximal_incoming_message_dispatch_weight(maximal_extrinsic_weight) + bridge_runtime_common::messages::target::maximal_incoming_message_dispatch_weight( + maximal_extrinsic_weight, + ) } #[cfg(test)] @@ -260,7 +290,7 @@ mod tests { // given let mut send_message = SendMessage::from_iter(vec![ "send-message", - "RialtoToMillau", + "rialto-to-millau", "--source-port", "1234", "--source-signer", @@ -278,10 +308,12 @@ mod tests { payload, MessagePayload { spec_version: relay_millau_client::Millau::RUNTIME_VERSION.spec_version, - weight: 1038000, - origin: CallOrigin::SourceAccount(sp_keyring::AccountKeyring::Alice.to_account_id()), - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - call: hex!("0401081234").to_vec(), + weight: 576000, + origin: CallOrigin::SourceAccount( + sp_keyring::AccountKeyring::Alice.to_account_id() + ), + dispatch_fee_payment: bp_runtime::messages::DispatchFeePayment::AtSourceChain, + call: hex!("0001081234").to_vec(), } ); } @@ -291,7 +323,7 @@ mod tests { // given let mut send_message = SendMessage::from_iter(vec![ "send-message", - "MillauToRialto", + "millau-to-rialto", "--source-port", "1234", "--source-signer", @@ -318,24 +350,24 @@ mod tests { payload, MessagePayload { spec_version: relay_millau_client::Millau::RUNTIME_VERSION.spec_version, - weight: 1038000, + weight: 576000, origin: CallOrigin::TargetAccount( sp_keyring::AccountKeyring::Alice.to_account_id(), sp_keyring::AccountKeyring::Bob.into(), signature, ), - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - call: hex!("0701081234").to_vec(), + dispatch_fee_payment: bp_runtime::messages::DispatchFeePayment::AtSourceChain, + call: hex!("0001081234").to_vec(), } ); } #[test] - fn target_signer_must_exist_if_origin_is_target() { + fn 
accepts_send_message_command_without_target_sign_options() { // given let send_message = SendMessage::from_iter_safe(vec![ "send-message", - "MillauToRialto", + "rialto-to-millau", "--source-port", "1234", "--source-signer", @@ -347,6 +379,31 @@ mod tests { "1234", ]); - assert!(send_message.is_err()); + assert!(send_message.is_ok()); + } + + #[test] + fn accepts_non_default_dispatch_fee_payment() { + // given + let mut send_message = SendMessage::from_iter(vec![ + "send-message", + "rialto-to-millau", + "--source-port", + "1234", + "--source-signer", + "//Alice", + "--dispatch-fee-payment", + "at-target-chain", + "remark", + ]); + + // when + let payload = send_message.encode_payload().unwrap(); + + // then + assert_eq!( + payload.dispatch_fee_payment, + bp_runtime::messages::DispatchFeePayment::AtTargetChain + ); } } diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/swap_tokens.rs b/polkadot/bridges/relays/bin-substrate/src/cli/swap_tokens.rs new file mode 100644 index 0000000000000000000000000000000000000000..dbe46f469070982383624cad63214adce761b6c8 --- /dev/null +++ b/polkadot/bridges/relays/bin-substrate/src/cli/swap_tokens.rs @@ -0,0 +1,799 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Tokens swap using token-swap bridge pallet. 
+ +// TokenSwapBalances fields are never directly accessed, but the whole struct is printed +// to show token swap progress +#![allow(dead_code)] + +use codec::Encode; +use num_traits::One; +use rand::random; +use structopt::StructOpt; +use strum::{EnumString, EnumVariantNames, VariantNames}; + +use frame_support::dispatch::GetDispatchInfo; +use relay_substrate_client::{ + AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, CallOf, Chain, ChainWithBalances, + Client, Error as SubstrateError, HashOf, SignatureOf, Subscription, TransactionSignScheme, + TransactionStatusOf, UnsignedTransaction, +}; +use sp_core::{blake2_256, storage::StorageKey, Bytes, Pair, H256, U256}; +use sp_runtime::traits::{Convert, Header as HeaderT}; + +use crate::cli::{ + Balance, CliChain, SourceConnectionParams, SourceSigningParams, TargetConnectionParams, + TargetSigningParams, +}; + +/// Swap tokens. +#[derive(StructOpt, Debug, PartialEq)] +pub struct SwapTokens { + /// A bridge instance to use in token swap. + #[structopt(possible_values = SwapTokensBridge::VARIANTS, case_insensitive = true)] + bridge: SwapTokensBridge, + + #[structopt(flatten)] + source: SourceConnectionParams, + #[structopt(flatten)] + source_sign: SourceSigningParams, + + #[structopt(flatten)] + target: TargetConnectionParams, + #[structopt(flatten)] + target_sign: TargetSigningParams, + + #[structopt(subcommand)] + swap_type: TokenSwapType, + /// Source chain balance that source signer wants to swap. + #[structopt(long)] + source_balance: Balance, + /// Target chain balance that target signer wants to swap. + #[structopt(long)] + target_balance: Balance, +} + +/// Token swap type. +#[derive(StructOpt, Debug, PartialEq, Eq, Clone)] +pub enum TokenSwapType { + /// The `target_sign` is temporary and only have funds for single swap. + NoLock, + /// This swap type prevents `source_signer` from restarting the swap after it has been + /// completed. + LockUntilBlock { + /// Number of blocks before the swap expires. 
+ #[structopt(long)] + blocks_before_expire: u32, + /// Unique swap nonce. + #[structopt(long)] + swap_nonce: Option, + }, +} + +/// Swap tokens bridge. +#[derive(Debug, EnumString, EnumVariantNames, PartialEq)] +#[strum(serialize_all = "kebab_case")] +pub enum SwapTokensBridge { + /// Use token-swap pallet deployed at Millau to swap tokens with Rialto. + MillauToRialto, +} + +macro_rules! select_bridge { + ($bridge: expr, $generic: tt) => { + match $bridge { + SwapTokensBridge::MillauToRialto => { + type Source = relay_millau_client::Millau; + type Target = relay_rialto_client::Rialto; + + type FromSwapToThisAccountIdConverter = bp_rialto::AccountIdConverter; + + use bp_millau::{ + derive_account_from_rialto_id as derive_source_account_from_target_account, + TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_TARGET_TO_SOURCE_MESSAGE_FEE_METHOD, + WITH_RIALTO_TOKEN_SWAP_PALLET_NAME as TOKEN_SWAP_PALLET_NAME, + }; + use bp_rialto::{ + derive_account_from_millau_id as derive_target_account_from_source_account, + TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_SOURCE_TO_TARGET_MESSAGE_FEE_METHOD, + }; + + const SOURCE_CHAIN_ID: bp_runtime::ChainId = bp_runtime::MILLAU_CHAIN_ID; + const TARGET_CHAIN_ID: bp_runtime::ChainId = bp_runtime::RIALTO_CHAIN_ID; + + const SOURCE_SPEC_VERSION: u32 = millau_runtime::VERSION.spec_version; + const TARGET_SPEC_VERSION: u32 = rialto_runtime::VERSION.spec_version; + + const SOURCE_TO_TARGET_LANE_ID: bp_messages::LaneId = *b"swap"; + const TARGET_TO_SOURCE_LANE_ID: bp_messages::LaneId = [0, 0, 0, 0]; + + $generic + }, + } + }; +} + +impl SwapTokens { + /// Run the command. 
+ pub async fn run(self) -> anyhow::Result<()> { + select_bridge!(self.bridge, { + let source_client = self.source.to_client::().await?; + let source_sign = self.source_sign.to_keypair::()?; + let target_client = self.target.to_client::().await?; + let target_sign = self.target_sign.to_keypair::()?; + + // names of variables in this function are matching names used by the + // `pallet-bridge-token-swap` + + // prepare token swap intention + let token_swap = self + .prepare_token_swap::(&source_client, &source_sign, &target_sign) + .await?; + + // group all accounts that will be used later + let accounts = TokenSwapAccounts { + source_account_at_bridged_chain: derive_target_account_from_source_account( + bp_runtime::SourceAccount::Account( + token_swap.source_account_at_this_chain.clone(), + ), + ), + target_account_at_this_chain: derive_source_account_from_target_account( + bp_runtime::SourceAccount::Account( + token_swap.target_account_at_bridged_chain.clone(), + ), + ), + source_account_at_this_chain: token_swap.source_account_at_this_chain.clone(), + target_account_at_bridged_chain: token_swap.target_account_at_bridged_chain.clone(), + swap_account: FromSwapToThisAccountIdConverter::convert( + token_swap.using_encoded(blake2_256).into(), + ), + }; + + // account balances are used to demonstrate what's happening :) + let initial_balances = + read_account_balances(&accounts, &source_client, &target_client).await?; + + // before calling something that may fail, log what we're trying to do + log::info!(target: "bridge", "Starting swap: {:?}", token_swap); + log::info!(target: "bridge", "Swap accounts: {:?}", accounts); + log::info!(target: "bridge", "Initial account balances: {:?}", initial_balances); + + // + // Step 1: swap is created + // + + // prepare `Currency::transfer` call that will happen at the target chain + let bridged_currency_transfer: CallOf = pallet_balances::Call::transfer { + dest: accounts.source_account_at_bridged_chain.clone().into(), + value: 
token_swap.target_balance_at_bridged_chain, + } + .into(); + let bridged_currency_transfer_weight = + bridged_currency_transfer.get_dispatch_info().weight; + + // sign message + let bridged_chain_spec_version = TARGET_SPEC_VERSION; + let signature_payload = pallet_bridge_dispatch::account_ownership_digest( + &bridged_currency_transfer, + &accounts.swap_account, + &bridged_chain_spec_version, + SOURCE_CHAIN_ID, + TARGET_CHAIN_ID, + ); + let bridged_currency_transfer_signature: SignatureOf = + target_sign.sign(&signature_payload).into(); + + // prepare `create_swap` call + let target_public_at_bridged_chain: AccountPublicOf = + target_sign.public().into(); + let swap_delivery_and_dispatch_fee: BalanceOf = + crate::cli::estimate_fee::estimate_message_delivery_and_dispatch_fee( + &source_client, + ESTIMATE_SOURCE_TO_TARGET_MESSAGE_FEE_METHOD, + SOURCE_TO_TARGET_LANE_ID, + bp_message_dispatch::MessagePayload { + spec_version: TARGET_SPEC_VERSION, + weight: bridged_currency_transfer_weight, + origin: bp_message_dispatch::CallOrigin::TargetAccount( + accounts.swap_account.clone(), + target_public_at_bridged_chain.clone(), + bridged_currency_transfer_signature.clone(), + ), + dispatch_fee_payment: + bp_runtime::messages::DispatchFeePayment::AtTargetChain, + call: bridged_currency_transfer.encode(), + }, + ) + .await?; + let create_swap_call: CallOf = pallet_bridge_token_swap::Call::create_swap { + swap: token_swap.clone(), + swap_creation_params: Box::new(bp_token_swap::TokenSwapCreation { + target_public_at_bridged_chain, + swap_delivery_and_dispatch_fee, + bridged_chain_spec_version, + bridged_currency_transfer: bridged_currency_transfer.encode(), + bridged_currency_transfer_weight, + bridged_currency_transfer_signature, + }), + } + .into(); + + // start tokens swap + let source_genesis_hash = *source_client.genesis_hash(); + let create_swap_signer = source_sign.clone(); + let swap_created_at = wait_until_transaction_is_finalized::( + source_client + 
.submit_and_watch_signed_extrinsic( + accounts.source_account_at_this_chain.clone(), + move |_, transaction_nonce| { + Bytes( + Source::sign_transaction( + source_genesis_hash, + &create_swap_signer, + relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new(create_swap_call, transaction_nonce), + ) + .encode(), + ) + }, + ) + .await?, + ) + .await?; + + // read state of swap after it has been created + let token_swap_hash: H256 = token_swap.using_encoded(blake2_256).into(); + let token_swap_storage_key = bp_runtime::storage_map_final_key_identity( + TOKEN_SWAP_PALLET_NAME, + pallet_bridge_token_swap::PENDING_SWAPS_MAP_NAME, + token_swap_hash.as_ref(), + ); + match read_token_swap_state(&source_client, swap_created_at, &token_swap_storage_key) + .await? + { + Some(bp_token_swap::TokenSwapState::Started) => { + log::info!(target: "bridge", "Swap has been successfully started"); + let intermediate_balances = + read_account_balances(&accounts, &source_client, &target_client).await?; + log::info!(target: "bridge", "Intermediate balances: {:?}", intermediate_balances); + }, + Some(token_swap_state) => + return Err(anyhow::format_err!( + "Fresh token swap has unexpected state: {:?}", + token_swap_state, + )), + None => return Err(anyhow::format_err!("Failed to start token swap")), + }; + + // + // Step 2: message is being relayed to the target chain and dispathed there + // + + // wait until message is dispatched at the target chain and dispatch result delivered + // back to source chain + let token_swap_state = wait_until_token_swap_state_is_changed( + &source_client, + &token_swap_storage_key, + bp_token_swap::TokenSwapState::Started, + ) + .await?; + let is_transfer_succeeded = match token_swap_state { + Some(bp_token_swap::TokenSwapState::Started) => { + unreachable!("wait_until_token_swap_state_is_changed only returns if state is not Started; qed",) + }, + None => + return Err(anyhow::format_err!("Fresh token swap has disappeared 
unexpectedly")), + Some(bp_token_swap::TokenSwapState::Confirmed) => { + log::info!( + target: "bridge", + "Transfer has been successfully dispatched at the target chain. Swap can be claimed", + ); + true + }, + Some(bp_token_swap::TokenSwapState::Failed) => { + log::info!( + target: "bridge", + "Transfer has been dispatched with an error at the target chain. Swap can be canceled", + ); + false + }, + }; + + // by this time: (1) token swap account has been created and (2) if transfer has been + // successfully dispatched, both target chain balances have changed + let intermediate_balances = + read_account_balances(&accounts, &source_client, &target_client).await?; + log::info!(target: "bridge", "Intermediate balances: {:?}", intermediate_balances); + + // transfer has been dispatched, but we may need to wait until block where swap can be + // claimed/canceled + if let bp_token_swap::TokenSwapType::LockClaimUntilBlock( + ref last_available_block_number, + _, + ) = token_swap.swap_type + { + wait_until_swap_unlocked( + &source_client, + last_available_block_number + BlockNumberOf::::one(), + ) + .await?; + } + + // + // Step 3: we may now claim or cancel the swap + // + + if is_transfer_succeeded { + log::info!(target: "bridge", "Claiming the swap swap"); + + // prepare `claim_swap` message that will be sent over the bridge + let claim_swap_call: CallOf = + pallet_bridge_token_swap::Call::claim_swap { swap: token_swap }.into(); + let claim_swap_message = bp_message_dispatch::MessagePayload { + spec_version: SOURCE_SPEC_VERSION, + weight: claim_swap_call.get_dispatch_info().weight, + origin: bp_message_dispatch::CallOrigin::SourceAccount( + accounts.target_account_at_bridged_chain.clone(), + ), + dispatch_fee_payment: bp_runtime::messages::DispatchFeePayment::AtSourceChain, + call: claim_swap_call.encode(), + }; + let claim_swap_delivery_and_dispatch_fee: BalanceOf = + crate::cli::estimate_fee::estimate_message_delivery_and_dispatch_fee( + &target_client, + 
ESTIMATE_TARGET_TO_SOURCE_MESSAGE_FEE_METHOD, + TARGET_TO_SOURCE_LANE_ID, + claim_swap_message.clone(), + ) + .await?; + let send_message_call: CallOf = + pallet_bridge_messages::Call::send_message { + lane_id: TARGET_TO_SOURCE_LANE_ID, + payload: claim_swap_message, + delivery_and_dispatch_fee: claim_swap_delivery_and_dispatch_fee, + } + .into(); + + // send `claim_swap` message + let target_genesis_hash = *target_client.genesis_hash(); + let _ = wait_until_transaction_is_finalized::( + target_client + .submit_and_watch_signed_extrinsic( + accounts.target_account_at_bridged_chain.clone(), + move |_, transaction_nonce| { + Bytes( + Target::sign_transaction( + target_genesis_hash, + &target_sign, + relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new( + send_message_call, + transaction_nonce, + ), + ) + .encode(), + ) + }, + ) + .await?, + ) + .await?; + + // wait until swap state is updated + let token_swap_state = wait_until_token_swap_state_is_changed( + &source_client, + &token_swap_storage_key, + bp_token_swap::TokenSwapState::Confirmed, + ) + .await?; + if token_swap_state != None { + return Err(anyhow::format_err!( + "Confirmed token swap state has been changed to {:?} unexpectedly", + token_swap_state + )) + } + } else { + log::info!(target: "bridge", "Cancelling the swap"); + let cancel_swap_call: CallOf = + pallet_bridge_token_swap::Call::cancel_swap { swap: token_swap.clone() }.into(); + let _ = wait_until_transaction_is_finalized::( + source_client + .submit_and_watch_signed_extrinsic( + accounts.source_account_at_this_chain.clone(), + move |_, transaction_nonce| { + Bytes( + Source::sign_transaction( + source_genesis_hash, + &source_sign, + relay_substrate_client::TransactionEra::immortal(), + UnsignedTransaction::new( + cancel_swap_call, + transaction_nonce, + ), + ) + .encode(), + ) + }, + ) + .await?, + ) + .await?; + } + + // print final balances + let final_balances = + read_account_balances(&accounts, &source_client, 
&target_client).await?; + log::info!(target: "bridge", "Final account balances: {:?}", final_balances); + + Ok(()) + }) + } + + /// Prepare token swap intention. + async fn prepare_token_swap( + &self, + source_client: &Client, + source_sign: &Source::KeyPair, + target_sign: &Target::KeyPair, + ) -> anyhow::Result< + bp_token_swap::TokenSwap< + BlockNumberOf, + BalanceOf, + AccountIdOf, + BalanceOf, + AccountIdOf, + >, + > + where + AccountIdOf: From<::Public>, + AccountIdOf: From<::Public>, + BalanceOf: From, + BalanceOf: From, + { + // accounts that are directly controlled by participants + let source_account_at_this_chain: AccountIdOf = source_sign.public().into(); + let target_account_at_bridged_chain: AccountIdOf = target_sign.public().into(); + + // balances that we're going to swap + let source_balance_at_this_chain: BalanceOf = self.source_balance.cast().into(); + let target_balance_at_bridged_chain: BalanceOf = self.target_balance.cast().into(); + + // prepare token swap intention + Ok(bp_token_swap::TokenSwap { + swap_type: self.prepare_token_swap_type(source_client).await?, + source_balance_at_this_chain, + source_account_at_this_chain: source_account_at_this_chain.clone(), + target_balance_at_bridged_chain, + target_account_at_bridged_chain: target_account_at_bridged_chain.clone(), + }) + } + + /// Prepare token swap type. 
+ async fn prepare_token_swap_type( + &self, + source_client: &Client, + ) -> anyhow::Result>> { + match self.swap_type { + TokenSwapType::NoLock => + Ok(bp_token_swap::TokenSwapType::TemporaryTargetAccountAtBridgedChain), + TokenSwapType::LockUntilBlock { blocks_before_expire, ref swap_nonce } => { + let blocks_before_expire: BlockNumberOf = blocks_before_expire.into(); + let current_source_block_number = *source_client.best_header().await?.number(); + Ok(bp_token_swap::TokenSwapType::LockClaimUntilBlock( + current_source_block_number + blocks_before_expire, + swap_nonce.unwrap_or_else(|| { + U256::from(random::()).overflowing_mul(U256::from(random::())).0 + }), + )) + }, + } + } +} + +/// Accounts that are participating in the swap. +#[derive(Debug)] +struct TokenSwapAccounts { + source_account_at_this_chain: ThisAccountId, + source_account_at_bridged_chain: BridgedAccountId, + target_account_at_bridged_chain: BridgedAccountId, + target_account_at_this_chain: ThisAccountId, + swap_account: ThisAccountId, +} + +/// Swap accounts balances. +#[derive(Debug)] +struct TokenSwapBalances { + source_account_at_this_chain_balance: Option, + source_account_at_bridged_chain_balance: Option, + target_account_at_bridged_chain_balance: Option, + target_account_at_this_chain_balance: Option, + swap_account_balance: Option, +} + +/// Read swap accounts balances. 
+async fn read_account_balances( + accounts: &TokenSwapAccounts, AccountIdOf>, + source_client: &Client, + target_client: &Client, +) -> anyhow::Result, BalanceOf>> { + Ok(TokenSwapBalances { + source_account_at_this_chain_balance: read_account_balance( + source_client, + &accounts.source_account_at_this_chain, + ) + .await?, + source_account_at_bridged_chain_balance: read_account_balance( + target_client, + &accounts.source_account_at_bridged_chain, + ) + .await?, + target_account_at_bridged_chain_balance: read_account_balance( + target_client, + &accounts.target_account_at_bridged_chain, + ) + .await?, + target_account_at_this_chain_balance: read_account_balance( + source_client, + &accounts.target_account_at_this_chain, + ) + .await?, + swap_account_balance: read_account_balance(source_client, &accounts.swap_account).await?, + }) +} + +/// Read account balance. +async fn read_account_balance( + client: &Client, + account: &AccountIdOf, +) -> anyhow::Result>> { + match client.free_native_balance(account.clone()).await { + Ok(balance) => Ok(Some(balance)), + Err(SubstrateError::AccountDoesNotExist) => Ok(None), + Err(error) => Err(anyhow::format_err!( + "Failed to read balance of {} account {:?}: {:?}", + C::NAME, + account, + error, + )), + } +} + +/// Wait until transaction is included into finalized block. +/// +/// Returns the hash of the finalized block with transaction. 
+pub(crate) async fn wait_until_transaction_is_finalized( + subscription: Subscription>, +) -> anyhow::Result> { + loop { + let transaction_status = subscription.next().await?; + match transaction_status { + Some(TransactionStatusOf::::FinalityTimeout(_)) | + Some(TransactionStatusOf::::Usurped(_)) | + Some(TransactionStatusOf::::Dropped) | + Some(TransactionStatusOf::::Invalid) | + None => + return Err(anyhow::format_err!( + "We've been waiting for finalization of {} transaction, but it now has the {:?} status", + C::NAME, + transaction_status, + )), + Some(TransactionStatusOf::::Finalized(block_hash)) => { + log::trace!( + target: "bridge", + "{} transaction has been finalized at block {}", + C::NAME, + block_hash, + ); + return Ok(block_hash) + }, + _ => { + log::trace!( + target: "bridge", + "Received intermediate status of {} transaction: {:?}", + C::NAME, + transaction_status, + ); + }, + } + } +} + +/// Waits until token swap state is changed from `Started` to something else. +async fn wait_until_token_swap_state_is_changed( + client: &Client, + swap_state_storage_key: &StorageKey, + previous_token_swap_state: bp_token_swap::TokenSwapState, +) -> anyhow::Result> { + log::trace!(target: "bridge", "Waiting for token swap state change"); + loop { + async_std::task::sleep(C::AVERAGE_BLOCK_INTERVAL).await; + + let best_block = client.best_finalized_header_number().await?; + let best_block_hash = client.block_hash_by_number(best_block).await?; + log::trace!(target: "bridge", "Inspecting {} block {}/{}", C::NAME, best_block, best_block_hash); + + let token_swap_state = + read_token_swap_state(client, best_block_hash, swap_state_storage_key).await?; + match token_swap_state { + Some(new_token_swap_state) if new_token_swap_state == previous_token_swap_state => {}, + _ => { + log::trace!( + target: "bridge", + "Token swap state has been changed from {:?} to {:?}", + previous_token_swap_state, + token_swap_state, + ); + return Ok(token_swap_state) + }, + } + } +} + 
+/// Waits until swap can be claimed or canceled. +async fn wait_until_swap_unlocked( + client: &Client, + required_block_number: BlockNumberOf, +) -> anyhow::Result<()> { + log::trace!(target: "bridge", "Waiting for token swap unlock"); + loop { + async_std::task::sleep(C::AVERAGE_BLOCK_INTERVAL).await; + + let best_block = client.best_finalized_header_number().await?; + let best_block_hash = client.block_hash_by_number(best_block).await?; + if best_block >= required_block_number { + return Ok(()) + } + + log::trace!(target: "bridge", "Skipping {} block {}/{}", C::NAME, best_block, best_block_hash); + } +} + +/// Read state of the active token swap. +async fn read_token_swap_state( + client: &Client, + at_block: C::Hash, + swap_state_storage_key: &StorageKey, +) -> anyhow::Result> { + Ok(client.storage_value(swap_state_storage_key.clone(), Some(at_block)).await?) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn swap_tokens_millau_to_rialto_no_lock() { + let swap_tokens = SwapTokens::from_iter(vec![ + "swap-tokens", + "millau-to-rialto", + "--source-host", + "127.0.0.1", + "--source-port", + "9000", + "--source-signer", + "//Alice", + "--source-balance", + "8000000000", + "--target-host", + "127.0.0.1", + "--target-port", + "9001", + "--target-signer", + "//Bob", + "--target-balance", + "9000000000", + "no-lock", + ]); + + assert_eq!( + swap_tokens, + SwapTokens { + bridge: SwapTokensBridge::MillauToRialto, + source: SourceConnectionParams { + source_host: "127.0.0.1".into(), + source_port: 9000, + source_secure: false, + }, + source_sign: SourceSigningParams { + source_signer: Some("//Alice".into()), + source_signer_password: None, + source_signer_file: None, + source_signer_password_file: None, + source_transactions_mortality: None, + }, + target: TargetConnectionParams { + target_host: "127.0.0.1".into(), + target_port: 9001, + target_secure: false, + }, + target_sign: TargetSigningParams { + target_signer: Some("//Bob".into()), + 
target_signer_password: None, + target_signer_file: None, + target_signer_password_file: None, + target_transactions_mortality: None, + }, + swap_type: TokenSwapType::NoLock, + source_balance: Balance(8000000000), + target_balance: Balance(9000000000), + } + ); + } + + #[test] + fn swap_tokens_millau_to_rialto_lock_until() { + let swap_tokens = SwapTokens::from_iter(vec![ + "swap-tokens", + "millau-to-rialto", + "--source-host", + "127.0.0.1", + "--source-port", + "9000", + "--source-signer", + "//Alice", + "--source-balance", + "8000000000", + "--target-host", + "127.0.0.1", + "--target-port", + "9001", + "--target-signer", + "//Bob", + "--target-balance", + "9000000000", + "lock-until-block", + "--blocks-before-expire", + "1", + ]); + + assert_eq!( + swap_tokens, + SwapTokens { + bridge: SwapTokensBridge::MillauToRialto, + source: SourceConnectionParams { + source_host: "127.0.0.1".into(), + source_port: 9000, + source_secure: false, + }, + source_sign: SourceSigningParams { + source_signer: Some("//Alice".into()), + source_signer_password: None, + source_signer_file: None, + source_signer_password_file: None, + source_transactions_mortality: None, + }, + target: TargetConnectionParams { + target_host: "127.0.0.1".into(), + target_port: 9001, + target_secure: false, + }, + target_sign: TargetSigningParams { + target_signer: Some("//Bob".into()), + target_signer_password: None, + target_signer_file: None, + target_signer_password_file: None, + target_transactions_mortality: None, + }, + swap_type: TokenSwapType::LockUntilBlock { + blocks_before_expire: 1, + swap_nonce: None, + }, + source_balance: Balance(8000000000), + target_balance: Balance(9000000000), + } + ); + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/main.rs b/polkadot/bridges/relays/bin-substrate/src/main.rs index d119042b0d8dc01639734696f19e6381a75dc7c8..13db6beefa6a0b4e89e00e803eb313f08930c510 100644 --- a/polkadot/bridges/relays/bin-substrate/src/main.rs +++ 
b/polkadot/bridges/relays/bin-substrate/src/main.rs @@ -20,13 +20,6 @@ mod chains; mod cli; -mod finality_pipeline; -mod finality_target; -mod headers_initialize; -mod messages_lane; -mod messages_source; -mod messages_target; -mod on_demand_headers; fn main() { let command = cli::parse_args(); diff --git a/polkadot/bridges/relays/bin-substrate/src/messages_lane.rs b/polkadot/bridges/relays/bin-substrate/src/messages_lane.rs deleted file mode 100644 index 458b08f6014ffd6bd358792a42c9fbd526dd0a22..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-substrate/src/messages_lane.rs +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::messages_source::SubstrateMessagesProof; -use crate::messages_target::SubstrateMessagesReceivingProof; -use crate::on_demand_headers::OnDemandHeadersRelay; - -use bp_messages::{LaneId, MessageNonce}; -use frame_support::weights::Weight; -use messages_relay::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}; -use relay_substrate_client::{BlockNumberOf, Chain, Client, HashOf}; -use relay_utils::{metrics::MetricsParams, BlockNumberBase}; -use sp_core::Bytes; -use std::ops::RangeInclusive; - -/// Substrate <-> Substrate messages relay parameters. 
-pub struct MessagesRelayParams { - /// Messages source client. - pub source_client: Client, - /// Sign parameters for messages source chain. - pub source_sign: SS, - /// Messages target client. - pub target_client: Client, - /// Sign parameters for messages target chain. - pub target_sign: TS, - /// Optional on-demand source to target headers relay. - pub source_to_target_headers_relay: Option>, - /// Optional on-demand target to source headers relay. - pub target_to_source_headers_relay: Option>, - /// Identifier of lane that needs to be served. - pub lane_id: LaneId, - /// Metrics parameters. - pub metrics_params: MetricsParams, -} - -/// Message sync pipeline for Substrate <-> Substrate relays. -pub trait SubstrateMessageLane: MessageLane { - /// Name of the runtime method that returns dispatch weight of outbound messages at the source chain. - const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str; - /// Name of the runtime method that returns latest generated nonce at the source chain. - const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str; - /// Name of the runtime method that returns latest received (confirmed) nonce at the the source chain. - const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str; - - /// Name of the runtime method that returns latest received nonce at the target chain. - const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str; - /// Name of the runtime method that returns latest confirmed (reward-paid) nonce at the target chain. - const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str; - /// Number of the runtime method that returns state of "unrewarded relayers" set at the target chain. - const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str; - - /// Name of the runtime method that returns id of best finalized source header at target chain. 
- const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str; - /// Name of the runtime method that returns id of best finalized target header at source chain. - const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str; - - /// Source chain. - type SourceChain: Chain; - /// Target chain. - type TargetChain: Chain; - - /// Returns id of account that we're using to sign transactions at target chain (messages proof). - fn target_transactions_author(&self) -> ::AccountId; - - /// Make messages delivery transaction. - fn make_messages_delivery_transaction( - &self, - transaction_nonce: ::Index, - generated_at_header: SourceHeaderIdOf, - nonces: RangeInclusive, - proof: Self::MessagesProof, - ) -> Bytes; - - /// Returns id of account that we're using to sign transactions at source chain (delivery proof). - fn source_transactions_author(&self) -> ::AccountId; - - /// Make messages receiving proof transaction. - fn make_messages_receiving_proof_transaction( - &self, - transaction_nonce: ::Index, - generated_at_header: TargetHeaderIdOf, - proof: Self::MessagesReceivingProof, - ) -> Bytes; -} - -/// Substrate-to-Substrate message lane. -#[derive(Debug)] -pub struct SubstrateMessageLaneToSubstrate { - /// Client for the source Substrate chain. - pub(crate) source_client: Client, - /// Parameters required to sign transactions for source chain. - pub(crate) source_sign: SourceSignParams, - /// Client for the target Substrate chain. - pub(crate) target_client: Client, - /// Parameters required to sign transactions for target chain. - pub(crate) target_sign: TargetSignParams, - /// Account id of relayer at the source chain. 
- pub(crate) relayer_id_at_source: Source::AccountId, -} - -impl Clone - for SubstrateMessageLaneToSubstrate -{ - fn clone(&self) -> Self { - Self { - source_client: self.source_client.clone(), - source_sign: self.source_sign.clone(), - target_client: self.target_client.clone(), - target_sign: self.target_sign.clone(), - relayer_id_at_source: self.relayer_id_at_source.clone(), - } - } -} - -impl MessageLane - for SubstrateMessageLaneToSubstrate -where - SourceSignParams: Clone + Send + Sync + 'static, - TargetSignParams: Clone + Send + Sync + 'static, - BlockNumberOf: BlockNumberBase, - BlockNumberOf: BlockNumberBase, -{ - const SOURCE_NAME: &'static str = Source::NAME; - const TARGET_NAME: &'static str = Target::NAME; - - type MessagesProof = SubstrateMessagesProof; - type MessagesReceivingProof = SubstrateMessagesReceivingProof; - - type SourceChainBalance = Source::Balance; - type SourceHeaderNumber = BlockNumberOf; - type SourceHeaderHash = HashOf; - - type TargetHeaderNumber = BlockNumberOf; - type TargetHeaderHash = HashOf; -} - -/// Returns maximal number of messages and their maximal cumulative dispatch weight, based -/// on given chain parameters. -pub fn select_delivery_transaction_limits( - max_extrinsic_weight: Weight, - max_unconfirmed_messages_at_inbound_lane: MessageNonce, -) -> (MessageNonce, Weight) { - // We may try to guess accurate value, based on maximal number of messages and per-message - // weight overhead, but the relay loop isn't using this info in a super-accurate way anyway. - // So just a rough guess: let's say 1/3 of max tx weight is for tx itself and the rest is - // for messages dispatch. - - // Another thing to keep in mind is that our runtimes (when this code was written) accept - // messages with dispatch weight <= max_extrinsic_weight/2. So we can't reserve less than - // that for dispatch. 
- - let weight_for_delivery_tx = max_extrinsic_weight / 3; - let weight_for_messages_dispatch = max_extrinsic_weight - weight_for_delivery_tx; - - let delivery_tx_base_weight = - W::receive_messages_proof_overhead() + W::receive_messages_proof_outbound_lane_state_overhead(); - let delivery_tx_weight_rest = weight_for_delivery_tx - delivery_tx_base_weight; - let max_number_of_messages = std::cmp::min( - delivery_tx_weight_rest / W::receive_messages_proof_messages_overhead(1), - max_unconfirmed_messages_at_inbound_lane, - ); - - assert!( - max_number_of_messages > 0, - "Relay should fit at least one message in every delivery transaction", - ); - assert!( - weight_for_messages_dispatch >= max_extrinsic_weight / 2, - "Relay shall be able to deliver messages with dispatch weight = max_extrinsic_weight / 2", - ); - - (max_number_of_messages, weight_for_messages_dispatch) -} - -#[cfg(test)] -mod tests { - use super::*; - - type RialtoToMillauMessagesWeights = pallet_bridge_messages::weights::RialtoWeight; - - #[test] - fn select_delivery_transaction_limits_works() { - let (max_count, max_weight) = select_delivery_transaction_limits::( - bp_millau::max_extrinsic_weight(), - bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, - ); - assert_eq!( - (max_count, max_weight), - // We don't actually care about these values, so feel free to update them whenever test - // fails. The only thing to do before that is to ensure that new values looks sane: i.e. weight - // reserved for messages dispatch allows dispatch of non-trivial messages. - // - // Any significant change in this values should attract additional attention. 
- (782, 216_583_333_334), - ); - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/messages_target.rs b/polkadot/bridges/relays/bin-substrate/src/messages_target.rs deleted file mode 100644 index f74efbe61b5af2d9c58d803bda4fcba0d4ba9456..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/bin-substrate/src/messages_target.rs +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate client as Substrate messages target. The chain we connect to should have -//! runtime that implements `HeaderApi` to allow bridging with -//! chain. 
- -use crate::messages_lane::SubstrateMessageLane; -use crate::messages_source::read_client_state; -use crate::on_demand_headers::OnDemandHeadersRelay; - -use async_trait::async_trait; -use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState}; -use bp_runtime::ChainId; -use bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof; -use codec::{Decode, Encode}; -use frame_support::{traits::Instance, weights::Weight}; -use messages_relay::{ - message_lane::{SourceHeaderIdOf, TargetHeaderIdOf}, - message_lane_loop::{TargetClient, TargetClientState}, -}; -use relay_substrate_client::{Chain, Client, Error as SubstrateError, HashOf}; -use relay_utils::{relay_loop::Client as RelayClient, BlockNumberBase}; -use sp_core::Bytes; -use sp_runtime::{traits::Header as HeaderT, DeserializeOwned}; -use std::{marker::PhantomData, ops::RangeInclusive}; - -/// Message receiving proof returned by the target Substrate node. -pub type SubstrateMessagesReceivingProof = ( - UnrewardedRelayersState, - FromBridgedChainMessagesDeliveryProof>, -); - -/// Substrate client as Substrate messages target. -pub struct SubstrateMessagesTarget { - client: Client, - lane: P, - lane_id: LaneId, - instance: ChainId, - source_to_target_headers_relay: Option>, - _phantom: PhantomData, -} - -impl SubstrateMessagesTarget { - /// Create new Substrate headers target. 
- pub fn new( - client: Client, - lane: P, - lane_id: LaneId, - instance: ChainId, - source_to_target_headers_relay: Option>, - ) -> Self { - SubstrateMessagesTarget { - client, - lane, - lane_id, - instance, - source_to_target_headers_relay, - _phantom: Default::default(), - } - } -} - -impl Clone for SubstrateMessagesTarget { - fn clone(&self) -> Self { - Self { - client: self.client.clone(), - lane: self.lane.clone(), - lane_id: self.lane_id, - instance: self.instance, - source_to_target_headers_relay: self.source_to_target_headers_relay.clone(), - _phantom: Default::default(), - } - } -} - -#[async_trait] -impl RelayClient for SubstrateMessagesTarget -where - C: Chain, - P: SubstrateMessageLane, - I: Send + Sync + Instance, -{ - type Error = SubstrateError; - - async fn reconnect(&mut self) -> Result<(), SubstrateError> { - self.client.reconnect().await - } -} - -#[async_trait] -impl TargetClient

for SubstrateMessagesTarget -where - C: Chain, - C::Header: DeserializeOwned, - C::Index: DeserializeOwned, - ::Number: BlockNumberBase, - P: SubstrateMessageLane< - TargetChain = C, - MessagesReceivingProof = SubstrateMessagesReceivingProof, - TargetHeaderNumber = ::Number, - TargetHeaderHash = ::Hash, - >, - P::SourceChain: Chain, - P::SourceHeaderNumber: Decode, - P::SourceHeaderHash: Decode, - I: Send + Sync + Instance, -{ - async fn state(&self) -> Result, SubstrateError> { - // we can't continue to deliver messages if target node is out of sync, because - // it may have already received (some of) messages that we're going to deliver - self.client.ensure_synced().await?; - - read_client_state::<_, P::SourceHeaderHash, P::SourceHeaderNumber>( - &self.client, - P::BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET, - ) - .await - } - - async fn latest_received_nonce( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, MessageNonce), SubstrateError> { - let encoded_response = self - .client - .state_call( - P::INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD.into(), - Bytes(self.lane_id.encode()), - Some(id.1), - ) - .await?; - let latest_received_nonce: MessageNonce = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - Ok((id, latest_received_nonce)) - } - - async fn latest_confirmed_received_nonce( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, MessageNonce), SubstrateError> { - let encoded_response = self - .client - .state_call( - P::INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD.into(), - Bytes(self.lane_id.encode()), - Some(id.1), - ) - .await?; - let latest_received_nonce: MessageNonce = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - Ok((id, latest_received_nonce)) - } - - async fn unrewarded_relayers_state( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, UnrewardedRelayersState), SubstrateError> { - let encoded_response = self - .client - .state_call( - P::INBOUND_LANE_UNREWARDED_RELAYERS_STATE.into(), - Bytes(self.lane_id.encode()), - Some(id.1), - ) - .await?; - let unrewarded_relayers_state: UnrewardedRelayersState = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - Ok((id, unrewarded_relayers_state)) - } - - async fn prove_messages_receiving( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, P::MessagesReceivingProof), SubstrateError> { - let (id, relayers_state) = self.unrewarded_relayers_state(id).await?; - let inbound_data_key = pallet_bridge_messages::storage_keys::inbound_lane_data_key::(&self.lane_id); - let proof = self - .client - .prove_storage(vec![inbound_data_key], id.1) - .await? - .iter_nodes() - .collect(); - let proof = FromBridgedChainMessagesDeliveryProof { - bridged_header_hash: id.1, - storage_proof: proof, - lane: self.lane_id, - }; - Ok((id, (relayers_state, proof))) - } - - async fn submit_messages_proof( - &self, - generated_at_header: SourceHeaderIdOf

, - nonces: RangeInclusive, - proof: P::MessagesProof, - ) -> Result, SubstrateError> { - self.client - .submit_signed_extrinsic(self.lane.target_transactions_author(), |transaction_nonce| { - self.lane.make_messages_delivery_transaction( - transaction_nonce, - generated_at_header, - nonces.clone(), - proof, - ) - }) - .await?; - Ok(nonces) - } - - async fn require_source_header_on_target(&self, id: SourceHeaderIdOf

) { - if let Some(ref source_to_target_headers_relay) = self.source_to_target_headers_relay { - source_to_target_headers_relay.require_finalized_header(id).await; - } - } - - async fn estimate_delivery_transaction_in_source_tokens( - &self, - _nonces: RangeInclusive, - _total_dispatch_weight: Weight, - _total_size: u32, - ) -> P::SourceChainBalance { - num_traits::Zero::zero() // TODO: https://github.com/paritytech/parity-bridges-common/issues/997 - } -} diff --git a/polkadot/bridges/relays/client-ethereum/Cargo.toml b/polkadot/bridges/relays/client-ethereum/Cargo.toml deleted file mode 100644 index 64a76a6b5dae3ccd4507d414f51293b711fc44d4..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/client-ethereum/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "relay-ethereum-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -async-std = "1.6.5" -bp-eth-poa = { path = "../../primitives/ethereum-poa" } -codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers" } -hex-literal = "0.3" -jsonrpsee-proc-macros = "=0.2.0-alpha.6" -jsonrpsee-ws-client = "=0.2.0-alpha.6" -libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"] } -log = "0.4.11" -relay-utils = { path = "../utils" } -web3 = { version = "0.15", git = "https://github.com/tomusdrw/rust-web3", branch ="td-ethabi", default-features = false } diff --git a/polkadot/bridges/relays/client-ethereum/src/client.rs b/polkadot/bridges/relays/client-ethereum/src/client.rs deleted file mode 100644 index 71dac5df6d481aca302cb8ef14fbec920c11e8a6..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/client-ethereum/src/client.rs +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::rpc::Ethereum; -use crate::types::{ - Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SignedRawTx, SyncState, Transaction, - TransactionHash, H256, U256, -}; -use crate::{ConnectionParams, Error, Result}; - -use jsonrpsee_ws_client::{WsClient as RpcClient, WsClientBuilder as RpcClientBuilder}; -use relay_utils::relay_loop::RECONNECT_DELAY; -use std::sync::Arc; - -/// Number of headers missing from the Ethereum node for us to consider node not synced. -const MAJOR_SYNC_BLOCKS: u64 = 5; - -/// The client used to interact with an Ethereum node through RPC. -#[derive(Clone)] -pub struct Client { - params: ConnectionParams, - client: Arc, -} - -impl Client { - /// Create a new Ethereum RPC Client. - /// - /// This function will keep connecting to given Ethereum node until connection is established - /// and is functional. If attempt fail, it will wait for `RECONNECT_DELAY` and retry again. - pub async fn new(params: ConnectionParams) -> Self { - loop { - match Self::try_connect(params.clone()).await { - Ok(client) => return client, - Err(error) => log::error!( - target: "bridge", - "Failed to connect to Ethereum node: {:?}. Going to retry in {}s", - error, - RECONNECT_DELAY.as_secs(), - ), - } - - async_std::task::sleep(RECONNECT_DELAY).await; - } - } - - /// Try to connect to Ethereum node. 
Returns Ethereum RPC client if connection has been established - /// or error otherwise. - pub async fn try_connect(params: ConnectionParams) -> Result { - Ok(Self { - client: Self::build_client(¶ms).await?, - params, - }) - } - - /// Build client to use in connection. - async fn build_client(params: &ConnectionParams) -> Result> { - let uri = format!("ws://{}:{}", params.host, params.port); - let client = RpcClientBuilder::default().build(&uri).await?; - Ok(Arc::new(client)) - } - - /// Reopen client connection. - pub async fn reconnect(&mut self) -> Result<()> { - self.client = Self::build_client(&self.params).await?; - Ok(()) - } -} - -impl Client { - /// Returns true if client is connected to at least one peer and is in synced state. - pub async fn ensure_synced(&self) -> Result<()> { - match Ethereum::syncing(&*self.client).await? { - SyncState::NotSyncing => Ok(()), - SyncState::Syncing(syncing) => { - let missing_headers = syncing.highest_block.saturating_sub(syncing.current_block); - if missing_headers > MAJOR_SYNC_BLOCKS.into() { - return Err(Error::ClientNotSynced(missing_headers)); - } - - Ok(()) - } - } - } - - /// Estimate gas usage for the given call. - pub async fn estimate_gas(&self, call_request: CallRequest) -> Result { - Ok(Ethereum::estimate_gas(&*self.client, call_request).await?) - } - - /// Retrieve number of the best known block from the Ethereum node. - pub async fn best_block_number(&self) -> Result { - Ok(Ethereum::block_number(&*self.client).await?.as_u64()) - } - - /// Retrieve number of the best known block from the Ethereum node. - pub async fn header_by_number(&self, block_number: u64) -> Result

{ - let get_full_tx_objects = false; - let header = Ethereum::get_block_by_number(&*self.client, block_number, get_full_tx_objects).await?; - match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() { - true => Ok(header), - false => Err(Error::IncompleteHeader), - } - } - - /// Retrieve block header by its hash from Ethereum node. - pub async fn header_by_hash(&self, hash: H256) -> Result
{ - let get_full_tx_objects = false; - let header = Ethereum::get_block_by_hash(&*self.client, hash, get_full_tx_objects).await?; - match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() { - true => Ok(header), - false => Err(Error::IncompleteHeader), - } - } - - /// Retrieve block header and its transactions by its number from Ethereum node. - pub async fn header_by_number_with_transactions(&self, number: u64) -> Result { - let get_full_tx_objects = true; - let header = - Ethereum::get_block_by_number_with_transactions(&*self.client, number, get_full_tx_objects).await?; - - let is_complete_header = header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some(); - if !is_complete_header { - return Err(Error::IncompleteHeader); - } - - let is_complete_transactions = header.transactions.iter().all(|tx| tx.raw.is_some()); - if !is_complete_transactions { - return Err(Error::IncompleteTransaction); - } - - Ok(header) - } - - /// Retrieve block header and its transactions by its hash from Ethereum node. - pub async fn header_by_hash_with_transactions(&self, hash: H256) -> Result { - let get_full_tx_objects = true; - let header = Ethereum::get_block_by_hash_with_transactions(&*self.client, hash, get_full_tx_objects).await?; - - let is_complete_header = header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some(); - if !is_complete_header { - return Err(Error::IncompleteHeader); - } - - let is_complete_transactions = header.transactions.iter().all(|tx| tx.raw.is_some()); - if !is_complete_transactions { - return Err(Error::IncompleteTransaction); - } - - Ok(header) - } - - /// Retrieve transaction by its hash from Ethereum node. - pub async fn transaction_by_hash(&self, hash: H256) -> Result> { - Ok(Ethereum::transaction_by_hash(&*self.client, hash).await?) - } - - /// Retrieve transaction receipt by transaction hash. 
- pub async fn transaction_receipt(&self, transaction_hash: H256) -> Result { - Ok(Ethereum::get_transaction_receipt(&*self.client, transaction_hash).await?) - } - - /// Get the nonce of the given account. - pub async fn account_nonce(&self, address: Address) -> Result { - Ok(Ethereum::get_transaction_count(&*self.client, address).await?) - } - - /// Submit an Ethereum transaction. - /// - /// The transaction must already be signed before sending it through this method. - pub async fn submit_transaction(&self, signed_raw_tx: SignedRawTx) -> Result { - let transaction = Bytes(signed_raw_tx); - let tx_hash = Ethereum::submit_transaction(&*self.client, transaction).await?; - log::trace!(target: "bridge", "Sent transaction to Ethereum node: {:?}", tx_hash); - Ok(tx_hash) - } - - /// Call Ethereum smart contract. - pub async fn eth_call(&self, call_transaction: CallRequest) -> Result { - Ok(Ethereum::call(&*self.client, call_transaction).await?) - } -} diff --git a/polkadot/bridges/relays/client-ethereum/src/error.rs b/polkadot/bridges/relays/client-ethereum/src/error.rs deleted file mode 100644 index bcd8edc3f33ae5dc38a753003f83609f6590d676..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/client-ethereum/src/error.rs +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Ethereum node RPC errors. - -use crate::types::U256; - -use jsonrpsee_ws_client::Error as RpcError; -use relay_utils::MaybeConnectionError; - -/// Result type used by Ethereum client. -pub type Result = std::result::Result; - -/// Errors that can occur only when interacting with -/// an Ethereum node through RPC. -#[derive(Debug)] -pub enum Error { - /// An error that can occur when making an HTTP request to - /// an JSON-RPC client. - RpcError(RpcError), - /// Failed to parse response. - ResponseParseFailed(String), - /// We have received a header with missing fields. - IncompleteHeader, - /// We have received a transaction missing a `raw` field. - IncompleteTransaction, - /// An invalid Substrate block number was received from - /// an Ethereum node. - InvalidSubstrateBlockNumber, - /// An invalid index has been received from an Ethereum node. - InvalidIncompleteIndex, - /// The client we're connected to is not synced, so we can't rely on its state. Contains - /// number of unsynced headers. 
- ClientNotSynced(U256), -} - -impl From for Error { - fn from(error: RpcError) -> Self { - Error::RpcError(error) - } -} - -impl MaybeConnectionError for Error { - fn is_connection_error(&self) -> bool { - matches!( - *self, - Error::RpcError(RpcError::TransportError(_)) - // right now if connection to the ws server is dropped (after it is already established), - // we're getting this error - | Error::RpcError(RpcError::Internal(_)) - | Error::ClientNotSynced(_), - ) - } -} - -impl ToString for Error { - fn to_string(&self) -> String { - match self { - Self::RpcError(e) => e.to_string(), - Self::ResponseParseFailed(e) => e.to_string(), - Self::IncompleteHeader => { - "Incomplete Ethereum Header Received (missing some of required fields - hash, number, logs_bloom)" - .to_string() - } - Self::IncompleteTransaction => "Incomplete Ethereum Transaction (missing required field - raw)".to_string(), - Self::InvalidSubstrateBlockNumber => "Received an invalid Substrate block from Ethereum Node".to_string(), - Self::InvalidIncompleteIndex => "Received an invalid incomplete index from Ethereum Node".to_string(), - Self::ClientNotSynced(missing_headers) => { - format!("Ethereum client is not synced: syncing {} headers", missing_headers) - } - } - } -} diff --git a/polkadot/bridges/relays/client-ethereum/src/rpc.rs b/polkadot/bridges/relays/client-ethereum/src/rpc.rs deleted file mode 100644 index 0fb81f7655a4622acdd0de4ae365f9cc67a4c15b..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/client-ethereum/src/rpc.rs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Ethereum node RPC interface. - -use crate::types::{ - Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SyncState, Transaction, TransactionHash, - H256, U256, U64, -}; - -jsonrpsee_proc_macros::rpc_client_api! { - pub(crate) Ethereum { - #[rpc(method = "eth_syncing", positional_params)] - fn syncing() -> SyncState; - #[rpc(method = "eth_estimateGas", positional_params)] - fn estimate_gas(call_request: CallRequest) -> U256; - #[rpc(method = "eth_blockNumber", positional_params)] - fn block_number() -> U64; - #[rpc(method = "eth_getBlockByNumber", positional_params)] - fn get_block_by_number(block_number: U64, full_tx_objs: bool) -> Header; - #[rpc(method = "eth_getBlockByHash", positional_params)] - fn get_block_by_hash(hash: H256, full_tx_objs: bool) -> Header; - #[rpc(method = "eth_getBlockByNumber", positional_params)] - fn get_block_by_number_with_transactions(number: U64, full_tx_objs: bool) -> HeaderWithTransactions; - #[rpc(method = "eth_getBlockByHash", positional_params)] - fn get_block_by_hash_with_transactions(hash: H256, full_tx_objs: bool) -> HeaderWithTransactions; - #[rpc(method = "eth_getTransactionByHash", positional_params)] - fn transaction_by_hash(hash: H256) -> Option; - #[rpc(method = "eth_getTransactionReceipt", positional_params)] - fn get_transaction_receipt(transaction_hash: H256) -> Receipt; - #[rpc(method = "eth_getTransactionCount", positional_params)] - fn get_transaction_count(address: Address) -> U256; - #[rpc(method = "eth_submitTransaction", positional_params)] - fn submit_transaction(transaction: Bytes) -> TransactionHash; - 
#[rpc(method = "eth_call", positional_params)] - fn call(transaction_call: CallRequest) -> Bytes; - } -} diff --git a/polkadot/bridges/relays/client-ethereum/src/sign.rs b/polkadot/bridges/relays/client-ethereum/src/sign.rs deleted file mode 100644 index 6f479ab7d5cd4c8d4984593e5047e0c444326dda..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/client-ethereum/src/sign.rs +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::types::{Address, CallRequest, U256}; -use crate::{Client, Result}; -use bp_eth_poa::signatures::{secret_to_address, SignTransaction}; -use hex_literal::hex; -use secp256k1::SecretKey; - -/// Ethereum signing params. -#[derive(Clone, Debug)] -pub struct SigningParams { - /// Ethereum chain id. - pub chain_id: u64, - /// Ethereum transactions signer. - pub signer: SecretKey, - /// Gas price we agree to pay. 
- pub gas_price: U256, -} - -impl Default for SigningParams { - fn default() -> Self { - SigningParams { - chain_id: 0x11, // Parity dev chain - // account that has a lot of ether when we run instant seal engine - // address: 0x00a329c0648769a73afac7f9381e08fb43dbea72 - // secret: 0x4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7 - signer: SecretKey::parse(&hex!( - "4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7" - )) - .expect("secret is hardcoded, thus valid; qed"), - gas_price: 8_000_000_000u64.into(), // 8 Gwei - } - } -} - -/// Sign and submit tranaction using given Ethereum client. -pub async fn sign_and_submit_transaction( - client: &Client, - params: &SigningParams, - contract_address: Option
, - nonce: Option, - double_gas: bool, - encoded_call: Vec, -) -> Result<()> { - let nonce = if let Some(n) = nonce { - n - } else { - let address: Address = secret_to_address(¶ms.signer); - client.account_nonce(address).await? - }; - - let call_request = CallRequest { - to: contract_address, - data: Some(encoded_call.clone().into()), - ..Default::default() - }; - let gas = client.estimate_gas(call_request).await?; - - let raw_transaction = bp_eth_poa::UnsignedTransaction { - nonce, - to: contract_address, - value: U256::zero(), - gas: if double_gas { gas.saturating_mul(2.into()) } else { gas }, - gas_price: params.gas_price, - payload: encoded_call, - } - .sign_by(¶ms.signer, Some(params.chain_id)); - - let _ = client.submit_transaction(raw_transaction).await?; - Ok(()) -} diff --git a/polkadot/bridges/relays/client-ethereum/src/types.rs b/polkadot/bridges/relays/client-ethereum/src/types.rs deleted file mode 100644 index f589474aff1be67eecf871cbcec887e6351dd9f6..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/client-ethereum/src/types.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Common types that are used in relay <-> Ethereum node communications. 
- -use headers_relay::sync_types::SourceHeader; - -pub use web3::types::{Address, Bytes, CallRequest, SyncState, H256, U128, U256, U64}; - -/// When header is just received from the Ethereum node, we check that it has -/// both number and hash fields filled. -pub const HEADER_ID_PROOF: &str = "checked on retrieval; qed"; - -/// Ethereum transaction hash type. -pub type HeaderHash = H256; - -/// Ethereum transaction hash type. -pub type TransactionHash = H256; - -/// Ethereum transaction type. -pub type Transaction = web3::types::Transaction; - -/// Ethereum header type. -pub type Header = web3::types::Block; - -/// Ethereum header type used in headers sync. -#[derive(Clone, Debug, PartialEq)] -pub struct SyncHeader(Header); - -impl std::ops::Deref for SyncHeader { - type Target = Header; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -/// Ethereum header with transactions type. -pub type HeaderWithTransactions = web3::types::Block; - -/// Ethereum transaction receipt type. -pub type Receipt = web3::types::TransactionReceipt; - -/// Ethereum header ID. -pub type HeaderId = relay_utils::HeaderId; - -/// A raw Ethereum transaction that's been signed. -pub type SignedRawTx = Vec; - -impl From
for SyncHeader { - fn from(header: Header) -> Self { - Self(header) - } -} - -impl SourceHeader for SyncHeader { - fn id(&self) -> HeaderId { - relay_utils::HeaderId( - self.number.expect(HEADER_ID_PROOF).as_u64(), - self.hash.expect(HEADER_ID_PROOF), - ) - } - - fn parent_id(&self) -> HeaderId { - relay_utils::HeaderId(self.number.expect(HEADER_ID_PROOF).as_u64() - 1, self.parent_hash) - } -} diff --git a/polkadot/bridges/relays/client-kusama/Cargo.toml b/polkadot/bridges/relays/client-kusama/Cargo.toml index b9c397bca6c0198eaf5b162570804d80b67cd09a..a48d82f641b701f2907c75f436f039f7589d373e 100644 --- a/polkadot/bridges/relays/client-kusama/Cargo.toml +++ b/polkadot/bridges/relays/client-kusama/Cargo.toml @@ -6,20 +6,25 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers" } +codec = { package = "parity-scale-codec", version = "2.2.0" } relay-substrate-client = { path = "../client-substrate" } relay-utils = { path = "../utils" } +scale-info = { version = "1.0", features = ["derive"] } # Bridge dependencies +bp-header-chain = { path = "../../primitives/header-chain" } bp-kusama = { path = "../../primitives/chain-kusama" } +bp-message-dispatch = { path = "../../primitives/message-dispatch" } +bp-messages = { path = "../../primitives/messages" } +bp-polkadot = { path = "../../primitives/chain-polkadot" } +bp-polkadot-core = { path = "../../primitives/polkadot-core" } +bp-runtime = { path = "../../primitives/runtime" } +bridge-runtime-common = { path = "../../bin/runtime-common" } +pallet-bridge-dispatch = { path = "../../modules/dispatch" } # Substrate Dependencies -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = 
"master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-kusama/src/lib.rs b/polkadot/bridges/relays/client-kusama/src/lib.rs index f2fba32dc1ed02f03ced2188d7905c542e29f9d1..a93726620ff61924e4457ad90da6a2623b2e1ef1 100644 --- a/polkadot/bridges/relays/client-kusama/src/lib.rs +++ b/polkadot/bridges/relays/client-kusama/src/lib.rs @@ -16,9 +16,17 @@ //! Types used to connect to the Kusama chain. -use relay_substrate_client::{Chain, ChainBase}; +use codec::Encode; +use relay_substrate_client::{ + Chain, ChainBase, ChainWithBalances, TransactionEraOf, TransactionSignScheme, + UnsignedTransaction, +}; +use sp_core::{storage::StorageKey, Pair}; +use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; use std::time::Duration; +pub mod runtime; + /// Kusama header id. 
pub type HeaderId = relay_utils::HeaderId; @@ -31,18 +39,86 @@ impl ChainBase for Kusama { type Hash = bp_kusama::Hash; type Hasher = bp_kusama::Hasher; type Header = bp_kusama::Header; + + type AccountId = bp_kusama::AccountId; + type Balance = bp_kusama::Balance; + type Index = bp_kusama::Nonce; + type Signature = bp_kusama::Signature; } impl Chain for Kusama { const NAME: &'static str = "Kusama"; const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); + const STORAGE_PROOF_OVERHEAD: u32 = bp_kusama::EXTRA_STORAGE_PROOF_SIZE; + const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_kusama::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE; - type AccountId = bp_kusama::AccountId; - type Index = bp_kusama::Nonce; type SignedBlock = bp_kusama::SignedBlock; - type Call = (); - type Balance = bp_kusama::Balance; + type Call = crate::runtime::Call; + type WeightToFee = bp_kusama::WeightToFee; +} + +impl ChainWithBalances for Kusama { + fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { + StorageKey(bp_kusama::account_info_storage_key(account_id)) + } +} + +impl TransactionSignScheme for Kusama { + type Chain = Kusama; + type AccountKeyPair = sp_core::sr25519::Pair; + type SignedTransaction = crate::runtime::UncheckedExtrinsic; + + fn sign_transaction( + genesis_hash: ::Hash, + signer: &Self::AccountKeyPair, + era: TransactionEraOf, + unsigned: UnsignedTransaction, + ) -> Self::SignedTransaction { + let raw_payload = SignedPayload::new( + unsigned.call, + bp_kusama::SignedExtensions::new( + bp_kusama::VERSION, + era, + genesis_hash, + unsigned.nonce, + unsigned.tip, + ), + ) + .expect("SignedExtension never fails."); + + let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); + let signer: sp_runtime::MultiSigner = signer.public().into(); + let (call, extra, _) = raw_payload.deconstruct(); + + bp_kusama::UncheckedExtrinsic::new_signed( + call, + sp_runtime::MultiAddress::Id(signer.into_account()), + signature.into(), + extra, + ) + } + + fn 
is_signed(tx: &Self::SignedTransaction) -> bool { + tx.signature.is_some() + } + + fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool { + tx.signature + .as_ref() + .map(|(address, _, _)| { + *address == bp_kusama::AccountId::from(*signer.public().as_array_ref()).into() + }) + .unwrap_or(false) + } + + fn parse_transaction(tx: Self::SignedTransaction) -> Option> { + let extra = &tx.signature.as_ref()?.2; + Some(UnsignedTransaction { call: tx.function, nonce: extra.nonce(), tip: extra.tip() }) + } } /// Kusama header type used in headers sync. pub type SyncHeader = relay_substrate_client::SyncHeader; + +/// Kusama signing params. +pub type SigningParams = sp_core::sr25519::Pair; diff --git a/polkadot/bridges/relays/client-kusama/src/runtime.rs b/polkadot/bridges/relays/client-kusama/src/runtime.rs new file mode 100644 index 0000000000000000000000000000000000000000..6d0ab5462d7c8418ca8c71ea2f5815762143df4c --- /dev/null +++ b/polkadot/bridges/relays/client-kusama/src/runtime.rs @@ -0,0 +1,154 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Types that are specific to the Kusama runtime. 
+ +use bp_messages::{LaneId, UnrewardedRelayersState}; +use bp_polkadot_core::{AccountAddress, Balance, PolkadotLike}; +use bp_runtime::Chain; +use codec::{Compact, Decode, Encode}; +use frame_support::weights::Weight; +use scale_info::TypeInfo; +use sp_runtime::FixedU128; + +/// Unchecked Kusama extrinsic. +pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; + +/// Polkadot account ownership digest from Kusama. +/// +/// The byte vector returned by this function should be signed with a Polkadot account private key. +/// This way, the owner of `kusama_account_id` on Kusama proves that the Polkadot account private +/// key is also under his control. +pub fn kusama_to_polkadot_account_ownership_digest( + polkadot_call: &Call, + kusama_account_id: AccountId, + polkadot_spec_version: SpecVersion, +) -> Vec +where + Call: codec::Encode, + AccountId: codec::Encode, + SpecVersion: codec::Encode, +{ + pallet_bridge_dispatch::account_ownership_digest( + polkadot_call, + kusama_account_id, + polkadot_spec_version, + bp_runtime::KUSAMA_CHAIN_ID, + bp_runtime::POLKADOT_CHAIN_ID, + ) +} + +/// Kusama Runtime `Call` enum. +/// +/// The enum represents a subset of possible `Call`s we can send to Kusama chain. +/// Ideally this code would be auto-generated from metadata, because we want to +/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. +/// +/// All entries here (like pretty much in the entire file) must be kept in sync with Kusama +/// `construct_runtime`, so that we maintain SCALE-compatibility. +/// +/// See: [link](https://github.com/paritytech/polkadot/blob/master/runtime/kusama/src/lib.rs) +#[allow(clippy::large_enum_variant)] +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] +pub enum Call { + /// System pallet. + #[codec(index = 0)] + System(SystemCall), + /// Balances pallet. + #[codec(index = 4)] + Balances(BalancesCall), + /// Polkadot bridge pallet. 
+ #[codec(index = 110)] + BridgePolkadotGrandpa(BridgePolkadotGrandpaCall), + /// Polkadot messages pallet. + #[codec(index = 111)] + BridgePolkadotMessages(BridgePolkadotMessagesCall), +} + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] +#[allow(non_camel_case_types)] +pub enum SystemCall { + #[codec(index = 1)] + remark(Vec), +} + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] +#[allow(non_camel_case_types)] +pub enum BalancesCall { + #[codec(index = 0)] + transfer(AccountAddress, Compact), +} + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] +#[allow(non_camel_case_types)] +pub enum BridgePolkadotGrandpaCall { + #[codec(index = 0)] + submit_finality_proof( + Box<::Header>, + bp_header_chain::justification::GrandpaJustification<::Header>, + ), + #[codec(index = 1)] + initialize(bp_header_chain::InitializationData<::Header>), +} + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] +#[allow(non_camel_case_types)] +pub enum BridgePolkadotMessagesCall { + #[codec(index = 2)] + update_pallet_parameter(BridgePolkadotMessagesParameter), + #[codec(index = 3)] + send_message( + LaneId, + bp_message_dispatch::MessagePayload< + bp_kusama::AccountId, + bp_polkadot::AccountId, + bp_polkadot::AccountPublic, + Vec, + >, + bp_kusama::Balance, + ), + #[codec(index = 5)] + receive_messages_proof( + bp_polkadot::AccountId, + bridge_runtime_common::messages::target::FromBridgedChainMessagesProof, + u32, + Weight, + ), + #[codec(index = 6)] + receive_messages_delivery_proof( + bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof< + bp_polkadot::Hash, + >, + UnrewardedRelayersState, + ), +} + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] +pub enum BridgePolkadotMessagesParameter { + #[codec(index = 0)] + PolkadotToKusamaConversionRate(FixedU128), +} + +impl sp_runtime::traits::Dispatchable for Call { + type Origin = (); + type Config = (); + type Info = (); + type 
PostInfo = (); + + fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { + unimplemented!("The Call is not expected to be dispatched.") + } +} diff --git a/polkadot/bridges/relays/client-millau/Cargo.toml b/polkadot/bridges/relays/client-millau/Cargo.toml index e16f06f8528b674e0466986431d05bf5a2de55bc..49d9dade154c2d2cb6321b926eccad48459a87cb 100644 --- a/polkadot/bridges/relays/client-millau/Cargo.toml +++ b/polkadot/bridges/relays/client-millau/Cargo.toml @@ -6,13 +6,13 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers" } +codec = { package = "parity-scale-codec", version = "2.2.0" } relay-substrate-client = { path = "../client-substrate" } relay-utils = { path = "../utils" } # Supported Chains +bp-millau = { path = "../../primitives/chain-millau" } millau-runtime = { path = "../../bin/millau/runtime" } # Substrate Dependencies @@ -21,5 +21,4 @@ frame-support = { git = "https://github.com/paritytech/substrate", branch = "mas frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-millau/src/lib.rs b/polkadot/bridges/relays/client-millau/src/lib.rs index 8597d9e59200f26d8e9b4958cf39751e8f5e2365..3f1aba1f3b372493b26d6217f206dbac66695bda 100644 --- a/polkadot/bridges/relays/client-millau/src/lib.rs +++ b/polkadot/bridges/relays/client-millau/src/lib.rs @@ -16,8 +16,11 @@ //! Types used to connect to the Millau-Substrate chain. 
-use codec::Encode; -use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme}; +use codec::{Compact, Decode, Encode}; +use relay_substrate_client::{ + BalanceOf, Chain, ChainBase, ChainWithBalances, IndexOf, TransactionEraOf, + TransactionSignScheme, UnsignedTransaction, +}; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; use std::time::Duration; @@ -34,17 +37,22 @@ impl ChainBase for Millau { type Hash = millau_runtime::Hash; type Hasher = millau_runtime::Hashing; type Header = millau_runtime::Header; + + type AccountId = millau_runtime::AccountId; + type Balance = millau_runtime::Balance; + type Index = millau_runtime::Index; + type Signature = millau_runtime::Signature; } impl Chain for Millau { const NAME: &'static str = "Millau"; const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(5); + const STORAGE_PROOF_OVERHEAD: u32 = bp_millau::EXTRA_STORAGE_PROOF_SIZE; + const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE; - type AccountId = millau_runtime::AccountId; - type Index = millau_runtime::Index; type SignedBlock = millau_runtime::SignedBlock; type Call = millau_runtime::Call; - type Balance = millau_runtime::Balance; + type WeightToFee = bp_millau::WeightToFee; } impl ChainWithBalances for Millau { @@ -64,25 +72,25 @@ impl TransactionSignScheme for Millau { fn sign_transaction( genesis_hash: ::Hash, signer: &Self::AccountKeyPair, - signer_nonce: ::Index, - call: ::Call, + era: TransactionEraOf, + unsigned: UnsignedTransaction, ) -> Self::SignedTransaction { let raw_payload = SignedPayload::from_raw( - call, + unsigned.call, ( frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(sp_runtime::generic::Era::Immortal), - frame_system::CheckNonce::::from(signer_nonce), + frame_system::CheckEra::::from(era.frame_era()), + 
frame_system::CheckNonce::::from(unsigned.nonce), frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(0), + pallet_transaction_payment::ChargeTransactionPayment::::from(unsigned.tip), ), ( millau_runtime::VERSION.spec_version, millau_runtime::VERSION.transaction_version, genesis_hash, - genesis_hash, + era.signed_payload(genesis_hash), (), (), (), @@ -92,7 +100,36 @@ impl TransactionSignScheme for Millau { let signer: sp_runtime::MultiSigner = signer.public().into(); let (call, extra, _) = raw_payload.deconstruct(); - millau_runtime::UncheckedExtrinsic::new_signed(call, signer.into_account(), signature.into(), extra) + millau_runtime::UncheckedExtrinsic::new_signed( + call, + signer.into_account(), + signature.into(), + extra, + ) + } + + fn is_signed(tx: &Self::SignedTransaction) -> bool { + tx.signature.is_some() + } + + fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool { + tx.signature + .as_ref() + .map(|(address, _, _)| { + *address == millau_runtime::Address::from(*signer.public().as_array_ref()) + }) + .unwrap_or(false) + } + + fn parse_transaction(tx: Self::SignedTransaction) -> Option> { + let extra = &tx.signature.as_ref()?.2; + Some(UnsignedTransaction { + call: tx.function, + nonce: Compact::>::decode(&mut &extra.4.encode()[..]).ok()?.into(), + tip: Compact::>::decode(&mut &extra.6.encode()[..]) + .ok()? 
+ .into(), + }) } } diff --git a/polkadot/bridges/relays/client-polkadot/Cargo.toml b/polkadot/bridges/relays/client-polkadot/Cargo.toml index b148745f5a9872778d45e1bfb0ea8ea1484db90b..ff7748657941195d962df03f2f1c8374a570cff6 100644 --- a/polkadot/bridges/relays/client-polkadot/Cargo.toml +++ b/polkadot/bridges/relays/client-polkadot/Cargo.toml @@ -6,20 +6,25 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers" } +codec = { package = "parity-scale-codec", version = "2.2.0" } relay-substrate-client = { path = "../client-substrate" } relay-utils = { path = "../utils" } +scale-info = { version = "1.0", features = ["derive"] } # Bridge dependencies +bp-header-chain = { path = "../../primitives/header-chain" } +bp-kusama = { path = "../../primitives/chain-kusama" } +bp-message-dispatch = { path = "../../primitives/message-dispatch" } +bp-messages = { path = "../../primitives/messages" } bp-polkadot = { path = "../../primitives/chain-polkadot" } +bp-polkadot-core = { path = "../../primitives/polkadot-core" } +bp-runtime = { path = "../../primitives/runtime" } +bridge-runtime-common = { path = "../../bin/runtime-common" } +pallet-bridge-dispatch = { path = "../../modules/dispatch" } # Substrate Dependencies -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-polkadot/src/lib.rs 
b/polkadot/bridges/relays/client-polkadot/src/lib.rs index e502463187d2c2c49790e36886d76bd73ff13895..e6ceabf583e0bfa3e27ebbce9641d57340cbb94d 100644 --- a/polkadot/bridges/relays/client-polkadot/src/lib.rs +++ b/polkadot/bridges/relays/client-polkadot/src/lib.rs @@ -16,9 +16,17 @@ //! Types used to connect to the Polkadot chain. -use relay_substrate_client::{Chain, ChainBase}; +use codec::Encode; +use relay_substrate_client::{ + Chain, ChainBase, ChainWithBalances, TransactionEraOf, TransactionSignScheme, + UnsignedTransaction, +}; +use sp_core::{storage::StorageKey, Pair}; +use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; use std::time::Duration; +pub mod runtime; + /// Polkadot header id. pub type HeaderId = relay_utils::HeaderId; @@ -31,18 +39,86 @@ impl ChainBase for Polkadot { type Hash = bp_polkadot::Hash; type Hasher = bp_polkadot::Hasher; type Header = bp_polkadot::Header; + + type AccountId = bp_polkadot::AccountId; + type Balance = bp_polkadot::Balance; + type Index = bp_polkadot::Nonce; + type Signature = bp_polkadot::Signature; } impl Chain for Polkadot { const NAME: &'static str = "Polkadot"; const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); + const STORAGE_PROOF_OVERHEAD: u32 = bp_polkadot::EXTRA_STORAGE_PROOF_SIZE; + const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_polkadot::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE; - type AccountId = bp_polkadot::AccountId; - type Index = bp_polkadot::Nonce; type SignedBlock = bp_polkadot::SignedBlock; - type Call = (); - type Balance = bp_polkadot::Balance; + type Call = crate::runtime::Call; + type WeightToFee = bp_polkadot::WeightToFee; +} + +impl ChainWithBalances for Polkadot { + fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { + StorageKey(bp_polkadot::account_info_storage_key(account_id)) + } +} + +impl TransactionSignScheme for Polkadot { + type Chain = Polkadot; + type AccountKeyPair = sp_core::sr25519::Pair; + type SignedTransaction = 
crate::runtime::UncheckedExtrinsic; + + fn sign_transaction( + genesis_hash: ::Hash, + signer: &Self::AccountKeyPair, + era: TransactionEraOf, + unsigned: UnsignedTransaction, + ) -> Self::SignedTransaction { + let raw_payload = SignedPayload::new( + unsigned.call, + bp_polkadot::SignedExtensions::new( + bp_polkadot::VERSION, + era, + genesis_hash, + unsigned.nonce, + unsigned.tip, + ), + ) + .expect("SignedExtension never fails."); + + let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); + let signer: sp_runtime::MultiSigner = signer.public().into(); + let (call, extra, _) = raw_payload.deconstruct(); + + bp_polkadot::UncheckedExtrinsic::new_signed( + call, + sp_runtime::MultiAddress::Id(signer.into_account()), + signature.into(), + extra, + ) + } + + fn is_signed(tx: &Self::SignedTransaction) -> bool { + tx.signature.is_some() + } + + fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool { + tx.signature + .as_ref() + .map(|(address, _, _)| { + *address == bp_polkadot::AccountId::from(*signer.public().as_array_ref()).into() + }) + .unwrap_or(false) + } + + fn parse_transaction(tx: Self::SignedTransaction) -> Option> { + let extra = &tx.signature.as_ref()?.2; + Some(UnsignedTransaction { call: tx.function, nonce: extra.nonce(), tip: extra.tip() }) + } } /// Polkadot header type used in headers sync. pub type SyncHeader = relay_substrate_client::SyncHeader; + +/// Polkadot signing params. +pub type SigningParams = sp_core::sr25519::Pair; diff --git a/polkadot/bridges/relays/client-polkadot/src/runtime.rs b/polkadot/bridges/relays/client-polkadot/src/runtime.rs new file mode 100644 index 0000000000000000000000000000000000000000..8b125a37843c84198d919be6298a05df27520c72 --- /dev/null +++ b/polkadot/bridges/relays/client-polkadot/src/runtime.rs @@ -0,0 +1,154 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Types that are specific to the Polkadot runtime. + +use bp_messages::{LaneId, UnrewardedRelayersState}; +use bp_polkadot_core::{AccountAddress, Balance, PolkadotLike}; +use bp_runtime::Chain; +use codec::{Compact, Decode, Encode}; +use frame_support::weights::Weight; +use scale_info::TypeInfo; +use sp_runtime::FixedU128; + +/// Unchecked Polkadot extrinsic. +pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; + +/// Kusama account ownership digest from Polkadot. +/// +/// The byte vector returned by this function should be signed with a Kusama account private key. +/// This way, the owner of `kusama_account_id` on Polkadot proves that the Kusama account private key +/// is also under their control. +pub fn polkadot_to_kusama_account_ownership_digest( + kusama_call: &Call, + kusama_account_id: AccountId, + kusama_spec_version: SpecVersion, +) -> Vec +where + Call: codec::Encode, + AccountId: codec::Encode, + SpecVersion: codec::Encode, +{ + pallet_bridge_dispatch::account_ownership_digest( + kusama_call, + kusama_account_id, + kusama_spec_version, + bp_runtime::POLKADOT_CHAIN_ID, + bp_runtime::KUSAMA_CHAIN_ID, + ) +} + +/// Polkadot Runtime `Call` enum. +/// +/// The enum represents a subset of possible `Call`s we can send to Polkadot chain. 
+/// Ideally this code would be auto-generated from metadata, because we want to +/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. +/// +/// All entries here (like pretty much in the entire file) must be kept in sync with Polkadot +/// `construct_runtime`, so that we maintain SCALE-compatibility. +/// +/// See: [link](https://github.com/paritytech/polkadot/blob/master/runtime/polkadot/src/lib.rs) +#[allow(clippy::large_enum_variant)] +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] +pub enum Call { + /// System pallet. + #[codec(index = 0)] + System(SystemCall), + /// Balances pallet. + #[codec(index = 5)] + Balances(BalancesCall), + /// Kusama bridge pallet. + #[codec(index = 110)] + BridgeKusamaGrandpa(BridgeKusamaGrandpaCall), + /// Kusama messages pallet. + #[codec(index = 111)] + BridgeKusamaMessages(BridgeKusamaMessagesCall), +} + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] +#[allow(non_camel_case_types)] +pub enum SystemCall { + #[codec(index = 1)] + remark(Vec), +} + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] +#[allow(non_camel_case_types)] +pub enum BalancesCall { + #[codec(index = 0)] + transfer(AccountAddress, Compact), +} + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] +#[allow(non_camel_case_types)] +pub enum BridgeKusamaGrandpaCall { + #[codec(index = 0)] + submit_finality_proof( + Box<::Header>, + bp_header_chain::justification::GrandpaJustification<::Header>, + ), + #[codec(index = 1)] + initialize(bp_header_chain::InitializationData<::Header>), +} + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] +#[allow(non_camel_case_types)] +pub enum BridgeKusamaMessagesCall { + #[codec(index = 2)] + update_pallet_parameter(BridgeKusamaMessagesParameter), + #[codec(index = 3)] + send_message( + LaneId, + bp_message_dispatch::MessagePayload< + bp_polkadot::AccountId, + bp_kusama::AccountId, + bp_kusama::AccountPublic, + 
Vec, + >, + bp_polkadot::Balance, + ), + #[codec(index = 5)] + receive_messages_proof( + bp_kusama::AccountId, + bridge_runtime_common::messages::target::FromBridgedChainMessagesProof, + u32, + Weight, + ), + #[codec(index = 6)] + receive_messages_delivery_proof( + bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof< + bp_kusama::Hash, + >, + UnrewardedRelayersState, + ), +} + +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] +pub enum BridgeKusamaMessagesParameter { + #[codec(index = 0)] + KusamaToPolkadotConversionRate(FixedU128), +} + +impl sp_runtime::traits::Dispatchable for Call { + type Origin = (); + type Config = (); + type Info = (); + type PostInfo = (); + + fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { + unimplemented!("The Call is not expected to be dispatched.") + } +} diff --git a/polkadot/bridges/relays/client-rialto-parachain/Cargo.toml b/polkadot/bridges/relays/client-rialto-parachain/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..e4518c6877652fec50440ac9b432344827733a12 --- /dev/null +++ b/polkadot/bridges/relays/client-rialto-parachain/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "relay-rialto-parachain-client" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +relay-substrate-client = { path = "../client-substrate" } +relay-utils = { path = "../utils" } + +# Bridge dependencies + +bp-rialto = { path = "../../primitives/chain-rialto" } +rialto-parachain-runtime = { path = "../../bin/rialto-parachain/runtime" } + +# Substrate Dependencies + +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git 
a/polkadot/bridges/relays/client-rialto-parachain/src/lib.rs b/polkadot/bridges/relays/client-rialto-parachain/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..ca299a0eeb78bdbb7c11eca5859c0e7bd375a60f --- /dev/null +++ b/polkadot/bridges/relays/client-rialto-parachain/src/lib.rs @@ -0,0 +1,51 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Types used to connect to the Rialto parachain. + +use relay_substrate_client::{Chain, ChainBase}; +use std::time::Duration; + +/// Rialto parachain header id. 
+pub type HeaderId = + relay_utils::HeaderId; + +/// Rialto parachain definition +#[derive(Debug, Clone, Copy)] +pub struct RialtoParachain; + +impl ChainBase for RialtoParachain { + type BlockNumber = rialto_parachain_runtime::BlockNumber; + type Hash = rialto_parachain_runtime::Hash; + type Hasher = rialto_parachain_runtime::Hashing; + type Header = rialto_parachain_runtime::Header; + + type AccountId = rialto_parachain_runtime::AccountId; + type Balance = rialto_parachain_runtime::Balance; + type Index = rialto_parachain_runtime::Index; + type Signature = rialto_parachain_runtime::Signature; +} + +impl Chain for RialtoParachain { + const NAME: &'static str = "RialtoParachain"; + const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(5); + const STORAGE_PROOF_OVERHEAD: u32 = bp_rialto::EXTRA_STORAGE_PROOF_SIZE; + const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE; + + type SignedBlock = rialto_parachain_runtime::SignedBlock; + type Call = rialto_parachain_runtime::Call; + type WeightToFee = bp_rialto::WeightToFee; +} diff --git a/polkadot/bridges/relays/client-rialto/Cargo.toml b/polkadot/bridges/relays/client-rialto/Cargo.toml index 88e8e12add4019c56a8181891f12d65e31391e73..3132b26d27fc183e8e334747888c0bb6fcf2eb03 100644 --- a/polkadot/bridges/relays/client-rialto/Cargo.toml +++ b/polkadot/bridges/relays/client-rialto/Cargo.toml @@ -6,13 +6,13 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers" } +codec = { package = "parity-scale-codec", version = "2.2.0" } relay-substrate-client = { path = "../client-substrate" } relay-utils = { path = "../utils" } # Bridge dependencies +bp-rialto = { path = "../../primitives/chain-rialto" } rialto-runtime = { path = "../../bin/rialto/runtime" } # Substrate Dependencies @@ -21,5 +21,4 @@ frame-system = { git = 
"https://github.com/paritytech/substrate", branch = "mast frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-rialto/src/lib.rs b/polkadot/bridges/relays/client-rialto/src/lib.rs index 4a0023a87c4f57445d5fe196d592242445b68490..42ed8bce3bd9b432726d7ba138f16668e50ebd6e 100644 --- a/polkadot/bridges/relays/client-rialto/src/lib.rs +++ b/polkadot/bridges/relays/client-rialto/src/lib.rs @@ -16,8 +16,11 @@ //! Types used to connect to the Rialto-Substrate chain. -use codec::Encode; -use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme}; +use codec::{Compact, Decode, Encode}; +use relay_substrate_client::{ + BalanceOf, Chain, ChainBase, ChainWithBalances, IndexOf, TransactionEraOf, + TransactionSignScheme, UnsignedTransaction, +}; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; use std::time::Duration; @@ -34,17 +37,22 @@ impl ChainBase for Rialto { type Hash = rialto_runtime::Hash; type Hasher = rialto_runtime::Hashing; type Header = rialto_runtime::Header; + + type AccountId = rialto_runtime::AccountId; + type Balance = rialto_runtime::Balance; + type Index = rialto_runtime::Index; + type Signature = rialto_runtime::Signature; } impl Chain for Rialto { const NAME: &'static str = "Rialto"; const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(5); + const STORAGE_PROOF_OVERHEAD: u32 = bp_rialto::EXTRA_STORAGE_PROOF_SIZE; + const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE; - type AccountId = 
rialto_runtime::AccountId; - type Index = rialto_runtime::Index; type SignedBlock = rialto_runtime::SignedBlock; type Call = rialto_runtime::Call; - type Balance = rialto_runtime::Balance; + type WeightToFee = bp_rialto::WeightToFee; } impl ChainWithBalances for Rialto { @@ -64,25 +72,25 @@ impl TransactionSignScheme for Rialto { fn sign_transaction( genesis_hash: ::Hash, signer: &Self::AccountKeyPair, - signer_nonce: ::Index, - call: ::Call, + era: TransactionEraOf, + unsigned: UnsignedTransaction, ) -> Self::SignedTransaction { let raw_payload = SignedPayload::from_raw( - call, + unsigned.call, ( frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(sp_runtime::generic::Era::Immortal), - frame_system::CheckNonce::::from(signer_nonce), + frame_system::CheckEra::::from(era.frame_era()), + frame_system::CheckNonce::::from(unsigned.nonce), frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(0), + pallet_transaction_payment::ChargeTransactionPayment::::from(unsigned.tip), ), ( rialto_runtime::VERSION.spec_version, rialto_runtime::VERSION.transaction_version, genesis_hash, - genesis_hash, + era.signed_payload(genesis_hash), (), (), (), @@ -92,7 +100,34 @@ impl TransactionSignScheme for Rialto { let signer: sp_runtime::MultiSigner = signer.public().into(); let (call, extra, _) = raw_payload.deconstruct(); - rialto_runtime::UncheckedExtrinsic::new_signed(call, signer.into_account(), signature.into(), extra) + rialto_runtime::UncheckedExtrinsic::new_signed( + call, + signer.into_account().into(), + signature.into(), + extra, + ) + } + + fn is_signed(tx: &Self::SignedTransaction) -> bool { + tx.signature.is_some() + } + + fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool { + tx.signature + .as_ref() + .map(|(address, _, _)| *address == rialto_runtime::Address::Id(signer.public().into())) + 
.unwrap_or(false) + } + + fn parse_transaction(tx: Self::SignedTransaction) -> Option> { + let extra = &tx.signature.as_ref()?.2; + Some(UnsignedTransaction { + call: tx.function, + nonce: Compact::>::decode(&mut &extra.4.encode()[..]).ok()?.into(), + tip: Compact::>::decode(&mut &extra.6.encode()[..]) + .ok()? + .into(), + }) } } diff --git a/polkadot/bridges/relays/client-rococo/Cargo.toml b/polkadot/bridges/relays/client-rococo/Cargo.toml index 5611ac27b1ce4b8409dff28a004e9219fe33eb58..28e97d3bf0cec3226402ac704eafe75dc3d7d4ad 100644 --- a/polkadot/bridges/relays/client-rococo/Cargo.toml +++ b/polkadot/bridges/relays/client-rococo/Cargo.toml @@ -6,12 +6,13 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers" } +codec = { package = "parity-scale-codec", version = "2.2.0" } relay-substrate-client = { path = "../client-substrate" } relay-utils = { path = "../utils" } +scale-info = { version = "1.0", features = ["derive"] } # Bridge dependencies + bridge-runtime-common = { path = "../../bin/runtime-common" } bp-header-chain = { path = "../../primitives/header-chain" } bp-message-dispatch = { path = "../../primitives/message-dispatch" } @@ -24,9 +25,7 @@ pallet-bridge-dispatch = { path = "../../modules/dispatch" } pallet-bridge-messages = { path = "../../modules/messages" } # Substrate Dependencies -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } + frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git 
a/polkadot/bridges/relays/client-rococo/src/lib.rs b/polkadot/bridges/relays/client-rococo/src/lib.rs index 5a7d8999f7f16f71e068ec6549c5606dd96ffafe..ad61e3cfd6437be5cf2c964d9f3f569beda51bdd 100644 --- a/polkadot/bridges/relays/client-rococo/src/lib.rs +++ b/polkadot/bridges/relays/client-rococo/src/lib.rs @@ -17,7 +17,10 @@ //! Types used to connect to the Rococo-Substrate chain. use codec::Encode; -use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme}; +use relay_substrate_client::{ + Chain, ChainBase, ChainWithBalances, TransactionEraOf, TransactionSignScheme, + UnsignedTransaction, +}; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; use std::time::Duration; @@ -39,17 +42,22 @@ impl ChainBase for Rococo { type Hash = bp_rococo::Hash; type Hasher = bp_rococo::Hashing; type Header = bp_rococo::Header; + + type AccountId = bp_rococo::AccountId; + type Balance = bp_rococo::Balance; + type Index = bp_rococo::Nonce; + type Signature = bp_rococo::Signature; } impl Chain for Rococo { const NAME: &'static str = "Rococo"; const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); + const STORAGE_PROOF_OVERHEAD: u32 = bp_rococo::EXTRA_STORAGE_PROOF_SIZE; + const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_rococo::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE; - type AccountId = bp_rococo::AccountId; - type Index = bp_rococo::Index; type SignedBlock = bp_rococo::SignedBlock; type Call = crate::runtime::Call; - type Balance = bp_rococo::Balance; + type WeightToFee = bp_rococo::WeightToFee; } impl ChainWithBalances for Rococo { @@ -66,17 +74,17 @@ impl TransactionSignScheme for Rococo { fn sign_transaction( genesis_hash: ::Hash, signer: &Self::AccountKeyPair, - signer_nonce: ::Index, - call: ::Call, + era: TransactionEraOf, + unsigned: UnsignedTransaction, ) -> Self::SignedTransaction { let raw_payload = SignedPayload::new( - call, + unsigned.call, bp_rococo::SignedExtensions::new( 
bp_rococo::VERSION, - sp_runtime::generic::Era::Immortal, + era, genesis_hash, - signer_nonce, - 0, + unsigned.nonce, + unsigned.tip, ), ) .expect("SignedExtension never fails."); @@ -92,6 +100,24 @@ impl TransactionSignScheme for Rococo { extra, ) } + + fn is_signed(tx: &Self::SignedTransaction) -> bool { + tx.signature.is_some() + } + + fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool { + tx.signature + .as_ref() + .map(|(address, _, _)| { + *address == bp_rococo::AccountId::from(*signer.public().as_array_ref()).into() + }) + .unwrap_or(false) + } + + fn parse_transaction(tx: Self::SignedTransaction) -> Option> { + let extra = &tx.signature.as_ref()?.2; + Some(UnsignedTransaction { call: tx.function, nonce: extra.nonce(), tip: extra.tip() }) + } } /// Rococo signing params. diff --git a/polkadot/bridges/relays/client-rococo/src/runtime.rs b/polkadot/bridges/relays/client-rococo/src/runtime.rs index 6dbd40bee56086fff1cd102fd251223c51ab3945..effe6e5c60a9d87fe7163c156934a0e966d83f5c 100644 --- a/polkadot/bridges/relays/client-rococo/src/runtime.rs +++ b/polkadot/bridges/relays/client-rococo/src/runtime.rs @@ -21,9 +21,7 @@ use bp_polkadot_core::PolkadotLike; use bp_runtime::Chain; use codec::{Decode, Encode}; use frame_support::weights::Weight; - -/// Instance of messages pallet that is used to bridge with Wococo chain. -pub type WithWococoMessagesInstance = pallet_bridge_messages::Instance1; +use scale_info::TypeInfo; /// Unchecked Rococo extrinsic. pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; @@ -55,15 +53,15 @@ where /// Rococo Runtime `Call` enum. /// /// The enum represents a subset of possible `Call`s we can send to Rococo chain. -/// Ideally this code would be auto-generated from Metadata, because we want to +/// Ideally this code would be auto-generated from metadata, because we want to /// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. 
/// /// All entries here (like pretty much in the entire file) must be kept in sync with Rococo /// `construct_runtime`, so that we maintain SCALE-compatibility. /// -/// See: https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs +/// See: [link](https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs) #[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)] +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] pub enum Call { /// System pallet. #[codec(index = 0)] @@ -76,26 +74,26 @@ pub enum Call { BridgeMessagesWococo(BridgeMessagesWococoCall), } -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)] +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] #[allow(non_camel_case_types)] pub enum SystemCall { #[codec(index = 1)] remark(Vec), } -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)] +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] #[allow(non_camel_case_types)] pub enum BridgeGrandpaWococoCall { #[codec(index = 0)] submit_finality_proof( - ::Header, + Box<::Header>, bp_header_chain::justification::GrandpaJustification<::Header>, ), #[codec(index = 1)] initialize(bp_header_chain::InitializationData<::Header>), } -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)] +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] #[allow(non_camel_case_types)] pub enum BridgeMessagesWococoCall { #[codec(index = 3)] @@ -118,7 +116,9 @@ pub enum BridgeMessagesWococoCall { ), #[codec(index = 6)] receive_messages_delivery_proof( - bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof, + bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof< + bp_wococo::Hash, + >, UnrewardedRelayersState, ), } diff --git a/polkadot/bridges/relays/client-substrate/Cargo.toml b/polkadot/bridges/relays/client-substrate/Cargo.toml index 
f5c2e2656059362d0758e5051e034e491057216d..2eb07fdcde4674c6ad286fba51f8d1f5374bc3b1 100644 --- a/polkadot/bridges/relays/client-substrate/Cargo.toml +++ b/polkadot/bridges/relays/client-substrate/Cargo.toml @@ -8,20 +8,20 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] async-std = { version = "1.6.5", features = ["attributes"] } async-trait = "0.1.40" -codec = { package = "parity-scale-codec", version = "2.0.0" } -jsonrpsee-proc-macros = "=0.2.0-alpha.6" -jsonrpsee-ws-client = "=0.2.0-alpha.6" +codec = { package = "parity-scale-codec", version = "2.2.0" } +jsonrpsee-proc-macros = "0.3.1" +jsonrpsee-ws-client = "0.3.1" log = "0.4.11" num-traits = "0.2" rand = "0.7" +tokio = "1.8" +thiserror = "1.0.26" # Bridge dependencies bp-header-chain = { path = "../../primitives/header-chain" } -bp-messages = { path = "../../primitives/messages" } bp-runtime = { path = "../../primitives/runtime" } finality-relay = { path = "../finality" } -headers-relay = { path = "../headers" } relay-utils = { path = "../utils" } # Substrate Dependencies @@ -29,12 +29,15 @@ relay-utils = { path = "../utils" } frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sc-rpc-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-transaction-pool-api = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-rpc = { git = 
"https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-storage = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-substrate/src/chain.rs b/polkadot/bridges/relays/client-substrate/src/chain.rs index 886b1bde1ce6740b9526dfeeca5f780d191beac4..75789ce37f308572ce96dac8d8886e4924db5672 100644 --- a/polkadot/bridges/relays/client-substrate/src/chain.rs +++ b/polkadot/bridges/relays/client-substrate/src/chain.rs @@ -14,16 +14,16 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use bp_runtime::Chain as ChainBase; -use frame_support::Parameter; -use jsonrpsee_ws_client::{DeserializeOwned, Serialize}; -use num_traits::{CheckedSub, SaturatingAdd, Zero}; +use bp_runtime::{Chain as ChainBase, HashOf, TransactionEraOf}; +use codec::{Codec, Encode}; +use frame_support::weights::WeightToFeePolynomial; +use jsonrpsee_ws_client::types::{DeserializeOwned, Serialize}; +use num_traits::Zero; +use sc_transaction_pool_api::TransactionStatus; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{ generic::SignedBlock, - traits::{ - AtLeast32Bit, Block as BlockT, Dispatchable, MaybeDisplay, MaybeSerialize, MaybeSerializeDeserialize, Member, - }, + traits::{Block as BlockT, Dispatchable, Member}, EncodedJustification, }; use std::{fmt::Debug, time::Duration}; @@ -37,30 +37,27 @@ pub trait Chain: ChainBase + Clone { /// How often blocks are produced on that chain. It's suggested to set this value /// to match the block time of the chain. 
const AVERAGE_BLOCK_INTERVAL: Duration; + /// Maximal expected storage proof overhead (in bytes). + const STORAGE_PROOF_OVERHEAD: u32; + /// Maximal size (in bytes) of SCALE-encoded account id on this chain. + const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32; - /// The user account identifier type for the runtime. - type AccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord + Default; - /// Index of a transaction used by the chain. - type Index: Parameter - + Member - + MaybeSerialize - + Debug - + Default - + MaybeDisplay - + DeserializeOwned - + AtLeast32Bit - + Copy; /// Block type. type SignedBlock: Member + Serialize + DeserializeOwned + BlockWithJustification; /// The aggregated `Call` type. - type Call: Dispatchable + Debug; - /// Balance of an account in native tokens. - /// - /// The chain may support multiple tokens, but this particular type is for token that is used - /// to pay for transaction dispatch, to reward different relayers (headers, messages), etc. - type Balance: Parameter + Member + DeserializeOwned + Clone + Copy + CheckedSub + PartialOrd + SaturatingAdd + Zero; + type Call: Clone + Dispatchable + Debug; + + /// Type that is used by the chain, to convert from weight to fee. + type WeightToFee: WeightToFeePolynomial; } +/// Call type used by the chain. +pub type CallOf = ::Call; +/// Weight-to-Fee type used by the chain. +pub type WeightToFeeOf = ::WeightToFee; +/// Transaction status of the chain. +pub type TransactionStatusOf = TransactionStatus, HashOf>; + /// Substrate-based chain with `frame_system::Config::AccountData` set to /// the `pallet_balances::AccountData`. pub trait ChainWithBalances: Chain { @@ -68,14 +65,43 @@ pub trait ChainWithBalances: Chain { fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey; } +/// SCALE-encoded extrinsic. +pub type EncodedExtrinsic = Vec; + /// Block with justification. pub trait BlockWithJustification
{ /// Return block header. fn header(&self) -> Header; + /// Return encoded block extrinsics. + fn extrinsics(&self) -> Vec; /// Return block justification, if known. fn justification(&self) -> Option<&EncodedJustification>; } +/// Transaction before it is signed. +#[derive(Clone, Debug)] +pub struct UnsignedTransaction { + /// Runtime call of this transaction. + pub call: C::Call, + /// Transaction nonce. + pub nonce: C::Index, + /// Tip included into transaction. + pub tip: C::Balance, +} + +impl UnsignedTransaction { + /// Create new unsigned transaction with given call, nonce and zero tip. + pub fn new(call: C::Call, nonce: C::Index) -> Self { + Self { call, nonce, tip: Zero::zero() } + } + + /// Set transaction tip. + pub fn tip(mut self, tip: C::Balance) -> Self { + self.tip = tip; + self + } +} + /// Substrate-based chain transactions signing scheme. pub trait TransactionSignScheme { /// Chain that this scheme is to be used. @@ -83,15 +109,26 @@ pub trait TransactionSignScheme { /// Type of key pairs used to sign transactions. type AccountKeyPair: Pair; /// Signed transaction. - type SignedTransaction; + type SignedTransaction: Clone + Debug + Codec + Send + 'static; /// Create transaction for given runtime call, signed by given account. fn sign_transaction( genesis_hash: ::Hash, signer: &Self::AccountKeyPair, - signer_nonce: ::Index, - call: ::Call, + era: TransactionEraOf, + unsigned: UnsignedTransaction, ) -> Self::SignedTransaction; + + /// Returns true if transaction is signed. + fn is_signed(tx: &Self::SignedTransaction) -> bool; + + /// Returns true if transaction is signed by given signer. + fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool; + + /// Parse signed transaction into its unsigned part. + /// + /// Returns `None` if signed transaction has unsupported format. 
+ fn parse_transaction(tx: Self::SignedTransaction) -> Option>; } impl BlockWithJustification for SignedBlock { @@ -99,6 +136,10 @@ impl BlockWithJustification for SignedBlock self.block.header().clone() } + fn extrinsics(&self) -> Vec { + self.block.extrinsics().iter().map(Encode::encode).collect() + } + fn justification(&self) -> Option<&EncodedJustification> { self.justifications .as_ref() diff --git a/polkadot/bridges/relays/client-substrate/src/client.rs b/polkadot/bridges/relays/client-substrate/src/client.rs index e4f503b91752ba2cfa5908a5efdb339ef9917315..1902875c938107c2e2e54eb012f1ad5fe2bf7f97 100644 --- a/polkadot/bridges/relays/client-substrate/src/client.rs +++ b/polkadot/bridges/relays/client-substrate/src/client.rs @@ -16,27 +16,46 @@ //! Substrate node client. -use crate::chain::{Chain, ChainWithBalances}; -use crate::rpc::Substrate; -use crate::{ConnectionParams, Error, Result}; +use crate::{ + chain::{Chain, ChainWithBalances, TransactionStatusOf}, + rpc::Substrate, + ConnectionParams, Error, HashOf, HeaderIdOf, Result, +}; use async_std::sync::{Arc, Mutex}; -use codec::Decode; +use async_trait::async_trait; +use codec::{Decode, Encode}; use frame_system::AccountInfo; -use jsonrpsee_ws_client::{traits::SubscriptionClient, v2::params::JsonRpcParams, DeserializeOwned}; -use jsonrpsee_ws_client::{Subscription, WsClient as RpcClient, WsClientBuilder as RpcClientBuilder}; -use num_traits::Zero; +use futures::{SinkExt, StreamExt}; +use jsonrpsee_ws_client::{ + types::{ + self as jsonrpsee_types, traits::SubscriptionClient, v2::params::JsonRpcParams, + DeserializeOwned, + }, + WsClient as RpcClient, WsClientBuilder as RpcClientBuilder, +}; +use num_traits::{Bounded, Zero}; use pallet_balances::AccountData; -use relay_utils::relay_loop::RECONNECT_DELAY; -use sp_core::{storage::StorageKey, Bytes}; +use pallet_transaction_payment::InclusionFee; +use relay_utils::{relay_loop::RECONNECT_DELAY, HeaderId}; +use sp_core::{ + storage::{StorageData, StorageKey}, + 
Bytes, Hasher, +}; +use sp_runtime::{ + traits::Header as HeaderT, + transaction_validity::{TransactionSource, TransactionValidity}, +}; use sp_trie::StorageProof; use sp_version::RuntimeVersion; +use std::{convert::TryFrom, future::Future}; const SUB_API_GRANDPA_AUTHORITIES: &str = "GrandpaApi_grandpa_authorities"; +const SUB_API_TXPOOL_VALIDATE_TRANSACTION: &str = "TaggedTransactionQueue_validate_transaction"; const MAX_SUBSCRIPTION_CAPACITY: usize = 4096; /// Opaque justifications subscription type. -pub type JustificationsSubscription = Subscription; +pub struct Subscription(Mutex>>); /// Opaque GRANDPA authorities set. pub type OpaqueGrandpaAuthoritiesSet = Vec; @@ -45,21 +64,37 @@ pub type OpaqueGrandpaAuthoritiesSet = Vec; /// /// Cloning `Client` is a cheap operation. pub struct Client { + /// Tokio runtime handle. + tokio: Arc, /// Client connection params. params: ConnectionParams, /// Substrate RPC client. client: Arc, /// Genesis block hash. - genesis_hash: C::Hash, - /// If several tasks are submitting their transactions simultaneously using `submit_signed_extrinsic` - /// method, they may get the same transaction nonce. So one of transactions will be rejected - /// from the pool. This lock is here to prevent situations like that. + genesis_hash: HashOf, + /// If several tasks are submitting their transactions simultaneously using + /// `submit_signed_extrinsic` method, they may get the same transaction nonce. So one of + /// transactions will be rejected from the pool. This lock is here to prevent situations like + /// that. 
submit_signed_extrinsic_lock: Arc>, } +#[async_trait] +impl relay_utils::relay_loop::Client for Client { + type Error = Error; + + async fn reconnect(&mut self) -> Result<()> { + let (tokio, client) = Self::build_client(self.params.clone()).await?; + self.tokio = tokio; + self.client = client; + Ok(()) + } +} + impl Clone for Client { fn clone(&self) -> Self { Client { + tokio: self.tokio.clone(), params: self.params.clone(), client: self.client.clone(), genesis_hash: self.genesis_hash, @@ -70,9 +105,7 @@ impl Clone for Client { impl std::fmt::Debug for Client { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("Client") - .field("genesis_hash", &self.genesis_hash) - .finish() + fmt.debug_struct("Client").field("genesis_hash", &self.genesis_hash).finish() } } @@ -101,12 +134,18 @@ impl Client { /// Try to connect to Substrate node over websocket. Returns Substrate RPC client if connection /// has been established or error otherwise. pub async fn try_connect(params: ConnectionParams) -> Result { - let client = Self::build_client(params.clone()).await?; + let (tokio, client) = Self::build_client(params.clone()).await?; let number: C::BlockNumber = Zero::zero(); - let genesis_hash = Substrate::::chain_get_block_hash(&*client, number).await?; + let genesis_hash_client = client.clone(); + let genesis_hash = tokio + .spawn(async move { + Substrate::::chain_get_block_hash(&*genesis_hash_client, number).await + }) + .await??; Ok(Self { + tokio, params, client, genesis_hash, @@ -114,39 +153,43 @@ impl Client { }) } - /// Reopen client connection. - pub async fn reconnect(&mut self) -> Result<()> { - self.client = Self::build_client(self.params.clone()).await?; - Ok(()) - } - /// Build client to use in connection. 
- async fn build_client(params: ConnectionParams) -> Result> { + async fn build_client( + params: ConnectionParams, + ) -> Result<(Arc, Arc)> { + let tokio = tokio::runtime::Runtime::new()?; let uri = format!( "{}://{}:{}", if params.secure { "wss" } else { "ws" }, params.host, params.port, ); - let client = RpcClientBuilder::default() - .max_notifs_per_subscription(MAX_SUBSCRIPTION_CAPACITY) - .build(&uri) - .await?; - - Ok(Arc::new(client)) + let client = tokio + .spawn(async move { + RpcClientBuilder::default() + .max_notifs_per_subscription(MAX_SUBSCRIPTION_CAPACITY) + .build(&uri) + .await + }) + .await??; + + Ok((Arc::new(tokio), Arc::new(client))) } } impl Client { /// Returns true if client is connected to at least one peer and is in synced state. pub async fn ensure_synced(&self) -> Result<()> { - let health = Substrate::::system_health(&*self.client).await?; - let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0); - if is_synced { - Ok(()) - } else { - Err(Error::ClientNotSynced(health)) - } + self.jsonrpsee_execute(|client| async move { + let health = Substrate::::system_health(&*client).await?; + let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0); + if is_synced { + Ok(()) + } else { + Err(Error::ClientNotSynced(health)) + } + }) + .await } /// Return hash of the genesis block. @@ -156,7 +199,15 @@ impl Client { /// Return hash of the best finalized block. pub async fn best_finalized_header_hash(&self) -> Result { - Ok(Substrate::::chain_get_finalized_head(&*self.client).await?) + self.jsonrpsee_execute(|client| async move { + Ok(Substrate::::chain_get_finalized_head(&*client).await?) + }) + .await + } + + /// Return number of the best finalized block. + pub async fn best_finalized_header_number(&self) -> Result { + Ok(*self.header_by_hash(self.best_finalized_header_hash().await?).await?.number()) } /// Returns the best Substrate header. 
@@ -164,12 +215,18 @@ impl Client { where C::Header: DeserializeOwned, { - Ok(Substrate::::chain_get_header(&*self.client, None).await?) + self.jsonrpsee_execute(|client| async move { + Ok(Substrate::::chain_get_header(&*client, None).await?) + }) + .await } /// Get a Substrate block from its hash. pub async fn get_block(&self, block_hash: Option) -> Result { - Ok(Substrate::::chain_get_block(&*self.client, block_hash).await?) + self.jsonrpsee_execute(move |client| async move { + Ok(Substrate::::chain_get_block(&*client, block_hash).await?) + }) + .await } /// Get a Substrate header by its hash. @@ -177,12 +234,18 @@ impl Client { where C::Header: DeserializeOwned, { - Ok(Substrate::::chain_get_header(&*self.client, block_hash).await?) + self.jsonrpsee_execute(move |client| async move { + Ok(Substrate::::chain_get_header(&*client, block_hash).await?) + }) + .await } /// Get a Substrate block hash by its number. pub async fn block_hash_by_number(&self, number: C::BlockNumber) -> Result { - Ok(Substrate::::chain_get_block_hash(&*self.client, number).await?) + self.jsonrpsee_execute(move |client| async move { + Ok(Substrate::::chain_get_block_hash(&*client, number).await?) + }) + .await } /// Get a Substrate header by its number. @@ -191,51 +254,84 @@ impl Client { C::Header: DeserializeOwned, { let block_hash = Self::block_hash_by_number(self, block_number).await?; - Ok(Self::header_by_hash(self, block_hash).await?) + let header_by_hash = Self::header_by_hash(self, block_hash).await?; + Ok(header_by_hash) } /// Return runtime version. pub async fn runtime_version(&self) -> Result { - Ok(Substrate::::state_runtime_version(&*self.client).await?) + self.jsonrpsee_execute(move |client| async move { + Ok(Substrate::::state_runtime_version(&*client).await?) + }) + .await } /// Read value from runtime storage. 
- pub async fn storage_value(&self, storage_key: StorageKey) -> Result> { - Substrate::::state_get_storage(&*self.client, storage_key) + pub async fn storage_value( + &self, + storage_key: StorageKey, + block_hash: Option, + ) -> Result> { + self.raw_storage_value(storage_key, block_hash) .await? - .map(|encoded_value| T::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed)) + .map(|encoded_value| { + T::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) + }) .transpose() } + /// Read raw value from runtime storage. + pub async fn raw_storage_value( + &self, + storage_key: StorageKey, + block_hash: Option, + ) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(Substrate::::state_get_storage(&*client, storage_key, block_hash).await?) + }) + .await + } + /// Return native tokens balance of the account. pub async fn free_native_balance(&self, account: C::AccountId) -> Result where C: ChainWithBalances, { - let storage_key = C::account_info_storage_key(&account); - let encoded_account_data = Substrate::::state_get_storage(&*self.client, storage_key) - .await? - .ok_or(Error::AccountDoesNotExist)?; - let decoded_account_data = - AccountInfo::>::decode(&mut &encoded_account_data.0[..]) - .map_err(Error::ResponseParseFailed)?; - Ok(decoded_account_data.data.free) + self.jsonrpsee_execute(move |client| async move { + let storage_key = C::account_info_storage_key(&account); + let encoded_account_data = + Substrate::::state_get_storage(&*client, storage_key, None) + .await? + .ok_or(Error::AccountDoesNotExist)?; + let decoded_account_data = AccountInfo::>::decode( + &mut &encoded_account_data.0[..], + ) + .map_err(Error::ResponseParseFailed)?; + Ok(decoded_account_data.data.free) + }) + .await } /// Get the nonce of the given Substrate account. /// /// Note: It's the caller's responsibility to make sure `account` is a valid SS58 address. 
pub async fn next_account_index(&self, account: C::AccountId) -> Result { - Ok(Substrate::::system_account_next_index(&*self.client, account).await?) + self.jsonrpsee_execute(move |client| async move { + Ok(Substrate::::system_account_next_index(&*client, account).await?) + }) + .await } /// Submit unsigned extrinsic for inclusion in a block. /// /// Note: The given transaction needs to be SCALE encoded beforehand. pub async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result { - let tx_hash = Substrate::::author_submit_extrinsic(&*self.client, transaction).await?; - log::trace!(target: "bridge", "Sent transaction to Substrate node: {:?}", tx_hash); - Ok(tx_hash) + self.jsonrpsee_execute(move |client| async move { + let tx_hash = Substrate::::author_submit_extrinsic(&*client, transaction).await?; + log::trace!(target: "bridge", "Sent transaction to Substrate node: {:?}", tx_hash); + Ok(tx_hash) + }) + .await } /// Submit an extrinsic signed by given account. @@ -248,51 +344,242 @@ impl Client { pub async fn submit_signed_extrinsic( &self, extrinsic_signer: C::AccountId, - prepare_extrinsic: impl FnOnce(C::Index) -> Bytes, + prepare_extrinsic: impl FnOnce(HeaderIdOf, C::Index) -> Bytes + Send + 'static, ) -> Result { let _guard = self.submit_signed_extrinsic_lock.lock().await; let transaction_nonce = self.next_account_index(extrinsic_signer).await?; - let extrinsic = prepare_extrinsic(transaction_nonce); - let tx_hash = Substrate::::author_submit_extrinsic(&*self.client, extrinsic).await?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - Ok(tx_hash) + let best_header = self.best_header().await?; + let best_header_id = HeaderId(*best_header.number(), best_header.hash()); + self.jsonrpsee_execute(move |client| async move { + let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce); + let tx_hash = Substrate::::author_submit_extrinsic(&*client, extrinsic).await?; + log::trace!(target: "bridge", "Sent 
transaction to {} node: {:?}", C::NAME, tx_hash); + Ok(tx_hash) + }) + .await + } + + /// Does exactly the same as `submit_signed_extrinsic`, but keeps watching for extrinsic status + /// after submission. + pub async fn submit_and_watch_signed_extrinsic( + &self, + extrinsic_signer: C::AccountId, + prepare_extrinsic: impl FnOnce(HeaderIdOf, C::Index) -> Bytes + Send + 'static, + ) -> Result>> { + let _guard = self.submit_signed_extrinsic_lock.lock().await; + let transaction_nonce = self.next_account_index(extrinsic_signer).await?; + let best_header = self.best_header().await?; + let best_header_id = HeaderId(*best_header.number(), best_header.hash()); + let subscription = self + .jsonrpsee_execute(move |client| async move { + let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce); + let tx_hash = C::Hasher::hash(&extrinsic.0); + let subscription = client + .subscribe( + "author_submitAndWatchExtrinsic", + JsonRpcParams::Array(vec![jsonrpsee_types::to_json_value(extrinsic) + .map_err(|e| Error::RpcError(e.into()))?]), + "author_unwatchExtrinsic", + ) + .await?; + log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); + Ok(subscription) + }) + .await?; + let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); + self.tokio.spawn(Subscription::background_worker( + C::NAME.into(), + "extrinsic".into(), + subscription, + sender, + )); + Ok(Subscription(Mutex::new(receiver))) + } + + /// Returns pending extrinsics from transaction pool. + pub async fn pending_extrinsics(&self) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(Substrate::::author_pending_extrinsics(&*client).await?) + }) + .await + } + + /// Validate transaction at given block state. 
+ pub async fn validate_transaction( + &self, + at_block: C::Hash, + transaction: SignedTransaction, + ) -> Result { + self.jsonrpsee_execute(move |client| async move { + let call = SUB_API_TXPOOL_VALIDATE_TRANSACTION.to_string(); + let data = Bytes((TransactionSource::External, transaction, at_block).encode()); + + let encoded_response = + Substrate::::state_call(&*client, call, data, Some(at_block)).await?; + let validity = TransactionValidity::decode(&mut &encoded_response.0[..]) + .map_err(Error::ResponseParseFailed)?; + + Ok(validity) + }) + .await + } + + /// Estimate fee that will be spent on given extrinsic. + pub async fn estimate_extrinsic_fee( + &self, + transaction: Bytes, + ) -> Result> { + self.jsonrpsee_execute(move |client| async move { + let fee_details = + Substrate::::payment_query_fee_details(&*client, transaction, None).await?; + let inclusion_fee = fee_details + .inclusion_fee + .map(|inclusion_fee| InclusionFee { + base_fee: C::Balance::try_from(inclusion_fee.base_fee.into_u256()) + .unwrap_or_else(|_| C::Balance::max_value()), + len_fee: C::Balance::try_from(inclusion_fee.len_fee.into_u256()) + .unwrap_or_else(|_| C::Balance::max_value()), + adjusted_weight_fee: C::Balance::try_from( + inclusion_fee.adjusted_weight_fee.into_u256(), + ) + .unwrap_or_else(|_| C::Balance::max_value()), + }) + .unwrap_or_else(|| InclusionFee { + base_fee: Zero::zero(), + len_fee: Zero::zero(), + adjusted_weight_fee: Zero::zero(), + }); + Ok(inclusion_fee) + }) + .await } /// Get the GRANDPA authority set at given block. 
- pub async fn grandpa_authorities_set(&self, block: C::Hash) -> Result { - let call = SUB_API_GRANDPA_AUTHORITIES.to_string(); - let data = Bytes(Vec::new()); + pub async fn grandpa_authorities_set( + &self, + block: C::Hash, + ) -> Result { + self.jsonrpsee_execute(move |client| async move { + let call = SUB_API_GRANDPA_AUTHORITIES.to_string(); + let data = Bytes(Vec::new()); - let encoded_response = Substrate::::state_call(&*self.client, call, data, Some(block)).await?; - let authority_list = encoded_response.0; + let encoded_response = + Substrate::::state_call(&*client, call, data, Some(block)).await?; + let authority_list = encoded_response.0; - Ok(authority_list) + Ok(authority_list) + }) + .await } /// Execute runtime call at given block. - pub async fn state_call(&self, method: String, data: Bytes, at_block: Option) -> Result { - Substrate::::state_call(&*self.client, method, data, at_block) - .await - .map_err(Into::into) + pub async fn state_call( + &self, + method: String, + data: Bytes, + at_block: Option, + ) -> Result { + self.jsonrpsee_execute(move |client| async move { + Substrate::::state_call(&*client, method, data, at_block) + .await + .map_err(Into::into) + }) + .await } /// Returns storage proof of given storage keys. - pub async fn prove_storage(&self, keys: Vec, at_block: C::Hash) -> Result { - Substrate::::state_prove_storage(&*self.client, keys, Some(at_block)) - .await - .map(|proof| StorageProof::new(proof.proof.into_iter().map(|b| b.0).collect())) - .map_err(Into::into) + pub async fn prove_storage( + &self, + keys: Vec, + at_block: C::Hash, + ) -> Result { + self.jsonrpsee_execute(move |client| async move { + Substrate::::state_prove_storage(&*client, keys, Some(at_block)) + .await + .map(|proof| StorageProof::new(proof.proof.into_iter().map(|b| b.0).collect())) + .map_err(Into::into) + }) + .await } /// Return new justifications stream. 
- pub async fn subscribe_justifications(&self) -> Result { - Ok(self - .client - .subscribe( - "grandpa_subscribeJustifications", - JsonRpcParams::NoParams, - "grandpa_unsubscribeJustifications", - ) - .await?) + pub async fn subscribe_justifications(&self) -> Result> { + let subscription = self + .jsonrpsee_execute(move |client| async move { + Ok(client + .subscribe( + "grandpa_subscribeJustifications", + JsonRpcParams::NoParams, + "grandpa_unsubscribeJustifications", + ) + .await?) + }) + .await?; + let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); + self.tokio.spawn(Subscription::background_worker( + C::NAME.into(), + "justification".into(), + subscription, + sender, + )); + Ok(Subscription(Mutex::new(receiver))) + } + + /// Execute jsonrpsee future in tokio context. + async fn jsonrpsee_execute(&self, make_jsonrpsee_future: MF) -> Result + where + MF: FnOnce(Arc) -> F + Send + 'static, + F: Future> + Send, + T: Send + 'static, + { + let client = self.client.clone(); + self.tokio.spawn(async move { make_jsonrpsee_future(client).await }).await? + } +} + +impl Subscription { + /// Return next item from the subscription. + pub async fn next(&self) -> Result> { + let mut receiver = self.0.lock().await; + let item = receiver.next().await; + Ok(item.unwrap_or(None)) + } + + /// Background worker that is executed in tokio context as `jsonrpsee` requires. + async fn background_worker( + chain_name: String, + item_type: String, + mut subscription: jsonrpsee_types::Subscription, + mut sender: futures::channel::mpsc::Sender>, + ) { + loop { + match subscription.next().await { + Ok(Some(item)) => + if sender.send(Some(item)).await.is_err() { + break + }, + Ok(None) => { + log::trace!( + target: "bridge", + "{} {} subscription stream has returned None. 
Stream needs to be restarted.", + chain_name, + item_type, + ); + let _ = sender.send(None).await; + break + }, + Err(e) => { + log::trace!( + target: "bridge", + "{} {} subscription stream has returned '{:?}'. Stream needs to be restarted.", + chain_name, + item_type, + e, + ); + let _ = sender.send(None).await; + break + }, + } + } } } diff --git a/polkadot/bridges/relays/client-substrate/src/error.rs b/polkadot/bridges/relays/client-substrate/src/error.rs index 304229ede1986b733328a3c35d11ee59c8f38f05..33b9b22a03efe4d74f9375114cb4c5aa22663eb9 100644 --- a/polkadot/bridges/relays/client-substrate/src/error.rs +++ b/polkadot/bridges/relays/client-substrate/src/error.rs @@ -16,54 +16,55 @@ //! Substrate node RPC errors. -use jsonrpsee_ws_client::Error as RpcError; +use jsonrpsee_ws_client::types::Error as RpcError; use relay_utils::MaybeConnectionError; use sc_rpc_api::system::Health; +use sp_runtime::transaction_validity::TransactionValidityError; +use thiserror::Error; /// Result type used by Substrate client. pub type Result = std::result::Result; /// Errors that can occur only when interacting with /// a Substrate node through RPC. -#[derive(Debug)] +#[derive(Error, Debug)] pub enum Error { + /// IO error. + #[error("IO error: {0}")] + Io(#[from] std::io::Error), /// An error that can occur when making a request to /// an JSON-RPC server. - RpcError(RpcError), + #[error("RPC error: {0}")] + RpcError(#[from] RpcError), /// The response from the server could not be SCALE decoded. - ResponseParseFailed(codec::Error), + #[error("Response parse failed: {0}")] + ResponseParseFailed(#[from] codec::Error), /// The Substrate bridge pallet has not yet been initialized. + #[error("The Substrate bridge pallet has not been initialized yet.")] UninitializedBridgePallet, /// Account does not exist on the chain. + #[error("Account does not exist on the chain.")] AccountDoesNotExist, /// Runtime storage is missing mandatory ":code:" entry. 
+ #[error("Mandatory :code: entry is missing from runtime storage.")] MissingMandatoryCodeEntry, /// The client we're connected to is not synced, so we can't rely on its state. + #[error("Substrate client is not synced {0}.")] ClientNotSynced(Health), /// An error has happened when we have tried to parse storage proof. + #[error("Error when parsing storage proof: {0:?}.")] StorageProofError(bp_runtime::StorageProofError), + /// The Substrate transaction is invalid. + #[error("Substrate transaction is invalid: {0:?}")] + TransactionInvalid(#[from] TransactionValidityError), /// Custom logic error. + #[error("{0}")] Custom(String), } -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Self::RpcError(ref e) => Some(e), - Self::ResponseParseFailed(ref e) => Some(e), - Self::UninitializedBridgePallet => None, - Self::AccountDoesNotExist => None, - Self::MissingMandatoryCodeEntry => None, - Self::ClientNotSynced(_) => None, - Self::StorageProofError(_) => None, - Self::Custom(_) => None, - } - } -} - -impl From for Error { - fn from(error: RpcError) -> Self { - Error::RpcError(error) +impl From for Error { + fn from(error: tokio::task::JoinError) -> Self { + Error::Custom(format!("Failed to wait tokio task: {}", error)) } } @@ -71,7 +72,7 @@ impl MaybeConnectionError for Error { fn is_connection_error(&self) -> bool { matches!( *self, - Error::RpcError(RpcError::TransportError(_)) + Error::RpcError(RpcError::Transport(_)) // right now if connection to the ws server is dropped (after it is already established), // we're getting this error | Error::RpcError(RpcError::Internal(_)) @@ -80,26 +81,3 @@ impl MaybeConnectionError for Error { ) } } - -impl std::fmt::Display for Error { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let s = match self { - Self::RpcError(e) => e.to_string(), - Self::ResponseParseFailed(e) => e.to_string(), - Self::UninitializedBridgePallet => "The Substrate 
bridge pallet has not been initialized yet.".into(), - Self::AccountDoesNotExist => "Account does not exist on the chain".into(), - Self::MissingMandatoryCodeEntry => "Mandatory :code: entry is missing from runtime storage".into(), - Self::StorageProofError(e) => format!("Error when parsing storage proof: {:?}", e), - Self::ClientNotSynced(health) => format!("Substrate client is not synced: {}", health), - Self::Custom(e) => e.clone(), - }; - - write!(f, "{}", s) - } -} - -impl From for String { - fn from(error: Error) -> String { - error.to_string() - } -} diff --git a/polkadot/bridges/relays/client-substrate/src/finality_source.rs b/polkadot/bridges/relays/client-substrate/src/finality_source.rs index 72a11ae99003b378286bc8b25694cf7a06c98ad8..98526de178cb3de5654d61da1b0a1f0737a495a6 100644 --- a/polkadot/bridges/relays/client-substrate/src/finality_source.rs +++ b/polkadot/bridges/relays/client-substrate/src/finality_source.rs @@ -16,10 +16,12 @@ //! Default generic implementation of finality source for basic Substrate client. -use crate::chain::{BlockWithJustification, Chain}; -use crate::client::Client; -use crate::error::Error; -use crate::sync_header::SyncHeader; +use crate::{ + chain::{BlockWithJustification, Chain}, + client::Client, + error::Error, + sync_header::SyncHeader, +}; use async_std::sync::{Arc, Mutex}; use async_trait::async_trait; @@ -43,12 +45,11 @@ pub struct FinalitySource { impl FinalitySource { /// Create new headers source using given client. - pub fn new(client: Client, maximal_header_number: Option>) -> Self { - FinalitySource { - client, - maximal_header_number, - _phantom: Default::default(), - } + pub fn new( + client: Client, + maximal_header_number: Option>, + ) -> Self { + FinalitySource { client, maximal_header_number, _phantom: Default::default() } } /// Returns reference to the underlying RPC client. 
@@ -122,7 +123,9 @@ where let justification = signed_block .justification() - .map(|raw_justification| GrandpaJustification::::decode(&mut raw_justification.as_slice())) + .map(|raw_justification| { + GrandpaJustification::::decode(&mut raw_justification.as_slice()) + }) .transpose() .map_err(Error::ResponseParseFailed)?; @@ -132,27 +135,35 @@ where async fn finality_proofs(&self) -> Result { Ok(unfold( self.client.clone().subscribe_justifications().await?, - move |mut subscription| async move { + move |subscription| async move { loop { - let next_justification = subscription.next().await?; + let log_error = |err| { + log::error!( + target: "bridge", + "Failed to read justification target from the {} justifications stream: {:?}", + P::SOURCE_NAME, + err, + ); + }; + + let next_justification = subscription + .next() + .await + .map_err(|err| log_error(err.to_string())) + .ok()??; + let decoded_justification = - GrandpaJustification::::decode(&mut &next_justification.0[..]); + GrandpaJustification::::decode(&mut &next_justification[..]); let justification = match decoded_justification { Ok(j) => j, Err(err) => { - log::error!( - target: "bridge", - "Failed to decode justification target from the {} justifications stream: {:?}", - P::SOURCE_NAME, - err, - ); - - continue; - } + log_error(format!("decode failed with error {:?}", err)); + continue + }, }; - return Some((justification, subscription)); + return Some((justification, subscription)) } }, ) diff --git a/polkadot/bridges/relays/client-substrate/src/guard.rs b/polkadot/bridges/relays/client-substrate/src/guard.rs index c6e191ce078ff617205021eb1142ac9265bff2e0..a064e36234007785e58776f0b1da836d9f81370e 100644 --- a/polkadot/bridges/relays/client-substrate/src/guard.rs +++ b/polkadot/bridges/relays/client-substrate/src/guard.rs @@ -17,32 +17,41 @@ //! Pallet provides a set of guard functions that are running in background threads //! and are aborting process if some condition fails. 
-use crate::{Chain, ChainWithBalances, Client}; +use crate::{error::Error, Chain, ChainWithBalances, Client}; use async_trait::async_trait; use num_traits::CheckedSub; use sp_version::RuntimeVersion; use std::{ collections::VecDeque, + fmt::Display, time::{Duration, Instant}, }; /// Guards environment. #[async_trait] pub trait Environment: Send + Sync + 'static { + /// Error type. + type Error: Display + Send + Sync + 'static; + /// Return current runtime version. - async fn runtime_version(&mut self) -> Result; + async fn runtime_version(&mut self) -> Result; /// Return free native balance of the account on the chain. - async fn free_native_balance(&mut self, account: C::AccountId) -> Result; + async fn free_native_balance( + &mut self, + account: C::AccountId, + ) -> Result; /// Return current time. fn now(&self) -> Instant { Instant::now() } + /// Sleep given amount of time. async fn sleep(&mut self, duration: Duration) { async_std::task::sleep(duration).await } + /// Abort current process. Called when guard condition check fails. async fn abort(&mut self) { std::process::abort(); @@ -50,7 +59,10 @@ pub trait Environment: Send + Sync + 'static { } /// Abort when runtime spec version is different from specified. -pub fn abort_on_spec_version_change(mut env: impl Environment, expected_spec_version: u32) { +pub fn abort_on_spec_version_change( + mut env: impl Environment, + expected_spec_version: u32, +) { async_std::task::spawn(async move { loop { let actual_spec_version = env.runtime_version().await; @@ -66,10 +78,10 @@ pub fn abort_on_spec_version_change(mut env: impl Environm ); env.abort().await; - } + }, Err(error) => log::warn!( target: "bridge-guard", - "Failed to read {} runtime version: {:?}. Relay may need to be stopped manually", + "Failed to read {} runtime version: {}. 
Relay may need to be stopped manually", C::NAME, error, ), @@ -80,8 +92,9 @@ pub fn abort_on_spec_version_change(mut env: impl Environm }); } -/// Abort if, during a 24 hours, free balance of given account is decreased at least by given value. -/// Other components may increase (or decrease) balance of account and it WILL affect logic of the guard. +/// Abort if, during 24 hours, free balance of given account is decreased at least by given value. +/// Other components may increase (or decrease) balance of account and it WILL affect logic of the +/// guard. pub fn abort_when_account_balance_decreased( mut env: impl Environment, account_id: C::AccountId, @@ -127,16 +140,16 @@ pub fn abort_when_account_balance_decreased( env.abort().await; } - } + }, Err(error) => { log::warn!( target: "bridge-guard", - "Failed to read {} account {:?} balance: {:?}. Relay may need to be stopped manually", + "Failed to read {} account {:?} balance: {}. Relay may need to be stopped manually", C::NAME, account_id, error, ); - } + }, }; env.sleep(conditions_check_delay::()).await; @@ -151,20 +164,24 @@ fn conditions_check_delay() -> Duration { #[async_trait] impl Environment for Client { - async fn runtime_version(&mut self) -> Result { - Client::::runtime_version(self).await.map_err(|e| e.to_string()) + type Error = Error; + + async fn runtime_version(&mut self) -> Result { + Client::::runtime_version(self).await } - async fn free_native_balance(&mut self, account: C::AccountId) -> Result { - Client::::free_native_balance(self, account) - .await - .map_err(|e| e.to_string()) + async fn free_native_balance( + &mut self, + account: C::AccountId, + ) -> Result { + Client::::free_native_balance(self, account).await } } #[cfg(test)] mod tests { use super::*; + use frame_support::weights::IdentityFee; use futures::{ channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}, future::FutureExt, @@ -180,18 +197,24 @@ mod tests { type Hash = sp_core::H256; type Hasher = 
sp_runtime::traits::BlakeTwo256; type Header = sp_runtime::generic::Header; + + type AccountId = u32; + type Balance = u32; + type Index = u32; + type Signature = sp_runtime::testing::TestSignature; } impl Chain for TestChain { const NAME: &'static str = "Test"; const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_millis(1); + const STORAGE_PROOF_OVERHEAD: u32 = 0; + const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 0; - type AccountId = u32; - type Index = u32; - type SignedBlock = - sp_runtime::generic::SignedBlock>; + type SignedBlock = sp_runtime::generic::SignedBlock< + sp_runtime::generic::Block, + >; type Call = (); - type Balance = u32; + type WeightToFee = IdentityFee; } impl ChainWithBalances for TestChain { @@ -209,11 +232,13 @@ mod tests { #[async_trait] impl Environment for TestEnvironment { - async fn runtime_version(&mut self) -> Result { + type Error = Error; + + async fn runtime_version(&mut self) -> Result { Ok(self.runtime_version_rx.next().await.unwrap_or_default()) } - async fn free_native_balance(&mut self, _account: u32) -> Result { + async fn free_native_balance(&mut self, _account: u32) -> Result { Ok(self.free_native_balance_rx.next().await.unwrap_or_default()) } @@ -249,10 +274,7 @@ mod tests { // client responds with wrong version runtime_version_tx - .send(RuntimeVersion { - spec_version: 42, - ..Default::default() - }) + .send(RuntimeVersion { spec_version: 42, ..Default::default() }) .await .unwrap(); @@ -284,10 +306,7 @@ mod tests { // client responds with the same version runtime_version_tx - .send(RuntimeVersion { - spec_version: 42, - ..Default::default() - }) + .send(RuntimeVersion { spec_version: 42, ..Default::default() }) .await .unwrap(); diff --git a/polkadot/bridges/relays/client-substrate/src/headers_source.rs b/polkadot/bridges/relays/client-substrate/src/headers_source.rs deleted file mode 100644 index 3dfcb220de4530b38db1e7c3a0ca3b68feb51dff..0000000000000000000000000000000000000000 --- 
a/polkadot/bridges/relays/client-substrate/src/headers_source.rs +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Default generic implementation of headers source for basic Substrate client. - -use crate::chain::{BlockWithJustification, Chain}; -use crate::client::Client; -use crate::error::Error; - -use async_trait::async_trait; -use headers_relay::{ - sync_loop::SourceClient, - sync_types::{HeaderIdOf, HeadersSyncPipeline, QueuedHeader, SourceHeader}, -}; -use relay_utils::relay_loop::Client as RelayClient; -use sp_runtime::{traits::Header as HeaderT, EncodedJustification}; -use std::marker::PhantomData; - -/// Substrate node as headers source. -pub struct HeadersSource { - client: Client, - _phantom: PhantomData

, -} - -impl HeadersSource { - /// Create new headers source using given client. - pub fn new(client: Client) -> Self { - HeadersSource { - client, - _phantom: Default::default(), - } - } -} - -impl Clone for HeadersSource { - fn clone(&self) -> Self { - HeadersSource { - client: self.client.clone(), - _phantom: Default::default(), - } - } -} - -#[async_trait] -impl RelayClient for HeadersSource { - type Error = Error; - - async fn reconnect(&mut self) -> Result<(), Error> { - self.client.reconnect().await - } -} - -#[async_trait] -impl SourceClient

for HeadersSource -where - C: Chain, - C::BlockNumber: relay_utils::BlockNumberBase, - C::Header: Into, - P: HeadersSyncPipeline, - P::Header: SourceHeader, -{ - async fn best_block_number(&self) -> Result { - // we **CAN** continue to relay headers if source node is out of sync, because - // target node may be missing headers that are already available at the source - Ok(*self.client.best_header().await?.number()) - } - - async fn header_by_hash(&self, hash: P::Hash) -> Result { - self.client - .header_by_hash(hash) - .await - .map(Into::into) - .map_err(Into::into) - } - - async fn header_by_number(&self, number: P::Number) -> Result { - self.client - .header_by_number(number) - .await - .map(Into::into) - .map_err(Into::into) - } - - async fn header_completion(&self, id: HeaderIdOf

) -> Result<(HeaderIdOf

, Option), Error> { - let hash = id.1; - let signed_block = self.client.get_block(Some(hash)).await?; - let grandpa_justification = signed_block.justification().cloned(); - - Ok((id, grandpa_justification)) - } - - async fn header_extra(&self, id: HeaderIdOf

, _header: QueuedHeader

) -> Result<(HeaderIdOf

, ()), Error> { - Ok((id, ())) - } -} diff --git a/polkadot/bridges/relays/client-substrate/src/lib.rs b/polkadot/bridges/relays/client-substrate/src/lib.rs index 44895dcdc6e4b95bb24e92f6a44d1b264d5e1d9b..51ddf852b9b6f3a8af19d98c317218b47a3e1b27 100644 --- a/polkadot/bridges/relays/client-substrate/src/lib.rs +++ b/polkadot/bridges/relays/client-substrate/src/lib.rs @@ -26,14 +26,23 @@ mod sync_header; pub mod finality_source; pub mod guard; -pub mod headers_source; pub mod metrics; -pub use crate::chain::{BlockWithJustification, Chain, ChainWithBalances, TransactionSignScheme}; -pub use crate::client::{Client, JustificationsSubscription, OpaqueGrandpaAuthoritiesSet}; -pub use crate::error::{Error, Result}; -pub use crate::sync_header::SyncHeader; -pub use bp_runtime::{BlockNumberOf, Chain as ChainBase, HashOf, HeaderOf}; +use std::time::Duration; + +pub use crate::{ + chain::{ + BlockWithJustification, CallOf, Chain, ChainWithBalances, TransactionSignScheme, + TransactionStatusOf, UnsignedTransaction, WeightToFeeOf, + }, + client::{Client, OpaqueGrandpaAuthoritiesSet, Subscription}, + error::{Error, Result}, + sync_header::SyncHeader, +}; +pub use bp_runtime::{ + AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain as ChainBase, HashOf, HeaderOf, + IndexOf, SignatureOf, TransactionEra, TransactionEraOf, +}; /// Header id used by the chain. pub type HeaderIdOf = relay_utils::HeaderId, BlockNumberOf>; @@ -41,7 +50,7 @@ pub type HeaderIdOf = relay_utils::HeaderId, BlockNumberOf>; /// Substrate-over-websocket connection params. #[derive(Debug, Clone)] pub struct ConnectionParams { - /// Websocket server hostname. + /// Websocket server host name. pub host: String, /// Websocket server TCP port. 
pub port: u16, @@ -51,10 +60,48 @@ pub struct ConnectionParams { impl Default for ConnectionParams { fn default() -> Self { - ConnectionParams { - host: "localhost".into(), - port: 9944, - secure: false, - } + ConnectionParams { host: "localhost".into(), port: 9944, secure: false } } } + +/// Returns stall timeout for relay loop. +/// +/// Relay considers himself stalled if he has submitted transaction to the node, but it has not +/// been mined for this period. +pub fn transaction_stall_timeout( + mortality_period: Option, + average_block_interval: Duration, + default_stall_timeout: Duration, +) -> Duration { + // 1 extra block for transaction to reach the pool && 1 for relayer to awake after it is mined + mortality_period + .map(|mortality_period| average_block_interval.saturating_mul(mortality_period + 1 + 1)) + .unwrap_or(default_stall_timeout) +} + +/// Returns stall timeout for relay loop that submit transactions to two chains. +/// +/// Bidirectional relay may have two active transactions. Even if one of them has been spoiled, we +/// can't just restart the loop - the other transaction may still be alive and we'll be submitting +/// duplicate transaction, which may result in funds loss. So we'll be selecting maximal mortality +/// for choosing loop stall timeout. 
+pub fn bidirectional_transaction_stall_timeout( + left_mortality_period: Option, + right_mortality_period: Option, + left_average_block_interval: Duration, + right_average_block_interval: Duration, + default_stall_timeout: Duration, +) -> Duration { + std::cmp::max( + transaction_stall_timeout( + left_mortality_period, + left_average_block_interval, + default_stall_timeout, + ), + transaction_stall_timeout( + right_mortality_period, + right_average_block_interval, + default_stall_timeout, + ), + ) +} diff --git a/polkadot/bridges/relays/client-substrate/src/metrics/float_storage_value.rs b/polkadot/bridges/relays/client-substrate/src/metrics/float_storage_value.rs index f3ba8988eea4ab818ccbaf4447400296cd497454..7dccf82b6f8e435a8402097e5b342292640eb3ab 100644 --- a/polkadot/bridges/relays/client-substrate/src/metrics/float_storage_value.rs +++ b/polkadot/bridges/relays/client-substrate/src/metrics/float_storage_value.rs @@ -14,12 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use crate::chain::Chain; -use crate::client::Client; +use crate::{chain::Chain, client::Client}; +use async_std::sync::{Arc, RwLock}; use async_trait::async_trait; use codec::Decode; -use relay_utils::metrics::{metric_name, register, Gauge, PrometheusError, Registry, StandaloneMetrics, F64}; +use relay_utils::metrics::{ + metric_name, register, F64SharedRef, Gauge, Metric, PrometheusError, Registry, + StandaloneMetric, F64, +}; use sp_core::storage::StorageKey; use sp_runtime::{traits::UniqueSaturatedInto, FixedPointNumber}; use std::time::Duration; @@ -34,30 +37,45 @@ pub struct FloatStorageValueMetric { storage_key: StorageKey, maybe_default_value: Option, metric: Gauge, + shared_value_ref: F64SharedRef, } impl FloatStorageValueMetric { /// Create new metric. 
pub fn new( - registry: &Registry, - prefix: Option<&str>, client: Client, storage_key: StorageKey, maybe_default_value: Option, name: String, help: String, ) -> Result { + let shared_value_ref = Arc::new(RwLock::new(None)); Ok(FloatStorageValueMetric { client, storage_key, maybe_default_value, - metric: register(Gauge::new(metric_name(prefix, &name), help)?, registry)?, + metric: Gauge::new(metric_name(None, &name), help)?, + shared_value_ref, }) } + + /// Get shared reference to metric value. + pub fn shared_value_ref(&self) -> F64SharedRef { + self.shared_value_ref.clone() + } +} + +impl Metric for FloatStorageValueMetric +where + T: 'static + Decode + Send + Sync + FixedPointNumber, +{ + fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { + register(self.metric.clone(), registry).map(drop) + } } #[async_trait] -impl StandaloneMetrics for FloatStorageValueMetric +impl StandaloneMetric for FloatStorageValueMetric where T: 'static + Decode + Send + Sync + FixedPointNumber, { @@ -66,17 +84,18 @@ where } async fn update(&self) { - relay_utils::metrics::set_gauge_value( - &self.metric, - self.client - .storage_value::(self.storage_key.clone()) - .await - .map(|maybe_storage_value| { - maybe_storage_value.or(self.maybe_default_value).map(|storage_value| { - storage_value.into_inner().unique_saturated_into() as f64 - / T::DIV.unique_saturated_into() as f64 - }) - }), - ); + let value = self + .client + .storage_value::(self.storage_key.clone(), None) + .await + .map(|maybe_storage_value| { + maybe_storage_value.or(self.maybe_default_value).map(|storage_value| { + storage_value.into_inner().unique_saturated_into() as f64 / + T::DIV.unique_saturated_into() as f64 + }) + }) + .map_err(drop); + relay_utils::metrics::set_gauge_value(&self.metric, value); + *self.shared_value_ref.write().await = value.ok().and_then(|x| x); } } diff --git a/polkadot/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs 
b/polkadot/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs index 526fe1e048bfcc9f2b77940e2f4829ae35d9c0fc..f1c770ed228e7525ff895d2b7ffc774b8bc6d62c 100644 --- a/polkadot/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs +++ b/polkadot/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use crate::chain::Chain; -use crate::client::Client; -use crate::error::Error; +use crate::{chain::Chain, client::Client, error::Error}; use async_trait::async_trait; -use relay_utils::metrics::{metric_name, register, Gauge, PrometheusError, Registry, StandaloneMetrics, U64}; +use relay_utils::metrics::{ + metric_name, register, Gauge, Metric, PrometheusError, Registry, StandaloneMetric, U64, +}; use sp_core::storage::StorageKey; use sp_runtime::traits::Header as HeaderT; use sp_storage::well_known_keys::CODE; @@ -40,25 +40,16 @@ pub struct StorageProofOverheadMetric { impl Clone for StorageProofOverheadMetric { fn clone(&self) -> Self { - StorageProofOverheadMetric { - client: self.client.clone(), - metric: self.metric.clone(), - } + StorageProofOverheadMetric { client: self.client.clone(), metric: self.metric.clone() } } } impl StorageProofOverheadMetric { /// Create new metric instance with given name and help. 
- pub fn new( - registry: &Registry, - prefix: Option<&str>, - client: Client, - name: String, - help: String, - ) -> Result { + pub fn new(client: Client, name: String, help: String) -> Result { Ok(StorageProofOverheadMetric { client, - metric: register(Gauge::new(metric_name(prefix, &name), help)?, registry)?, + metric: Gauge::new(metric_name(None, &name), help)?, }) } @@ -73,22 +64,28 @@ impl StorageProofOverheadMetric { .await?; let storage_proof_size: usize = storage_proof.clone().iter_nodes().map(|n| n.len()).sum(); - let storage_value_reader = - bp_runtime::StorageProofChecker::::new(*best_header.state_root(), storage_proof) - .map_err(Error::StorageProofError)?; - let maybe_encoded_storage_value = storage_value_reader - .read_value(CODE) - .map_err(Error::StorageProofError)?; - let encoded_storage_value_size = maybe_encoded_storage_value - .ok_or(Error::MissingMandatoryCodeEntry)? - .len(); + let storage_value_reader = bp_runtime::StorageProofChecker::::new( + *best_header.state_root(), + storage_proof, + ) + .map_err(Error::StorageProofError)?; + let maybe_encoded_storage_value = + storage_value_reader.read_value(CODE).map_err(Error::StorageProofError)?; + let encoded_storage_value_size = + maybe_encoded_storage_value.ok_or(Error::MissingMandatoryCodeEntry)?.len(); Ok(storage_proof_size - encoded_storage_value_size) } } +impl Metric for StorageProofOverheadMetric { + fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { + register(self.metric.clone(), registry).map(drop) + } +} + #[async_trait] -impl StandaloneMetrics for StorageProofOverheadMetric { +impl StandaloneMetric for StorageProofOverheadMetric { fn update_interval(&self) -> Duration { C::AVERAGE_BLOCK_INTERVAL * UPDATE_INTERVAL_IN_BLOCKS } diff --git a/polkadot/bridges/relays/client-substrate/src/rpc.rs b/polkadot/bridges/relays/client-substrate/src/rpc.rs index 06df1f705d093d01f2c67ef9c513861bb70fd5e6..efd45ebe43f36d102ee17f7f047f20a30e4c8c1a 100644 --- 
a/polkadot/bridges/relays/client-substrate/src/rpc.rs +++ b/polkadot/bridges/relays/client-substrate/src/rpc.rs @@ -18,11 +18,13 @@ use crate::chain::Chain; +use pallet_transaction_payment_rpc_runtime_api::FeeDetails; use sc_rpc_api::{state::ReadProof, system::Health}; use sp_core::{ storage::{StorageData, StorageKey}, Bytes, }; +use sp_rpc::number::NumberOrHex; use sp_version::RuntimeVersion; jsonrpsee_proc_macros::rpc_client_api! { @@ -41,13 +43,17 @@ jsonrpsee_proc_macros::rpc_client_api! { fn system_account_next_index(account_id: C::AccountId) -> C::Index; #[rpc(method = "author_submitExtrinsic", positional_params)] fn author_submit_extrinsic(extrinsic: Bytes) -> C::Hash; + #[rpc(method = "author_pendingExtrinsics", positional_params)] + fn author_pending_extrinsics() -> Vec; #[rpc(method = "state_call", positional_params)] fn state_call(method: String, data: Bytes, at_block: Option) -> Bytes; #[rpc(method = "state_getStorage", positional_params)] - fn state_get_storage(key: StorageKey) -> Option; + fn state_get_storage(key: StorageKey, at_block: Option) -> Option; #[rpc(method = "state_getReadProof", positional_params)] fn state_prove_storage(keys: Vec, hash: Option) -> ReadProof; #[rpc(method = "state_getRuntimeVersion", positional_params)] fn state_runtime_version() -> RuntimeVersion; + #[rpc(method = "payment_queryFeeDetails", positional_params)] + fn payment_query_fee_details(extrinsic: Bytes, at_block: Option) -> FeeDetails; } } diff --git a/polkadot/bridges/relays/client-substrate/src/sync_header.rs b/polkadot/bridges/relays/client-substrate/src/sync_header.rs index 0b74dee690f2146f7d130c8eec3f062df0965571..ed3de6289ce01769265dff5d5493f0cf6c2fd987 100644 --- a/polkadot/bridges/relays/client-substrate/src/sync_header.rs +++ b/polkadot/bridges/relays/client-substrate/src/sync_header.rs @@ -16,13 +16,10 @@ use bp_header_chain::find_grandpa_authorities_scheduled_change; use finality_relay::SourceHeader as FinalitySourceHeader; -use 
headers_relay::sync_types::SourceHeader; -use num_traits::{CheckedSub, One}; -use relay_utils::HeaderId; use sp_runtime::traits::Header as HeaderT; /// Generic wrapper for `sp_runtime::traits::Header` based headers, that -/// implements `headers_relay::sync_types::SourceHeader` and may be used in headers sync directly. +/// implements `finality_relay::SourceHeader` and may be used in headers sync directly. #[derive(Clone, Debug, PartialEq)] pub struct SyncHeader

(Header); @@ -47,21 +44,6 @@ impl
From
for SyncHeader
{ } } -impl SourceHeader for SyncHeader
{ - fn id(&self) -> HeaderId { - relay_utils::HeaderId(*self.0.number(), self.hash()) - } - - fn parent_id(&self) -> HeaderId { - relay_utils::HeaderId( - self.number() - .checked_sub(&One::one()) - .expect("should never be called for genesis header"), - *self.parent_hash(), - ) - } -} - impl FinalitySourceHeader for SyncHeader
{ fn number(&self) -> Header::Number { *self.0.number() diff --git a/polkadot/bridges/relays/client-westend/Cargo.toml b/polkadot/bridges/relays/client-westend/Cargo.toml index a408ae3a46daf7b0bd9a0e6d7b5ddd3a28c8f379..24b05c4f4836b2dab2fc1947273dfc081b98ec65 100644 --- a/polkadot/bridges/relays/client-westend/Cargo.toml +++ b/polkadot/bridges/relays/client-westend/Cargo.toml @@ -6,8 +6,7 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers" } +codec = { package = "parity-scale-codec", version = "2.2.0" } relay-substrate-client = { path = "../client-substrate" } relay-utils = { path = "../utils" } @@ -17,9 +16,5 @@ bp-westend = { path = "../../primitives/chain-westend" } # Substrate Dependencies -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-westend/src/lib.rs b/polkadot/bridges/relays/client-westend/src/lib.rs index 6768b81f10f8c63b73e0f43ac9c833097ae29c9d..c719d6ea55364be8c09d00220d82ec6681f7419d 100644 --- a/polkadot/bridges/relays/client-westend/src/lib.rs +++ b/polkadot/bridges/relays/client-westend/src/lib.rs @@ -16,10 +16,8 @@ //! Types used to connect to the Westend chain. 
-use codec::Encode; -use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme}; -use sp_core::{storage::StorageKey, Pair}; -use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; +use relay_substrate_client::{Chain, ChainBase, ChainWithBalances}; +use sp_core::storage::StorageKey; use std::time::Duration; /// Westend header id. @@ -37,17 +35,22 @@ impl ChainBase for Westend { type Hash = bp_westend::Hash; type Hasher = bp_westend::Hasher; type Header = bp_westend::Header; + + type AccountId = bp_westend::AccountId; + type Balance = bp_westend::Balance; + type Index = bp_westend::Nonce; + type Signature = bp_westend::Signature; } impl Chain for Westend { const NAME: &'static str = "Westend"; const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); + const STORAGE_PROOF_OVERHEAD: u32 = bp_westend::EXTRA_STORAGE_PROOF_SIZE; + const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_westend::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE; - type AccountId = bp_westend::AccountId; - type Index = bp_westend::Nonce; type SignedBlock = bp_westend::SignedBlock; type Call = bp_westend::Call; - type Balance = bp_westend::Balance; + type WeightToFee = bp_westend::WeightToFee; } impl ChainWithBalances for Westend { @@ -55,42 +58,3 @@ impl ChainWithBalances for Westend { StorageKey(bp_westend::account_info_storage_key(account_id)) } } - -impl TransactionSignScheme for Westend { - type Chain = Westend; - type AccountKeyPair = sp_core::sr25519::Pair; - type SignedTransaction = bp_westend::UncheckedExtrinsic; - - fn sign_transaction( - genesis_hash: ::Hash, - signer: &Self::AccountKeyPair, - signer_nonce: ::Index, - call: ::Call, - ) -> Self::SignedTransaction { - let raw_payload = SignedPayload::new( - call, - bp_westend::SignedExtensions::new( - bp_westend::VERSION, - sp_runtime::generic::Era::Immortal, - genesis_hash, - signer_nonce, - 0, - ), - ) - .expect("SignedExtension never fails."); - - let signature = raw_payload.using_encoded(|payload| 
signer.sign(payload)); - let signer: sp_runtime::MultiSigner = signer.public().into(); - let (call, extra, _) = raw_payload.deconstruct(); - - bp_westend::UncheckedExtrinsic::new_signed( - call, - sp_runtime::MultiAddress::Id(signer.into_account()), - signature.into(), - extra, - ) - } -} - -/// Westend signing params. -pub type SigningParams = sp_core::sr25519::Pair; diff --git a/polkadot/bridges/relays/client-wococo/Cargo.toml b/polkadot/bridges/relays/client-wococo/Cargo.toml index c1b9aafd95e7c8d754faa781b4caa0e6939dbb5a..ea46c3c898bbbb3f22afd722242579a105d99105 100644 --- a/polkadot/bridges/relays/client-wococo/Cargo.toml +++ b/polkadot/bridges/relays/client-wococo/Cargo.toml @@ -6,10 +6,10 @@ edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers" } +codec = { package = "parity-scale-codec", version = "2.2.0" } relay-substrate-client = { path = "../client-substrate" } relay-utils = { path = "../utils" } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } # Bridge dependencies bridge-runtime-common = { path = "../../bin/runtime-common" } @@ -24,9 +24,6 @@ pallet-bridge-dispatch = { path = "../../modules/dispatch" } pallet-bridge-messages = { path = "../../modules/messages" } # Substrate Dependencies -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-wococo/src/lib.rs 
b/polkadot/bridges/relays/client-wococo/src/lib.rs index 8ceba7c7c436d5d8cc0ab38c20387b39753a5698..d61915ec123708580ac117f4ffbabdeddddde0c8 100644 --- a/polkadot/bridges/relays/client-wococo/src/lib.rs +++ b/polkadot/bridges/relays/client-wococo/src/lib.rs @@ -17,7 +17,10 @@ //! Types used to connect to the Wococo-Substrate chain. use codec::Encode; -use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme}; +use relay_substrate_client::{ + Chain, ChainBase, ChainWithBalances, TransactionEraOf, TransactionSignScheme, + UnsignedTransaction, +}; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; use std::time::Duration; @@ -39,17 +42,22 @@ impl ChainBase for Wococo { type Hash = bp_wococo::Hash; type Hasher = bp_wococo::Hashing; type Header = bp_wococo::Header; + + type AccountId = bp_wococo::AccountId; + type Balance = bp_wococo::Balance; + type Index = bp_wococo::Nonce; + type Signature = bp_wococo::Signature; } impl Chain for Wococo { const NAME: &'static str = "Wococo"; const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); + const STORAGE_PROOF_OVERHEAD: u32 = bp_wococo::EXTRA_STORAGE_PROOF_SIZE; + const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_wococo::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE; - type AccountId = bp_wococo::AccountId; - type Index = bp_wococo::Index; type SignedBlock = bp_wococo::SignedBlock; type Call = crate::runtime::Call; - type Balance = bp_wococo::Balance; + type WeightToFee = bp_wococo::WeightToFee; } impl ChainWithBalances for Wococo { @@ -66,17 +74,17 @@ impl TransactionSignScheme for Wococo { fn sign_transaction( genesis_hash: ::Hash, signer: &Self::AccountKeyPair, - signer_nonce: ::Index, - call: ::Call, + era: TransactionEraOf, + unsigned: UnsignedTransaction, ) -> Self::SignedTransaction { let raw_payload = SignedPayload::new( - call, + unsigned.call, bp_wococo::SignedExtensions::new( bp_wococo::VERSION, - sp_runtime::generic::Era::Immortal, + 
era, genesis_hash, - signer_nonce, - 0, + unsigned.nonce, + unsigned.tip, ), ) .expect("SignedExtension never fails."); @@ -92,6 +100,24 @@ impl TransactionSignScheme for Wococo { extra, ) } + + fn is_signed(tx: &Self::SignedTransaction) -> bool { + tx.signature.is_some() + } + + fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool { + tx.signature + .as_ref() + .map(|(address, _, _)| { + *address == bp_wococo::AccountId::from(*signer.public().as_array_ref()).into() + }) + .unwrap_or(false) + } + + fn parse_transaction(tx: Self::SignedTransaction) -> Option> { + let extra = &tx.signature.as_ref()?.2; + Some(UnsignedTransaction { call: tx.function, nonce: extra.nonce(), tip: extra.tip() }) + } } /// Wococo signing params. diff --git a/polkadot/bridges/relays/client-wococo/src/runtime.rs b/polkadot/bridges/relays/client-wococo/src/runtime.rs index e973c3a6d028055e5767b8987e07664d26b3be88..91d32d1aa76f71a9dc7aefe51bbcdd662053e3f7 100644 --- a/polkadot/bridges/relays/client-wococo/src/runtime.rs +++ b/polkadot/bridges/relays/client-wococo/src/runtime.rs @@ -21,9 +21,7 @@ use bp_polkadot_core::PolkadotLike; use bp_runtime::Chain; use codec::{Decode, Encode}; use frame_support::weights::Weight; - -/// Instance of messages pallet that is used to bridge with Rococo chain. -pub type WithRococoMessagesInstance = pallet_bridge_messages::DefaultInstance; +use scale_info::TypeInfo; /// Unchecked Wococo extrinsic. pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; @@ -55,15 +53,15 @@ where /// Wococo Runtime `Call` enum. /// /// The enum represents a subset of possible `Call`s we can send to Rococo chain. -/// Ideally this code would be auto-generated from Metadata, because we want to +/// Ideally this code would be auto-generated from metadata, because we want to /// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. 
/// /// All entries here (like pretty much in the entire file) must be kept in sync with Rococo /// `construct_runtime`, so that we maintain SCALE-compatibility. /// -/// See: https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs +/// See: [link](https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs) #[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)] +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] pub enum Call { /// System pallet. #[codec(index = 0)] @@ -76,26 +74,26 @@ pub enum Call { BridgeMessagesRococo(BridgeMessagesRococoCall), } -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)] +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] #[allow(non_camel_case_types)] pub enum SystemCall { #[codec(index = 1)] remark(Vec), } -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)] +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] #[allow(non_camel_case_types)] pub enum BridgeGrandpaRococoCall { #[codec(index = 0)] submit_finality_proof( - ::Header, + Box<::Header>, bp_header_chain::justification::GrandpaJustification<::Header>, ), #[codec(index = 1)] initialize(bp_header_chain::InitializationData<::Header>), } -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)] +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] #[allow(non_camel_case_types)] pub enum BridgeMessagesRococoCall { #[codec(index = 3)] @@ -118,7 +116,9 @@ pub enum BridgeMessagesRococoCall { ), #[codec(index = 6)] receive_messages_delivery_proof( - bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof, + bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof< + bp_rococo::Hash, + >, UnrewardedRelayersState, ), } diff --git a/polkadot/bridges/relays/exchange/Cargo.toml b/polkadot/bridges/relays/exchange/Cargo.toml deleted file mode 100644 index 
62e7a029bbb2c996f49a2bfae213e9434ae07901..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/exchange/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "exchange-relay" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -async-std = "1.6.5" -async-trait = "0.1.40" -backoff = "0.2" -futures = "0.3.5" -log = "0.4.11" -num-traits = "0.2" -parking_lot = "0.11.0" -relay-utils = { path = "../utils" } diff --git a/polkadot/bridges/relays/exchange/src/exchange.rs b/polkadot/bridges/relays/exchange/src/exchange.rs deleted file mode 100644 index 7128a0ccd097b582112b787a6ef0cae774c26022..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/exchange/src/exchange.rs +++ /dev/null @@ -1,919 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relaying proofs of exchange transaction. - -use async_trait::async_trait; -use relay_utils::{ - relay_loop::Client as RelayClient, FailedClient, MaybeConnectionError, StringifiedMaybeConnectionError, -}; -use std::{ - fmt::{Debug, Display}, - string::ToString, -}; - -/// Transaction proof pipeline. -pub trait TransactionProofPipeline: 'static { - /// Name of the transaction proof source. 
- const SOURCE_NAME: &'static str; - /// Name of the transaction proof target. - const TARGET_NAME: &'static str; - - /// Block type. - type Block: SourceBlock; - /// Transaction inclusion proof type. - type TransactionProof: 'static + Send + Sync; -} - -/// Block that is participating in exchange. -pub trait SourceBlock: 'static + Send + Sync { - /// Block hash type. - type Hash: 'static + Clone + Send + Sync + Debug + Display; - /// Block number type. - type Number: 'static - + Debug - + Display - + Clone - + Copy - + Send - + Sync - + Into - + std::cmp::Ord - + std::ops::Add - + num_traits::One; - /// Block transaction. - type Transaction: SourceTransaction; - - /// Return hash of the block. - fn id(&self) -> relay_utils::HeaderId; - /// Return block transactions iterator. - fn transactions(&self) -> Vec; -} - -/// Transaction that is participating in exchange. -pub trait SourceTransaction: 'static + Send { - /// Transaction hash type. - type Hash: Debug + Display; - - /// Return transaction hash. - fn hash(&self) -> Self::Hash; -} - -/// Block hash for given pipeline. -pub type BlockHashOf

= <

::Block as SourceBlock>::Hash; - -/// Block number for given pipeline. -pub type BlockNumberOf

= <

::Block as SourceBlock>::Number; - -/// Transaction hash for given pipeline. -pub type TransactionOf

= <

::Block as SourceBlock>::Transaction; - -/// Transaction hash for given pipeline. -pub type TransactionHashOf

= as SourceTransaction>::Hash; - -/// Header id. -pub type HeaderId

= relay_utils::HeaderId, BlockNumberOf

>; - -/// Source client API. -#[async_trait] -pub trait SourceClient: RelayClient { - /// Sleep until exchange-related data is (probably) updated. - async fn tick(&self); - /// Get block by hash. - async fn block_by_hash(&self, hash: BlockHashOf

) -> Result; - /// Get canonical block by number. - async fn block_by_number(&self, number: BlockNumberOf

) -> Result; - /// Return block + index where transaction has been **mined**. May return `Ok(None)` if transaction - /// is unknown to the source node. - async fn transaction_block(&self, hash: &TransactionHashOf

) - -> Result, usize)>, Self::Error>; - /// Prepare transaction proof. - async fn transaction_proof(&self, block: &P::Block, tx_index: usize) -> Result; -} - -/// Target client API. -#[async_trait] -pub trait TargetClient: RelayClient { - /// Sleep until exchange-related data is (probably) updated. - async fn tick(&self); - /// Returns `Ok(true)` if header is known to the target node. - async fn is_header_known(&self, id: &HeaderId

) -> Result; - /// Returns `Ok(true)` if header is finalized by the target node. - async fn is_header_finalized(&self, id: &HeaderId

) -> Result; - /// Returns best finalized header id. - async fn best_finalized_header_id(&self) -> Result, Self::Error>; - /// Returns `Ok(true)` if transaction proof is need to be relayed. - async fn filter_transaction_proof(&self, proof: &P::TransactionProof) -> Result; - /// Submits transaction proof to the target node. - async fn submit_transaction_proof(&self, proof: P::TransactionProof) -> Result<(), Self::Error>; -} - -/// Block transaction statistics. -#[derive(Debug, Default)] -#[cfg_attr(test, derive(PartialEq))] -pub struct RelayedBlockTransactions { - /// Total number of transactions processed (either relayed or ignored) so far. - pub processed: usize, - /// Total number of transactions successfully relayed so far. - pub relayed: usize, - /// Total number of transactions that we have failed to relay so far. - pub failed: usize, -} - -/// Relay all suitable transactions from single block. -/// -/// If connection error occurs, returns Err with number of successfully processed transactions. -/// If some other error occurs, it is ignored and other transactions are processed. -/// -/// All transaction-level traces are written by this function. This function is not tracing -/// any information about block. -pub async fn relay_block_transactions( - source_client: &impl SourceClient

, - target_client: &impl TargetClient

, - source_block: &P::Block, - mut relayed_transactions: RelayedBlockTransactions, -) -> Result { - let transactions_to_process = source_block - .transactions() - .into_iter() - .enumerate() - .skip(relayed_transactions.processed); - for (source_tx_index, source_tx) in transactions_to_process { - let result = async { - let source_tx_id = format!("{}/{}", source_block.id().1, source_tx_index); - let source_tx_proof = - prepare_transaction_proof(source_client, &source_tx_id, source_block, source_tx_index) - .await - .map_err(|e| (FailedClient::Source, e))?; - - let needs_to_be_relayed = - target_client - .filter_transaction_proof(&source_tx_proof) - .await - .map_err(|err| { - ( - FailedClient::Target, - StringifiedMaybeConnectionError::new( - err.is_connection_error(), - format!("Transaction filtering has failed with {:?}", err), - ), - ) - })?; - - if !needs_to_be_relayed { - return Ok(false); - } - - relay_ready_transaction_proof(target_client, &source_tx_id, source_tx_proof) - .await - .map(|_| true) - .map_err(|e| (FailedClient::Target, e)) - } - .await; - - // We have two options here: - // 1) retry with the same transaction later; - // 2) report error and proceed with next transaction. - // - // Option#1 may seems better, but: - // 1) we do not track if transaction is mined (without an error) by the target node; - // 2) error could be irrecoverable (e.g. when block is already pruned by bridge module or tx - // has invalid format) && we'll end up in infinite loop of retrying the same transaction proof. - // - // So we're going with option#2 here (the only exception are connection errors). 
- match result { - Ok(false) => { - relayed_transactions.processed += 1; - } - Ok(true) => { - log::info!( - target: "bridge", - "{} transaction {} proof has been successfully submitted to {} node", - P::SOURCE_NAME, - source_tx.hash(), - P::TARGET_NAME, - ); - - relayed_transactions.processed += 1; - relayed_transactions.relayed += 1; - } - Err((failed_client, err)) => { - log::error!( - target: "bridge", - "Error relaying {} transaction {} proof to {} node: {}. {}", - P::SOURCE_NAME, - source_tx.hash(), - P::TARGET_NAME, - err.to_string(), - if err.is_connection_error() { - "Going to retry after delay..." - } else { - "You may need to submit proof of this transaction manually" - }, - ); - - if err.is_connection_error() { - return Err((failed_client, relayed_transactions)); - } - - relayed_transactions.processed += 1; - relayed_transactions.failed += 1; - } - } - } - - Ok(relayed_transactions) -} - -/// Relay single transaction proof. -pub async fn relay_single_transaction_proof( - source_client: &impl SourceClient

, - target_client: &impl TargetClient

, - source_tx_hash: TransactionHashOf

, -) -> Result<(), String> { - // wait for transaction and header on source node - let (source_header_id, source_tx_index) = wait_transaction_mined(source_client, &source_tx_hash).await?; - let source_block = source_client.block_by_hash(source_header_id.1.clone()).await; - let source_block = source_block.map_err(|err| { - format!( - "Error retrieving block {} from {} node: {:?}", - source_header_id.1, - P::SOURCE_NAME, - err, - ) - })?; - - // wait for transaction and header on target node - wait_header_imported(target_client, &source_header_id).await?; - wait_header_finalized(target_client, &source_header_id).await?; - - // and finally - prepare and submit transaction proof to target node - let source_tx_id = format!("{}", source_tx_hash); - relay_ready_transaction_proof( - target_client, - &source_tx_id, - prepare_transaction_proof(source_client, &source_tx_id, &source_block, source_tx_index) - .await - .map_err(|err| err.to_string())?, - ) - .await - .map_err(|err| err.to_string()) -} - -/// Prepare transaction proof. -async fn prepare_transaction_proof( - source_client: &impl SourceClient

, - source_tx_id: &str, - source_block: &P::Block, - source_tx_index: usize, -) -> Result { - source_client - .transaction_proof(source_block, source_tx_index) - .await - .map_err(|err| { - StringifiedMaybeConnectionError::new( - err.is_connection_error(), - format!( - "Error building transaction {} proof on {} node: {:?}", - source_tx_id, - P::SOURCE_NAME, - err, - ), - ) - }) -} - -/// Relay prepared proof of transaction. -async fn relay_ready_transaction_proof( - target_client: &impl TargetClient

, - source_tx_id: &str, - source_tx_proof: P::TransactionProof, -) -> Result<(), StringifiedMaybeConnectionError> { - target_client - .submit_transaction_proof(source_tx_proof) - .await - .map_err(|err| { - StringifiedMaybeConnectionError::new( - err.is_connection_error(), - format!( - "Error submitting transaction {} proof to {} node: {:?}", - source_tx_id, - P::TARGET_NAME, - err, - ), - ) - }) -} - -/// Wait until transaction is mined by source node. -async fn wait_transaction_mined( - source_client: &impl SourceClient

, - source_tx_hash: &TransactionHashOf

, -) -> Result<(HeaderId

, usize), String> { - loop { - let source_header_and_tx = source_client.transaction_block(source_tx_hash).await.map_err(|err| { - format!( - "Error retrieving transaction {} from {} node: {:?}", - source_tx_hash, - P::SOURCE_NAME, - err, - ) - })?; - match source_header_and_tx { - Some((source_header_id, source_tx)) => { - log::info!( - target: "bridge", - "Transaction {} is retrieved from {} node. Continuing...", - source_tx_hash, - P::SOURCE_NAME, - ); - - return Ok((source_header_id, source_tx)); - } - None => { - log::info!( - target: "bridge", - "Waiting for transaction {} to be mined by {} node...", - source_tx_hash, - P::SOURCE_NAME, - ); - - source_client.tick().await; - } - } - } -} - -/// Wait until target node imports required header. -async fn wait_header_imported( - target_client: &impl TargetClient

, - source_header_id: &HeaderId

, -) -> Result<(), String> { - loop { - let is_header_known = target_client.is_header_known(source_header_id).await.map_err(|err| { - format!( - "Failed to check existence of header {}/{} on {} node: {:?}", - source_header_id.0, - source_header_id.1, - P::TARGET_NAME, - err, - ) - })?; - match is_header_known { - true => { - log::info!( - target: "bridge", - "Header {}/{} is known to {} node. Continuing.", - source_header_id.0, - source_header_id.1, - P::TARGET_NAME, - ); - - return Ok(()); - } - false => { - log::info!( - target: "bridge", - "Waiting for header {}/{} to be imported by {} node...", - source_header_id.0, - source_header_id.1, - P::TARGET_NAME, - ); - - target_client.tick().await; - } - } - } -} - -/// Wait until target node finalizes required header. -async fn wait_header_finalized( - target_client: &impl TargetClient

, - source_header_id: &HeaderId

, -) -> Result<(), String> { - loop { - let is_header_finalized = target_client - .is_header_finalized(source_header_id) - .await - .map_err(|err| { - format!( - "Failed to check finality of header {}/{} on {} node: {:?}", - source_header_id.0, - source_header_id.1, - P::TARGET_NAME, - err, - ) - })?; - match is_header_finalized { - true => { - log::info!( - target: "bridge", - "Header {}/{} is finalizd by {} node. Continuing.", - source_header_id.0, - source_header_id.1, - P::TARGET_NAME, - ); - - return Ok(()); - } - false => { - log::info!( - target: "bridge", - "Waiting for header {}/{} to be finalized by {} node...", - source_header_id.0, - source_header_id.1, - P::TARGET_NAME, - ); - - target_client.tick().await; - } - } - } -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - - use parking_lot::Mutex; - use relay_utils::HeaderId; - use std::{ - collections::{HashMap, HashSet}, - sync::Arc, - }; - - pub fn test_block_id() -> TestHeaderId { - HeaderId(1, 1) - } - - pub fn test_next_block_id() -> TestHeaderId { - HeaderId(2, 2) - } - - pub fn test_transaction_hash(tx_index: u64) -> TestTransactionHash { - 200 + tx_index - } - - pub fn test_transaction(tx_index: u64) -> TestTransaction { - TestTransaction(test_transaction_hash(tx_index)) - } - - pub fn test_block() -> TestBlock { - TestBlock(test_block_id(), vec![test_transaction(0)]) - } - - pub fn test_next_block() -> TestBlock { - TestBlock(test_next_block_id(), vec![test_transaction(1)]) - } - - pub type TestBlockNumber = u64; - pub type TestBlockHash = u64; - pub type TestTransactionHash = u64; - pub type TestHeaderId = HeaderId; - - #[derive(Debug, Clone, PartialEq)] - pub struct TestError(pub bool); - - impl MaybeConnectionError for TestError { - fn is_connection_error(&self) -> bool { - self.0 - } - } - - pub struct TestTransactionProofPipeline; - - impl TransactionProofPipeline for TestTransactionProofPipeline { - const SOURCE_NAME: &'static str = "TestSource"; - const TARGET_NAME: &'static str 
= "TestTarget"; - - type Block = TestBlock; - type TransactionProof = TestTransactionProof; - } - - #[derive(Debug, Clone)] - pub struct TestBlock(pub TestHeaderId, pub Vec); - - impl SourceBlock for TestBlock { - type Hash = TestBlockHash; - type Number = TestBlockNumber; - type Transaction = TestTransaction; - - fn id(&self) -> TestHeaderId { - self.0 - } - - fn transactions(&self) -> Vec { - self.1.clone() - } - } - - #[derive(Debug, Clone)] - pub struct TestTransaction(pub TestTransactionHash); - - impl SourceTransaction for TestTransaction { - type Hash = TestTransactionHash; - - fn hash(&self) -> Self::Hash { - self.0 - } - } - - #[derive(Debug, Clone, PartialEq)] - pub struct TestTransactionProof(pub TestTransactionHash); - - #[derive(Clone)] - pub struct TestTransactionsSource { - pub on_tick: Arc, - pub data: Arc>, - } - - pub struct TestTransactionsSourceData { - pub block: Result, - pub transaction_block: Result, TestError>, - pub proofs_to_fail: HashMap, - } - - impl TestTransactionsSource { - pub fn new(on_tick: Box) -> Self { - Self { - on_tick: Arc::new(on_tick), - data: Arc::new(Mutex::new(TestTransactionsSourceData { - block: Ok(test_block()), - transaction_block: Ok(Some((test_block_id(), 0))), - proofs_to_fail: HashMap::new(), - })), - } - } - } - - #[async_trait] - impl RelayClient for TestTransactionsSource { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - Ok(()) - } - } - - #[async_trait] - impl SourceClient for TestTransactionsSource { - async fn tick(&self) { - (self.on_tick)(&mut *self.data.lock()) - } - - async fn block_by_hash(&self, _: TestBlockHash) -> Result { - self.data.lock().block.clone() - } - - async fn block_by_number(&self, _: TestBlockNumber) -> Result { - self.data.lock().block.clone() - } - - async fn transaction_block(&self, _: &TestTransactionHash) -> Result, TestError> { - self.data.lock().transaction_block.clone() - } - - async fn transaction_proof(&self, block: &TestBlock, index: 
usize) -> Result { - let tx_hash = block.1[index].hash(); - let proof_error = self.data.lock().proofs_to_fail.get(&tx_hash).cloned(); - if let Some(err) = proof_error { - return Err(err); - } - - Ok(TestTransactionProof(tx_hash)) - } - } - - #[derive(Clone)] - pub struct TestTransactionsTarget { - pub on_tick: Arc, - pub data: Arc>, - } - - pub struct TestTransactionsTargetData { - pub is_header_known: Result, - pub is_header_finalized: Result, - pub best_finalized_header_id: Result, - pub transactions_to_accept: HashSet, - pub submitted_proofs: Vec, - } - - impl TestTransactionsTarget { - pub fn new(on_tick: Box) -> Self { - Self { - on_tick: Arc::new(on_tick), - data: Arc::new(Mutex::new(TestTransactionsTargetData { - is_header_known: Ok(true), - is_header_finalized: Ok(true), - best_finalized_header_id: Ok(test_block_id()), - transactions_to_accept: vec![test_transaction_hash(0)].into_iter().collect(), - submitted_proofs: Vec::new(), - })), - } - } - } - - #[async_trait] - impl RelayClient for TestTransactionsTarget { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - Ok(()) - } - } - - #[async_trait] - impl TargetClient for TestTransactionsTarget { - async fn tick(&self) { - (self.on_tick)(&mut *self.data.lock()) - } - - async fn is_header_known(&self, _: &TestHeaderId) -> Result { - self.data.lock().is_header_known.clone() - } - - async fn is_header_finalized(&self, _: &TestHeaderId) -> Result { - self.data.lock().is_header_finalized.clone() - } - - async fn best_finalized_header_id(&self) -> Result { - self.data.lock().best_finalized_header_id.clone() - } - - async fn filter_transaction_proof(&self, proof: &TestTransactionProof) -> Result { - Ok(self.data.lock().transactions_to_accept.contains(&proof.0)) - } - - async fn submit_transaction_proof(&self, proof: TestTransactionProof) -> Result<(), TestError> { - self.data.lock().submitted_proofs.push(proof); - Ok(()) - } - } - - fn ensure_relay_single_success(source: 
&TestTransactionsSource, target: &TestTransactionsTarget) { - assert_eq!( - async_std::task::block_on(relay_single_transaction_proof(source, target, test_transaction_hash(0),)), - Ok(()), - ); - assert_eq!( - target.data.lock().submitted_proofs, - vec![TestTransactionProof(test_transaction_hash(0))], - ); - } - - fn ensure_relay_single_failure(source: TestTransactionsSource, target: TestTransactionsTarget) { - assert!(async_std::task::block_on(relay_single_transaction_proof( - &source, - &target, - test_transaction_hash(0), - )) - .is_err()); - assert!(target.data.lock().submitted_proofs.is_empty()); - } - - #[test] - fn ready_transaction_proof_relayed_immediately() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - ensure_relay_single_success(&source, &target) - } - - #[test] - fn relay_transaction_proof_waits_for_transaction_to_be_mined() { - let source = TestTransactionsSource::new(Box::new(|source_data| { - assert_eq!(source_data.transaction_block, Ok(None)); - source_data.transaction_block = Ok(Some((test_block_id(), 0))); - })); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - // transaction is not yet mined, but will be available after first wait (tick) - source.data.lock().transaction_block = Ok(None); - - ensure_relay_single_success(&source, &target) - } - - #[test] - fn relay_transaction_fails_when_transaction_retrieval_fails() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - source.data.lock().transaction_block = Err(TestError(false)); - - ensure_relay_single_failure(source, target) - } - - #[test] - fn relay_transaction_fails_when_proof_retrieval_fails() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks 
allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - source - .data - .lock() - .proofs_to_fail - .insert(test_transaction_hash(0), TestError(false)); - - ensure_relay_single_failure(source, target) - } - - #[test] - fn relay_transaction_proof_waits_for_header_to_be_imported() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|target_data| { - assert_eq!(target_data.is_header_known, Ok(false)); - target_data.is_header_known = Ok(true); - })); - - // header is not yet imported, but will be available after first wait (tick) - target.data.lock().is_header_known = Ok(false); - - ensure_relay_single_success(&source, &target) - } - - #[test] - fn relay_transaction_proof_fails_when_is_header_known_fails() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - target.data.lock().is_header_known = Err(TestError(false)); - - ensure_relay_single_failure(source, target) - } - - #[test] - fn relay_transaction_proof_waits_for_header_to_be_finalized() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|target_data| { - assert_eq!(target_data.is_header_finalized, Ok(false)); - target_data.is_header_finalized = Ok(true); - })); - - // header is not yet finalized, but will be available after first wait (tick) - target.data.lock().is_header_finalized = Ok(false); - - ensure_relay_single_success(&source, &target) - } - - #[test] - fn relay_transaction_proof_fails_when_is_header_finalized_fails() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - 
target.data.lock().is_header_finalized = Err(TestError(false)); - - ensure_relay_single_failure(source, target) - } - - #[test] - fn relay_transaction_proof_fails_when_target_node_rejects_proof() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - target - .data - .lock() - .transactions_to_accept - .remove(&test_transaction_hash(0)); - - ensure_relay_single_success(&source, &target) - } - - fn test_relay_block_transactions( - source: &TestTransactionsSource, - target: &TestTransactionsTarget, - pre_relayed: RelayedBlockTransactions, - ) -> Result { - async_std::task::block_on(relay_block_transactions( - source, - target, - &TestBlock( - test_block_id(), - vec![test_transaction(0), test_transaction(1), test_transaction(2)], - ), - pre_relayed, - )) - .map_err(|(_, transactions)| transactions) - } - - #[test] - fn relay_block_transactions_process_all_transactions() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - // let's only accept tx#1 - target - .data - .lock() - .transactions_to_accept - .remove(&test_transaction_hash(0)); - target - .data - .lock() - .transactions_to_accept - .insert(test_transaction_hash(1)); - - let relayed_transactions = test_relay_block_transactions(&source, &target, Default::default()); - assert_eq!( - relayed_transactions, - Ok(RelayedBlockTransactions { - processed: 3, - relayed: 1, - failed: 0, - }), - ); - assert_eq!( - target.data.lock().submitted_proofs, - vec![TestTransactionProof(test_transaction_hash(1))], - ); - } - - #[test] - fn relay_block_transactions_ignores_transaction_failure() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks 
allowed"))); - - // let's reject proof for tx#0 - source - .data - .lock() - .proofs_to_fail - .insert(test_transaction_hash(0), TestError(false)); - - let relayed_transactions = test_relay_block_transactions(&source, &target, Default::default()); - assert_eq!( - relayed_transactions, - Ok(RelayedBlockTransactions { - processed: 3, - relayed: 0, - failed: 1, - }), - ); - assert_eq!(target.data.lock().submitted_proofs, vec![]); - } - - #[test] - fn relay_block_transactions_fails_on_connection_error() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - // fail with connection error when preparing proof for tx#1 - source - .data - .lock() - .proofs_to_fail - .insert(test_transaction_hash(1), TestError(true)); - - let relayed_transactions = test_relay_block_transactions(&source, &target, Default::default()); - assert_eq!( - relayed_transactions, - Err(RelayedBlockTransactions { - processed: 1, - relayed: 1, - failed: 0, - }), - ); - assert_eq!( - target.data.lock().submitted_proofs, - vec![TestTransactionProof(test_transaction_hash(0))], - ); - - // now do not fail on tx#2 - source.data.lock().proofs_to_fail.clear(); - // and also relay tx#3 - target - .data - .lock() - .transactions_to_accept - .insert(test_transaction_hash(2)); - - let relayed_transactions = test_relay_block_transactions(&source, &target, relayed_transactions.unwrap_err()); - assert_eq!( - relayed_transactions, - Ok(RelayedBlockTransactions { - processed: 3, - relayed: 2, - failed: 0, - }), - ); - assert_eq!( - target.data.lock().submitted_proofs, - vec![ - TestTransactionProof(test_transaction_hash(0)), - TestTransactionProof(test_transaction_hash(2)) - ], - ); - } -} diff --git a/polkadot/bridges/relays/exchange/src/exchange_loop.rs b/polkadot/bridges/relays/exchange/src/exchange_loop.rs deleted file mode 100644 index 
8da4c3f45687da8ee2b561ce7a301e680ddbb3d9..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/exchange/src/exchange_loop.rs +++ /dev/null @@ -1,315 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relaying proofs of exchange transactions. - -use crate::exchange::{ - relay_block_transactions, BlockNumberOf, RelayedBlockTransactions, SourceClient, TargetClient, - TransactionProofPipeline, -}; -use crate::exchange_loop_metrics::ExchangeLoopMetrics; - -use backoff::backoff::Backoff; -use futures::{future::FutureExt, select}; -use num_traits::One; -use relay_utils::{ - metrics::{GlobalMetrics, MetricsParams}, - retry_backoff, FailedClient, MaybeConnectionError, -}; -use std::future::Future; - -/// Transactions proofs relay state. -#[derive(Debug)] -pub struct TransactionProofsRelayState { - /// Number of last header we have processed so far. - pub best_processed_header_number: BlockNumber, -} - -/// Transactions proofs relay storage. -pub trait TransactionProofsRelayStorage: 'static + Clone + Send + Sync { - /// Associated block number. - type BlockNumber: 'static + Send + Sync; - - /// Get relay state. - fn state(&self) -> TransactionProofsRelayState; - /// Update relay state. 
- fn set_state(&mut self, state: &TransactionProofsRelayState); -} - -/// In-memory storage for auto-relay loop. -#[derive(Debug, Clone)] -pub struct InMemoryStorage { - best_processed_header_number: BlockNumber, -} - -impl InMemoryStorage { - /// Created new in-memory storage with given best processed block number. - pub fn new(best_processed_header_number: BlockNumber) -> Self { - InMemoryStorage { - best_processed_header_number, - } - } -} - -impl TransactionProofsRelayStorage for InMemoryStorage { - type BlockNumber = BlockNumber; - - fn state(&self) -> TransactionProofsRelayState { - TransactionProofsRelayState { - best_processed_header_number: self.best_processed_header_number, - } - } - - fn set_state(&mut self, state: &TransactionProofsRelayState) { - self.best_processed_header_number = state.best_processed_header_number; - } -} - -/// Return prefix that will be used by default to expose Prometheus metrics of the exchange loop. -pub fn metrics_prefix() -> String { - format!("{}_to_{}_Exchange", P::SOURCE_NAME, P::TARGET_NAME) -} - -/// Run proofs synchronization. -pub async fn run( - storage: impl TransactionProofsRelayStorage>, - source_client: impl SourceClient

, - target_client: impl TargetClient

, - metrics_params: MetricsParams, - exit_signal: impl Future + 'static + Send, -) -> Result<(), String> { - let exit_signal = exit_signal.shared(); - - relay_utils::relay_loop(source_client, target_client) - .with_metrics(Some(metrics_prefix::

()), metrics_params) - .loop_metric(|registry, prefix| ExchangeLoopMetrics::new(registry, prefix))? - .standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))? - .expose() - .await? - .run(metrics_prefix::

(), move |source_client, target_client, metrics| { - run_until_connection_lost( - storage.clone(), - source_client, - target_client, - metrics, - exit_signal.clone(), - ) - }) - .await -} - -/// Run proofs synchronization. -async fn run_until_connection_lost( - mut storage: impl TransactionProofsRelayStorage>, - source_client: impl SourceClient

, - target_client: impl TargetClient

, - metrics_exch: Option, - exit_signal: impl Future + Send, -) -> Result<(), FailedClient> { - let mut retry_backoff = retry_backoff(); - let mut state = storage.state(); - let mut current_finalized_block = None; - - let exit_signal = exit_signal.fuse(); - - futures::pin_mut!(exit_signal); - - loop { - let iteration_result = run_loop_iteration( - &mut storage, - &source_client, - &target_client, - &mut state, - &mut current_finalized_block, - metrics_exch.as_ref(), - ) - .await; - - if let Err((is_connection_error, failed_client)) = iteration_result { - if is_connection_error { - return Err(failed_client); - } - - let retry_timeout = retry_backoff - .next_backoff() - .unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY); - select! { - _ = async_std::task::sleep(retry_timeout).fuse() => {}, - _ = exit_signal => return Ok(()), - } - } else { - retry_backoff.reset(); - - select! { - _ = source_client.tick().fuse() => {}, - _ = exit_signal => return Ok(()), - } - } - } -} - -/// Run exchange loop until we need to break. -async fn run_loop_iteration( - storage: &mut impl TransactionProofsRelayStorage>, - source_client: &impl SourceClient

, - target_client: &impl TargetClient

, - state: &mut TransactionProofsRelayState>, - current_finalized_block: &mut Option<(P::Block, RelayedBlockTransactions)>, - exchange_loop_metrics: Option<&ExchangeLoopMetrics>, -) -> Result<(), (bool, FailedClient)> { - let best_finalized_header_id = match target_client.best_finalized_header_id().await { - Ok(best_finalized_header_id) => { - log::debug!( - target: "bridge", - "Got best finalized {} block from {} node: {:?}", - P::SOURCE_NAME, - P::TARGET_NAME, - best_finalized_header_id, - ); - - best_finalized_header_id - } - Err(err) => { - log::error!( - target: "bridge", - "Failed to retrieve best {} header id from {} node: {:?}. Going to retry...", - P::SOURCE_NAME, - P::TARGET_NAME, - err, - ); - - return Err((err.is_connection_error(), FailedClient::Target)); - } - }; - - loop { - // if we already have some finalized block body, try to relay its transactions - if let Some((block, relayed_transactions)) = current_finalized_block.take() { - let result = relay_block_transactions(source_client, target_client, &block, relayed_transactions).await; - - match result { - Ok(relayed_transactions) => { - log::info!( - target: "bridge", - "Relay has processed {} block #{}. Total/Relayed/Failed transactions: {}/{}/{}", - P::SOURCE_NAME, - state.best_processed_header_number, - relayed_transactions.processed, - relayed_transactions.relayed, - relayed_transactions.failed, - ); - - state.best_processed_header_number = state.best_processed_header_number + One::one(); - storage.set_state(state); - - if let Some(exchange_loop_metrics) = exchange_loop_metrics { - exchange_loop_metrics.update::

( - state.best_processed_header_number, - best_finalized_header_id.0, - relayed_transactions, - ); - } - - // we have just updated state => proceed to next block retrieval - } - Err((failed_client, relayed_transactions)) => { - *current_finalized_block = Some((block, relayed_transactions)); - return Err((true, failed_client)); - } - } - } - - // we may need to retrieve finalized block body from source node - if best_finalized_header_id.0 > state.best_processed_header_number { - let next_block_number = state.best_processed_header_number + One::one(); - let result = source_client.block_by_number(next_block_number).await; - - match result { - Ok(block) => { - *current_finalized_block = Some((block, RelayedBlockTransactions::default())); - - // we have received new finalized block => go back to relay its transactions - continue; - } - Err(err) => { - log::error!( - target: "bridge", - "Failed to retrieve canonical block #{} from {} node: {:?}. Going to retry...", - next_block_number, - P::SOURCE_NAME, - err, - ); - - return Err((err.is_connection_error(), FailedClient::Source)); - } - } - } - - // there are no any transactions we need to relay => wait for new data - return Ok(()); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::exchange::tests::{ - test_next_block, test_next_block_id, test_transaction_hash, TestTransactionProof, TestTransactionsSource, - TestTransactionsTarget, - }; - use futures::{future::FutureExt, stream::StreamExt}; - - #[test] - fn exchange_loop_is_able_to_relay_proofs() { - let storage = InMemoryStorage { - best_processed_header_number: 0, - }; - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no target ticks allowed"))); - let target_data = target.data.clone(); - let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded(); - - let source = TestTransactionsSource::new(Box::new(move |data| { - let transaction1_relayed = target_data - .lock() - .submitted_proofs - 
.contains(&TestTransactionProof(test_transaction_hash(0))); - let transaction2_relayed = target_data - .lock() - .submitted_proofs - .contains(&TestTransactionProof(test_transaction_hash(1))); - match (transaction1_relayed, transaction2_relayed) { - (true, true) => exit_sender.unbounded_send(()).unwrap(), - (true, false) => { - data.block = Ok(test_next_block()); - target_data.lock().best_finalized_header_id = Ok(test_next_block_id()); - target_data - .lock() - .transactions_to_accept - .insert(test_transaction_hash(1)); - } - _ => (), - } - })); - - let _ = async_std::task::block_on(run( - storage, - source, - target, - MetricsParams::disabled(), - exit_receiver.into_future().map(|(_, _)| ()), - )); - } -} diff --git a/polkadot/bridges/relays/exchange/src/exchange_loop_metrics.rs b/polkadot/bridges/relays/exchange/src/exchange_loop_metrics.rs deleted file mode 100644 index 82d3e649d4319ede95e76bb3cf5baeb4efcc3fdc..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/exchange/src/exchange_loop_metrics.rs +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Metrics for currency-exchange relay loop. 
- -use crate::exchange::{BlockNumberOf, RelayedBlockTransactions, TransactionProofPipeline}; -use relay_utils::metrics::{ - metric_name, register, Counter, CounterVec, GaugeVec, Opts, PrometheusError, Registry, U64, -}; - -/// Exchange transactions relay metrics. -#[derive(Clone)] -pub struct ExchangeLoopMetrics { - /// Best finalized block numbers - "processed" and "known". - best_block_numbers: GaugeVec, - /// Number of processed blocks ("total"). - processed_blocks: Counter, - /// Number of processed transactions ("total", "relayed" and "failed"). - processed_transactions: CounterVec, -} - -impl ExchangeLoopMetrics { - /// Create and register exchange loop metrics. - pub fn new(registry: &Registry, prefix: Option<&str>) -> Result { - Ok(ExchangeLoopMetrics { - best_block_numbers: register( - GaugeVec::new( - Opts::new( - metric_name(prefix, "best_block_numbers"), - "Best finalized block numbers", - ), - &["type"], - )?, - registry, - )?, - processed_blocks: register( - Counter::new( - metric_name(prefix, "processed_blocks"), - "Total number of processed blocks", - )?, - registry, - )?, - processed_transactions: register( - CounterVec::new( - Opts::new( - metric_name(prefix, "processed_transactions"), - "Total number of processed transactions", - ), - &["type"], - )?, - registry, - )?, - }) - } -} - -impl ExchangeLoopMetrics { - /// Update metrics when single block is relayed. - pub fn update( - &self, - best_processed_block_number: BlockNumberOf

, - best_known_block_number: BlockNumberOf

, - relayed_transactions: RelayedBlockTransactions, - ) { - self.best_block_numbers - .with_label_values(&["processed"]) - .set(best_processed_block_number.into()); - self.best_block_numbers - .with_label_values(&["known"]) - .set(best_known_block_number.into()); - - self.processed_blocks.inc(); - - self.processed_transactions - .with_label_values(&["total"]) - .inc_by(relayed_transactions.processed as _); - self.processed_transactions - .with_label_values(&["relayed"]) - .inc_by(relayed_transactions.relayed as _); - self.processed_transactions - .with_label_values(&["failed"]) - .inc_by(relayed_transactions.failed as _); - } -} diff --git a/polkadot/bridges/relays/finality/Cargo.toml b/polkadot/bridges/relays/finality/Cargo.toml index 944da9837ffcb5eb0b94b8a4e56e0337c3fcda3c..645ac10775bafa04f2d7a86333f9fe16108217e4 100644 --- a/polkadot/bridges/relays/finality/Cargo.toml +++ b/polkadot/bridges/relays/finality/Cargo.toml @@ -12,7 +12,6 @@ async-trait = "0.1.40" backoff = "0.2" bp-header-chain = { path = "../../primitives/header-chain" } futures = "0.3.5" -headers-relay = { path = "../headers" } log = "0.4.11" num-traits = "0.2" relay-utils = { path = "../utils" } diff --git a/polkadot/bridges/relays/finality/src/finality_loop.rs b/polkadot/bridges/relays/finality/src/finality_loop.rs index 3ea729d123e7d71621327f03f862269cb52de315..320b44d310f0bfdc672778acf9fd46376201c507 100644 --- a/polkadot/bridges/relays/finality/src/finality_loop.rs +++ b/polkadot/bridges/relays/finality/src/finality_loop.rs @@ -19,17 +19,17 @@ //! is the mandatory headers, which we always submit to the target node. For such headers, we //! assume that the persistent proof either exists, or will eventually become available. 
-use crate::{FinalityProof, FinalitySyncPipeline, SourceHeader}; +use crate::{ + sync_loop_metrics::SyncLoopMetrics, FinalityProof, FinalitySyncPipeline, SourceHeader, +}; use async_trait::async_trait; use backoff::backoff::Backoff; use futures::{select, Future, FutureExt, Stream, StreamExt}; -use headers_relay::sync_loop_metrics::SyncLoopMetrics; use num_traits::{One, Saturating}; use relay_utils::{ - metrics::{GlobalMetrics, MetricsParams}, - relay_loop::Client as RelayClient, - retry_backoff, FailedClient, MaybeConnectionError, + metrics::MetricsParams, relay_loop::Client as RelayClient, retry_backoff, FailedClient, + MaybeConnectionError, }; use std::{ pin::Pin, @@ -43,18 +43,19 @@ pub struct FinalitySyncParams { /// `min(source_block_time, target_block_time)`. /// /// This parameter may be used to limit transactions rate. Increase the value && you'll get - /// infrequent updates => sparse headers => potential slow down of bridge applications, but pallet storage - /// won't be super large. Decrease the value to near `source_block_time` and you'll get - /// transaction for (almost) every block of the source chain => all source headers will be known - /// to the target chain => bridge applications will run faster, but pallet storage may explode - /// (but if pruning is there, then it's fine). + /// infrequent updates => sparse headers => potential slow down of bridge applications, but + /// pallet storage won't be super large. Decrease the value to near `source_block_time` and + /// you'll get transaction for (almost) every block of the source chain => all source headers + /// will be known to the target chain => bridge applications will run faster, but pallet + /// storage may explode (but if pruning is there, then it's fine). pub tick: Duration, - /// Number of finality proofs to keep in internal buffer between loop wakeups. + /// Number of finality proofs to keep in internal buffer between loop iterations. 
/// - /// While in "major syncing" state, we still read finality proofs from the stream. They're stored - /// in the internal buffer between loop wakeups. When we're close to the tip of the chain, we may - /// meet finality delays if headers are not finalized frequently. So instead of waiting for next - /// finality proof to appear in the stream, we may use existing proof from that buffer. + /// While in "major syncing" state, we still read finality proofs from the stream. They're + /// stored in the internal buffer between loop iterations. When we're close to the tip of the + /// chain, we may meet finality delays if headers are not finalized frequently. So instead of + /// waiting for next finality proof to appear in the stream, we may use existing proof from + /// that buffer. pub recent_finality_proofs_limit: usize, /// Timeout before we treat our transactions as lost and restart the whole sync process. pub stall_timeout: Duration, @@ -89,10 +90,15 @@ pub trait TargetClient: RelayClient { async fn best_finalized_source_block_number(&self) -> Result; /// Submit header finality proof. - async fn submit_finality_proof(&self, header: P::Header, proof: P::FinalityProof) -> Result<(), Self::Error>; + async fn submit_finality_proof( + &self, + header: P::Header, + proof: P::FinalityProof, + ) -> Result<(), Self::Error>; } -/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs sync loop. +/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs +/// sync loop. 
pub fn metrics_prefix() -> String { format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME) } @@ -104,12 +110,11 @@ pub async fn run( sync_params: FinalitySyncParams, metrics_params: MetricsParams, exit_signal: impl Future + 'static + Send, -) -> Result<(), String> { +) -> Result<(), relay_utils::Error> { let exit_signal = exit_signal.shared(); relay_utils::relay_loop(source_client, target_client) - .with_metrics(Some(metrics_prefix::

()), metrics_params) - .loop_metric(|registry, prefix| SyncLoopMetrics::new(registry, prefix))? - .standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))? + .with_metrics(metrics_params) + .loop_metric(SyncLoopMetrics::new(Some(&metrics_prefix::

()))?)? .expose() .await? .run(metrics_prefix::

(), move |source_client, target_client, metrics| { @@ -127,15 +132,11 @@ pub async fn run( /// Unjustified headers container. Ordered by header number. pub(crate) type UnjustifiedHeaders = Vec; /// Finality proofs container. Ordered by target header number. -pub(crate) type FinalityProofs

= Vec<( -

::Number, -

::FinalityProof, -)>; +pub(crate) type FinalityProofs

= + Vec<(

::Number,

::FinalityProof)>; /// Reference to finality proofs container. -pub(crate) type FinalityProofsRef<'a, P> = &'a [( -

::Number, -

::FinalityProof, -)]; +pub(crate) type FinalityProofsRef<'a, P> = + &'a [(

::Number,

::FinalityProof)]; /// Error that may happen inside finality synchronization loop. #[derive(Debug)] @@ -186,10 +187,7 @@ pub(crate) struct RestartableFinalityProofsStream { #[cfg(test)] impl From for RestartableFinalityProofsStream { fn from(stream: S) -> Self { - RestartableFinalityProofsStream { - needs_restart: false, - stream: Box::pin(stream), - } + RestartableFinalityProofsStream { needs_restart: false, stream: Box::pin(stream) } } } @@ -260,14 +258,12 @@ async fn run_until_connection_lost( last_transaction = updated_last_transaction; retry_backoff.reset(); sync_params.tick - } + }, Err(error) => { log::error!(target: "bridge", "Finality sync loop iteration has failed with error: {:?}", error); error.fail_if_connection_error()?; - retry_backoff - .next_backoff() - .unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY) - } + retry_backoff.next_backoff().unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY) + }, }; if finality_proofs_stream.needs_restart { log::warn!(target: "bridge", "{} finality proofs stream is being restarted", P::SOURCE_NAME); @@ -297,10 +293,8 @@ where TC: TargetClient

, { // read best source headers ids from source and target nodes - let best_number_at_source = source_client - .best_finalized_block_number() - .await - .map_err(Error::Source)?; + let best_number_at_source = + source_client.best_finalized_block_number().await.map_err(Error::Source)?; let best_number_at_target = target_client .best_finalized_source_block_number() .await @@ -309,7 +303,8 @@ where metrics_sync.update_best_block_at_source(best_number_at_source); metrics_sync.update_best_block_at_target(best_number_at_target); } - *state.progress = print_sync_progress::

(*state.progress, best_number_at_source, best_number_at_target); + *state.progress = + print_sync_progress::

(*state.progress, best_number_at_source, best_number_at_target); // if we have already submitted header, then we just need to wait for it // if we're waiting too much, then we believe our transaction has been lost and restart sync @@ -324,9 +319,9 @@ where P::TARGET_NAME, ); - return Err(Error::Stalled); + return Err(Error::Stalled) } else { - return Ok(Some(last_transaction)); + return Ok(Some(last_transaction)) } } @@ -343,10 +338,8 @@ where .await? { Some((header, justification)) => { - let new_transaction = Transaction { - time: Instant::now(), - submitted_header_number: header.number(), - }; + let new_transaction = + Transaction { time: Instant::now(), submitted_header_number: header.number() }; log::debug!( target: "bridge", @@ -361,7 +354,7 @@ where .await .map_err(Error::Target)?; Ok(Some(new_transaction)) - } + }, None => Ok(None), } } @@ -398,15 +391,15 @@ where ) .await?; let (mut unjustified_headers, mut selected_finality_proof) = match selected_finality_proof { - SelectedFinalityProof::Mandatory(header, finality_proof) => return Ok(Some((header, finality_proof))), + SelectedFinalityProof::Mandatory(header, finality_proof) => + return Ok(Some((header, finality_proof))), _ if sync_params.only_mandatory_headers => { // we are not reading finality proofs from the stream, so eventually it'll break // but we don't care about transient proofs at all, so it is acceptable - return Ok(None); - } - SelectedFinalityProof::Regular(unjustified_headers, header, finality_proof) => { - (unjustified_headers, Some((header, finality_proof))) - } + return Ok(None) + }, + SelectedFinalityProof::Regular(unjustified_headers, header, finality_proof) => + (unjustified_headers, Some((header, finality_proof))), SelectedFinalityProof::None(unjustified_headers) => (unjustified_headers, None), }; @@ -451,7 +444,11 @@ pub(crate) enum SelectedFinalityProof { /// Otherwise, `SelectedFinalityProof::None` is returned. 
/// /// Unless we have found mandatory header, all missing headers are collected and returned. -pub(crate) async fn read_missing_headers, TC: TargetClient

>( +pub(crate) async fn read_missing_headers< + P: FinalitySyncPipeline, + SC: SourceClient

, + TC: TargetClient

, +>( source_client: &SC, _target_client: &TC, best_number_at_source: P::Number, @@ -470,22 +467,30 @@ pub(crate) async fn read_missing_headers { log::trace!(target: "bridge", "Header {:?} is mandatory", header_number); - return Ok(SelectedFinalityProof::Mandatory(header, finality_proof)); - } + return Ok(SelectedFinalityProof::Mandatory(header, finality_proof)) + }, (true, None) => return Err(Error::MissingMandatoryFinalityProof(header.number())), (false, Some(finality_proof)) => { log::trace!(target: "bridge", "Header {:?} has persistent finality proof", header_number); unjustified_headers.clear(); selected_finality_proof = Some((header, finality_proof)); - } + }, (false, None) => { unjustified_headers.push(header); - } + }, } header_number = header_number + One::one(); } + log::trace!( + target: "bridge", + "Read {} {} headers. Selected finality proof for header: {:?}", + best_number_at_source.saturating_sub(best_number_at_target), + P::SOURCE_NAME, + selected_finality_proof.as_ref().map(|(header, _)| header), + ); + Ok(match selected_finality_proof { Some((header, proof)) => SelectedFinalityProof::Regular(unjustified_headers, header, proof), None => SelectedFinalityProof::None(unjustified_headers), @@ -493,22 +498,46 @@ pub(crate) async fn read_missing_headers>( +pub(crate) fn read_finality_proofs_from_stream< + P: FinalitySyncPipeline, + FPS: Stream, +>( finality_proofs_stream: &mut RestartableFinalityProofsStream, recent_finality_proofs: &mut FinalityProofs

, ) { + let mut proofs_count = 0; + let mut first_header_number = None; + let mut last_header_number = None; loop { let next_proof = finality_proofs_stream.stream.next(); let finality_proof = match next_proof.now_or_never() { Some(Some(finality_proof)) => finality_proof, Some(None) => { finality_proofs_stream.needs_restart = true; - break; - } + break + }, None => break, }; - recent_finality_proofs.push((finality_proof.target_header_number(), finality_proof)); + let target_header_number = finality_proof.target_header_number(); + if first_header_number.is_none() { + first_header_number = Some(target_header_number); + } + last_header_number = Some(target_header_number); + proofs_count += 1; + + recent_finality_proofs.push((target_header_number, finality_proof)); + } + + if proofs_count != 0 { + log::trace!( + target: "bridge", + "Read {} finality proofs from {} finality stream for headers in range [{:?}; {:?}]", + proofs_count, + P::SOURCE_NAME, + first_header_number, + last_header_number, + ); } } @@ -520,7 +549,13 @@ pub(crate) fn select_better_recent_finality_proof( selected_finality_proof: Option<(P::Header, P::FinalityProof)>, ) -> Option<(P::Header, P::FinalityProof)> { if unjustified_headers.is_empty() || recent_finality_proofs.is_empty() { - return selected_finality_proof; + log::trace!( + target: "bridge", + "Can not improve selected {} finality proof {:?}. 
No unjustified headers and recent proofs", + P::SOURCE_NAME, + selected_finality_proof.as_ref().map(|(h, _)| h.number()), + ); + return selected_finality_proof } const NOT_EMPTY_PROOF: &str = "we have checked that the vec is not empty; qed"; @@ -542,9 +577,24 @@ pub(crate) fn select_better_recent_finality_proof( let selected_finality_proof_index = recent_finality_proofs .binary_search_by_key(intersection.end(), |(number, _)| *number) .unwrap_or_else(|index| index.saturating_sub(1)); - let (selected_header_number, finality_proof) = &recent_finality_proofs[selected_finality_proof_index]; - if !intersection.contains(selected_header_number) { - return selected_finality_proof; + let (selected_header_number, finality_proof) = + &recent_finality_proofs[selected_finality_proof_index]; + let has_selected_finality_proof = intersection.contains(selected_header_number); + log::trace!( + target: "bridge", + "Trying to improve selected {} finality proof {:?}. Headers range: [{:?}; {:?}]. Proofs range: [{:?}; {:?}].\ + Trying to improve to: {:?}. Result: {}", + P::SOURCE_NAME, + selected_finality_proof.as_ref().map(|(h, _)| h.number()), + unjustified_range_begin, + unjustified_range_end, + buffered_range_begin, + buffered_range_end, + selected_header_number, + if has_selected_finality_proof { "improved" } else { "not improved" }, + ); + if !has_selected_finality_proof { + return selected_finality_proof } // now remove all obsolete headers and extract selected header @@ -560,20 +610,15 @@ pub(crate) fn prune_recent_finality_proofs( recent_finality_proofs: &mut FinalityProofs

, recent_finality_proofs_limit: usize, ) { - let position = - recent_finality_proofs.binary_search_by_key(&justified_header_number, |(header_number, _)| *header_number); + let position = recent_finality_proofs + .binary_search_by_key(&justified_header_number, |(header_number, _)| *header_number); // remove all obsolete elements - *recent_finality_proofs = recent_finality_proofs.split_off( - position - .map(|position| position + 1) - .unwrap_or_else(|position| position), - ); + *recent_finality_proofs = recent_finality_proofs + .split_off(position.map(|position| position + 1).unwrap_or_else(|position| position)); // now - limit vec by size - let split_index = recent_finality_proofs - .len() - .saturating_sub(recent_finality_proofs_limit); + let split_index = recent_finality_proofs.len().saturating_sub(recent_finality_proofs_limit); *recent_finality_proofs = recent_finality_proofs.split_off(split_index); } @@ -585,15 +630,15 @@ fn print_sync_progress( let (prev_time, prev_best_number_at_target) = progress_context; let now = Instant::now(); - let need_update = now - prev_time > Duration::from_secs(10) - || prev_best_number_at_target + let need_update = now - prev_time > Duration::from_secs(10) || + prev_best_number_at_target .map(|prev_best_number_at_target| { best_number_at_target.saturating_sub(prev_best_number_at_target) > 10.into() }) .unwrap_or(true); if !need_update { - return (prev_time, prev_best_number_at_target); + return (prev_time, prev_best_number_at_target) } log::info!( diff --git a/polkadot/bridges/relays/finality/src/finality_loop_tests.rs b/polkadot/bridges/relays/finality/src/finality_loop_tests.rs index d29c55cc4c3f81fd1de15510808e68eedd1584d0..e8f42593d1e36fcc76e5b851bf41f133c8f9118b 100644 --- a/polkadot/bridges/relays/finality/src/finality_loop_tests.rs +++ b/polkadot/bridges/relays/finality/src/finality_loop_tests.rs @@ -18,17 +18,21 @@ #![cfg(test)] -use crate::finality_loop::{ - prune_recent_finality_proofs, read_finality_proofs_from_stream, 
run, select_better_recent_finality_proof, - select_header_to_submit, FinalityProofs, FinalitySyncParams, RestartableFinalityProofsStream, SourceClient, - TargetClient, +use crate::{ + finality_loop::{ + prune_recent_finality_proofs, read_finality_proofs_from_stream, run, + select_better_recent_finality_proof, select_header_to_submit, FinalityProofs, + FinalitySyncParams, RestartableFinalityProofsStream, SourceClient, TargetClient, + }, + FinalityProof, FinalitySyncPipeline, SourceHeader, }; -use crate::{FinalityProof, FinalitySyncPipeline, SourceHeader}; use async_trait::async_trait; use futures::{FutureExt, Stream, StreamExt}; use parking_lot::Mutex; -use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient, MaybeConnectionError}; +use relay_utils::{ + metrics::MetricsParams, relay_loop::Client as RelayClient, MaybeConnectionError, +}; use std::{collections::HashMap, pin::Pin, sync::Arc, time::Duration}; type IsMandatory = bool; @@ -121,10 +125,7 @@ impl SourceClient for TestSourceClient { ) -> Result<(TestSourceHeader, Option), TestError> { let mut data = self.data.lock(); (self.on_method_call)(&mut *data); - data.source_headers - .get(&number) - .cloned() - .ok_or(TestError::NonConnection) + data.source_headers.get(&number).cloned().ok_or(TestError::NonConnection) } async fn finality_proofs(&self) -> Result { @@ -157,7 +158,11 @@ impl TargetClient for TestTargetClient { Ok(data.target_best_block_number) } - async fn submit_finality_proof(&self, header: TestSourceHeader, proof: TestFinalityProof) -> Result<(), TestError> { + async fn submit_finality_proof( + &self, + header: TestSourceHeader, + proof: TestFinalityProof, + ) -> Result<(), TestError> { let mut data = self.data.lock(); (self.on_method_call)(&mut *data); data.target_best_block_number = header.number(); @@ -171,11 +176,12 @@ fn prepare_test_clients( state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static, source_headers: HashMap)>, ) -> (TestSourceClient, 
TestTargetClient) { - let internal_state_function: Arc = Arc::new(move |data| { - if state_function(data) { - exit_sender.unbounded_send(()).unwrap(); - } - }); + let internal_state_function: Arc = + Arc::new(move |data| { + if state_function(data) { + exit_sender.unbounded_send(()).unwrap(); + } + }); let clients_data = Arc::new(Mutex::new(ClientsData { source_best_block_number: 10, source_headers, @@ -189,14 +195,13 @@ fn prepare_test_clients( on_method_call: internal_state_function.clone(), data: clients_data.clone(), }, - TestTargetClient { - on_method_call: internal_state_function, - data: clients_data, - }, + TestTargetClient { on_method_call: internal_state_function, data: clients_data }, ) } -fn run_sync_loop(state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static) -> ClientsData { +fn run_sync_loop( + state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static, +) -> ClientsData { let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded(); let (source_client, target_client) = prepare_test_clients( exit_sender, @@ -234,12 +239,13 @@ fn run_sync_loop(state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync #[test] fn finality_sync_loop_works() { let client_data = run_sync_loop(|data| { - // header#7 has persistent finality proof, but it isn't mandatory => it isn't submitted, because - // header#8 has persistent finality proof && it is mandatory => it is submitted - // header#9 has persistent finality proof, but it isn't mandatory => it is submitted, because - // there are no more persistent finality proofs + // header#7 has persistent finality proof, but it isn't mandatory => it isn't submitted, + // because header#8 has persistent finality proof && it is mandatory => it is submitted + // header#9 has persistent finality proof, but it isn't mandatory => it is submitted, + // because there are no more persistent finality proofs // - // once this ^^^ is done, we generate more blocks && read proof for blocks 12 
and 14 from the stream + // once this ^^^ is done, we generate more blocks && read proof for blocks 12 and 14 from + // the stream if data.target_best_block_number == 9 { data.source_best_block_number = 14; data.source_headers.insert(11, (TestSourceHeader(false, 11), None)); @@ -287,10 +293,7 @@ fn run_only_mandatory_headers_mode_test( vec![ (6, (TestSourceHeader(false, 6), Some(TestFinalityProof(6)))), (7, (TestSourceHeader(false, 7), Some(TestFinalityProof(7)))), - ( - 8, - (TestSourceHeader(has_mandatory_headers, 8), Some(TestFinalityProof(8))), - ), + (8, (TestSourceHeader(has_mandatory_headers, 8), Some(TestFinalityProof(8)))), (9, (TestSourceHeader(false, 9), Some(TestFinalityProof(9)))), (10, (TestSourceHeader(false, 10), Some(TestFinalityProof(10)))), ] @@ -357,7 +360,8 @@ fn select_better_recent_finality_proof_works() { Some((TestSourceHeader(false, 2), TestFinalityProof(2))), ); - // if there's no intersection between recent finality proofs and unjustified headers, nothing is changed + // if there's no intersection between recent finality proofs and unjustified headers, nothing is + // changed let mut unjustified_headers = vec![TestSourceHeader(false, 9), TestSourceHeader(false, 10)]; assert_eq!( select_better_recent_finality_proof::( @@ -368,13 +372,10 @@ fn select_better_recent_finality_proof_works() { Some((TestSourceHeader(false, 2), TestFinalityProof(2))), ); - // if there's intersection between recent finality proofs and unjustified headers, but there are no - // proofs in this intersection, nothing is changed - let mut unjustified_headers = vec![ - TestSourceHeader(false, 8), - TestSourceHeader(false, 9), - TestSourceHeader(false, 10), - ]; + // if there's intersection between recent finality proofs and unjustified headers, but there are + // no proofs in this intersection, nothing is changed + let mut unjustified_headers = + vec![TestSourceHeader(false, 8), TestSourceHeader(false, 9), TestSourceHeader(false, 10)]; assert_eq!( 
select_better_recent_finality_proof::( &[(7, TestFinalityProof(7)), (11, TestFinalityProof(11))], @@ -385,22 +386,15 @@ fn select_better_recent_finality_proof_works() { ); assert_eq!( unjustified_headers, - vec![ - TestSourceHeader(false, 8), - TestSourceHeader(false, 9), - TestSourceHeader(false, 10) - ] + vec![TestSourceHeader(false, 8), TestSourceHeader(false, 9), TestSourceHeader(false, 10)] ); // if there's intersection between recent finality proofs and unjustified headers and there's // a proof in this intersection: // - this better (last from intersection) proof is selected; // - 'obsolete' unjustified headers are pruned. - let mut unjustified_headers = vec![ - TestSourceHeader(false, 8), - TestSourceHeader(false, 9), - TestSourceHeader(false, 10), - ]; + let mut unjustified_headers = + vec![TestSourceHeader(false, 8), TestSourceHeader(false, 9), TestSourceHeader(false, 10)]; assert_eq!( select_better_recent_finality_proof::( &[(7, TestFinalityProof(7)), (9, TestFinalityProof(9))], @@ -416,7 +410,10 @@ fn read_finality_proofs_from_stream_works() { // when stream is currently empty, nothing is changed let mut recent_finality_proofs = vec![(1, TestFinalityProof(1))]; let mut stream = futures::stream::pending().into(); - read_finality_proofs_from_stream::(&mut stream, &mut recent_finality_proofs); + read_finality_proofs_from_stream::( + &mut stream, + &mut recent_finality_proofs, + ); assert_eq!(recent_finality_proofs, vec![(1, TestFinalityProof(1))]); assert!(!stream.needs_restart); @@ -424,20 +421,20 @@ fn read_finality_proofs_from_stream_works() { let mut stream = futures::stream::iter(vec![TestFinalityProof(4)]) .chain(futures::stream::pending()) .into(); - read_finality_proofs_from_stream::(&mut stream, &mut recent_finality_proofs); - assert_eq!( - recent_finality_proofs, - vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))] + read_finality_proofs_from_stream::( + &mut stream, + &mut recent_finality_proofs, ); + assert_eq!(recent_finality_proofs, 
vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))]); assert!(!stream.needs_restart); // when stream has ended, we'll need to restart it let mut stream = futures::stream::empty().into(); - read_finality_proofs_from_stream::(&mut stream, &mut recent_finality_proofs); - assert_eq!( - recent_finality_proofs, - vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))] + read_finality_proofs_from_stream::( + &mut stream, + &mut recent_finality_proofs, ); + assert_eq!(recent_finality_proofs, vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))]); assert!(stream.needs_restart); } diff --git a/polkadot/bridges/relays/finality/src/lib.rs b/polkadot/bridges/relays/finality/src/lib.rs index 64ec5bed05005ff4664660b27feb094bee157675..6421d13b787c8463c9a89e9cca3db8e15144cc81 100644 --- a/polkadot/bridges/relays/finality/src/lib.rs +++ b/polkadot/bridges/relays/finality/src/lib.rs @@ -19,13 +19,16 @@ //! are still submitted to the target node, but are treated as auxiliary data as we are not trying //! to submit all source headers to the target node. -pub use crate::finality_loop::{metrics_prefix, run, FinalitySyncParams, SourceClient, TargetClient}; +pub use crate::finality_loop::{ + metrics_prefix, run, FinalitySyncParams, SourceClient, TargetClient, +}; use bp_header_chain::FinalityProof; use std::fmt::Debug; mod finality_loop; mod finality_loop_tests; +mod sync_loop_metrics; /// Finality proofs synchronization pipeline. pub trait FinalitySyncPipeline: 'static + Clone + Debug + Send + Sync { diff --git a/polkadot/bridges/relays/finality/src/sync_loop_metrics.rs b/polkadot/bridges/relays/finality/src/sync_loop_metrics.rs new file mode 100644 index 0000000000000000000000000000000000000000..1f65dac17c05eae0d81f5934a4250b8beef3fbbb --- /dev/null +++ b/polkadot/bridges/relays/finality/src/sync_loop_metrics.rs @@ -0,0 +1,64 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Metrics for headers synchronization relay loop. + +use relay_utils::metrics::{ + metric_name, register, GaugeVec, Metric, Opts, PrometheusError, Registry, U64, +}; + +/// Headers sync metrics. +#[derive(Clone)] +pub struct SyncLoopMetrics { + /// Best syncing headers at "source" and "target" nodes. + best_block_numbers: GaugeVec, +} + +impl SyncLoopMetrics { + /// Create and register headers loop metrics. + pub fn new(prefix: Option<&str>) -> Result { + Ok(SyncLoopMetrics { + best_block_numbers: GaugeVec::new( + Opts::new( + metric_name(prefix, "best_block_numbers"), + "Best block numbers on source and target nodes", + ), + &["node"], + )?, + }) + } + + /// Update best block number at source. + pub fn update_best_block_at_source>(&self, source_best_number: Number) { + self.best_block_numbers + .with_label_values(&["source"]) + .set(source_best_number.into()); + } + + /// Update best block number at target. 
+ pub fn update_best_block_at_target>(&self, target_best_number: Number) { + self.best_block_numbers + .with_label_values(&["target"]) + .set(target_best_number.into()); + } +} + +impl Metric for SyncLoopMetrics { + fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { + register(self.best_block_numbers.clone(), registry)?; + Ok(()) + } +} diff --git a/polkadot/bridges/relays/headers/Cargo.toml b/polkadot/bridges/relays/headers/Cargo.toml deleted file mode 100644 index 31d3166a99781a013c8211d7b68586e778e63ffb..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/headers/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "headers-relay" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -async-std = "1.6.5" -async-trait = "0.1.40" -backoff = "0.2" -futures = "0.3.5" -linked-hash-map = "0.5.3" -log = "0.4.11" -num-traits = "0.2" -parking_lot = "0.11.0" -relay-utils = { path = "../utils" } diff --git a/polkadot/bridges/relays/headers/src/headers.rs b/polkadot/bridges/relays/headers/src/headers.rs deleted file mode 100644 index 0b948d9da4cc6e0bc6ae89cb133dd410e513aec5..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/headers/src/headers.rs +++ /dev/null @@ -1,1721 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Headers queue - the intermediate buffer that is filled when headers are read -//! from the source chain. Headers are removed from the queue once they become -//! known to the target chain. Inside, there are several sub-queues, where headers -//! may stay until source/target chain state isn't updated. When a header reaches the -//! `ready` sub-queue, it may be submitted to the target chain. - -use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SourceHeader}; - -use linked_hash_map::LinkedHashMap; -use num_traits::{One, Zero}; -use relay_utils::HeaderId; -use std::{ - collections::{btree_map::Entry as BTreeMapEntry, hash_map::Entry as HashMapEntry, BTreeMap, HashMap, HashSet}, - time::{Duration, Instant}, -}; - -type HeadersQueue

= - BTreeMap<

::Number, HashMap<

::Hash, QueuedHeader

>>; -type SyncedChildren

= - BTreeMap<

::Number, HashMap<

::Hash, HashSet>>>; -type KnownHeaders

= - BTreeMap<

::Number, HashMap<

::Hash, HeaderStatus>>; - -/// We're trying to fetch completion data for single header at this interval. -const RETRY_FETCH_COMPLETION_INTERVAL: Duration = Duration::from_secs(20); - -/// Headers queue. -#[derive(Debug)] -pub struct QueuedHeaders { - /// Headers that are received from source node, but we (native sync code) have - /// never seen their parents. So we need to check if we can/should submit this header. - maybe_orphan: HeadersQueue

, - /// Headers that are received from source node, and we (native sync code) have - /// checked that Substrate runtime doesn't know their parents. So we need to submit parents - /// first. - orphan: HeadersQueue

, - /// Headers that are ready to be submitted to target node, but we need to check - /// whether submission requires extra data to be provided. - maybe_extra: HeadersQueue

, - /// Headers that are ready to be submitted to target node, but we need to retrieve - /// extra data first. - extra: HeadersQueue

, - /// Headers that are ready to be submitted to target node. - ready: HeadersQueue

, - /// Headers that are ready to be submitted to target node, but their ancestor is incomplete. - /// Thus we're waiting for these ancestors to be completed first. - /// Note that the incomplete header itself is synced and it isn't in this queue. - incomplete: HeadersQueue

, - /// Headers that are (we believe) currently submitted to target node by our, - /// not-yet mined transactions. - submitted: HeadersQueue

, - /// Synced headers childrens. We need it to support case when header is synced, but some of - /// its parents are incomplete. - synced_children: SyncedChildren

, - /// Pointers to all headers that we ever seen and we believe we can touch in the future. - known_headers: KnownHeaders

, - /// Headers that are waiting for completion data from source node. Mapped (and auto-sorted - /// by) to the last fetch time. - incomplete_headers: LinkedHashMap, Option>, - /// Headers that are waiting to be completed at target node. Auto-sorted by insertion time. - completion_data: LinkedHashMap, P::Completion>, - /// Best synced block number. - best_synced_number: P::Number, - /// Pruned blocks border. We do not store or accept any blocks with number less than - /// this number. - prune_border: P::Number, -} - -/// Header completion data. -#[derive(Debug)] -struct HeaderCompletion { - /// Last time when we tried to upload completion data to target node, if ever. - pub last_upload_time: Option, - /// Completion data. - pub completion: Completion, -} - -impl Default for QueuedHeaders

{ - fn default() -> Self { - QueuedHeaders { - maybe_orphan: HeadersQueue::new(), - orphan: HeadersQueue::new(), - maybe_extra: HeadersQueue::new(), - extra: HeadersQueue::new(), - ready: HeadersQueue::new(), - incomplete: HeadersQueue::new(), - submitted: HeadersQueue::new(), - synced_children: SyncedChildren::

::new(), - known_headers: KnownHeaders::

::new(), - incomplete_headers: LinkedHashMap::new(), - completion_data: LinkedHashMap::new(), - best_synced_number: Zero::zero(), - prune_border: Zero::zero(), - } - } -} - -impl QueuedHeaders

{ - /// Returns prune border. - #[cfg(test)] - pub fn prune_border(&self) -> P::Number { - self.prune_border - } - - /// Returns number of headers that are currently in given queue. - pub fn headers_in_status(&self, status: HeaderStatus) -> usize { - match status { - HeaderStatus::Unknown | HeaderStatus::Synced => 0, - HeaderStatus::MaybeOrphan => self - .maybe_orphan - .values() - .fold(0, |total, headers| total + headers.len()), - HeaderStatus::Orphan => self.orphan.values().fold(0, |total, headers| total + headers.len()), - HeaderStatus::MaybeExtra => self - .maybe_extra - .values() - .fold(0, |total, headers| total + headers.len()), - HeaderStatus::Extra => self.extra.values().fold(0, |total, headers| total + headers.len()), - HeaderStatus::Ready => self.ready.values().fold(0, |total, headers| total + headers.len()), - HeaderStatus::Incomplete => self.incomplete.values().fold(0, |total, headers| total + headers.len()), - HeaderStatus::Submitted => self.submitted.values().fold(0, |total, headers| total + headers.len()), - } - } - - /// Returns number of headers that are currently in the queue. - pub fn total_headers(&self) -> usize { - self.maybe_orphan - .values() - .fold(0, |total, headers| total + headers.len()) - + self.orphan.values().fold(0, |total, headers| total + headers.len()) - + self - .maybe_extra - .values() - .fold(0, |total, headers| total + headers.len()) - + self.extra.values().fold(0, |total, headers| total + headers.len()) - + self.ready.values().fold(0, |total, headers| total + headers.len()) - + self.incomplete.values().fold(0, |total, headers| total + headers.len()) - } - - /// Returns number of best block in the queue. 
- pub fn best_queued_number(&self) -> P::Number { - std::cmp::max( - self.maybe_orphan.keys().next_back().cloned().unwrap_or_else(Zero::zero), - std::cmp::max( - self.orphan.keys().next_back().cloned().unwrap_or_else(Zero::zero), - std::cmp::max( - self.maybe_extra.keys().next_back().cloned().unwrap_or_else(Zero::zero), - std::cmp::max( - self.extra.keys().next_back().cloned().unwrap_or_else(Zero::zero), - std::cmp::max( - self.ready.keys().next_back().cloned().unwrap_or_else(Zero::zero), - std::cmp::max( - self.incomplete.keys().next_back().cloned().unwrap_or_else(Zero::zero), - self.submitted.keys().next_back().cloned().unwrap_or_else(Zero::zero), - ), - ), - ), - ), - ), - ) - } - - /// Returns number of best synced block we have ever seen. It is either less - /// than `best_queued_number()`, or points to last synced block if queue is empty. - pub fn best_synced_number(&self) -> P::Number { - self.best_synced_number - } - - /// Returns synchronization status of the header. - pub fn status(&self, id: &HeaderIdOf

) -> HeaderStatus { - self.known_headers - .get(&id.0) - .and_then(|x| x.get(&id.1)) - .cloned() - .unwrap_or(HeaderStatus::Unknown) - } - - /// Get oldest header from given queue. - pub fn header(&self, status: HeaderStatus) -> Option<&QueuedHeader

> { - match status { - HeaderStatus::Unknown | HeaderStatus::Synced => None, - HeaderStatus::MaybeOrphan => oldest_header(&self.maybe_orphan), - HeaderStatus::Orphan => oldest_header(&self.orphan), - HeaderStatus::MaybeExtra => oldest_header(&self.maybe_extra), - HeaderStatus::Extra => oldest_header(&self.extra), - HeaderStatus::Ready => oldest_header(&self.ready), - HeaderStatus::Incomplete => oldest_header(&self.incomplete), - HeaderStatus::Submitted => oldest_header(&self.submitted), - } - } - - /// Get oldest headers from given queue until functor will return false. - pub fn headers( - &self, - status: HeaderStatus, - f: impl FnMut(&QueuedHeader

) -> bool, - ) -> Option>> { - match status { - HeaderStatus::Unknown | HeaderStatus::Synced => None, - HeaderStatus::MaybeOrphan => oldest_headers(&self.maybe_orphan, f), - HeaderStatus::Orphan => oldest_headers(&self.orphan, f), - HeaderStatus::MaybeExtra => oldest_headers(&self.maybe_extra, f), - HeaderStatus::Extra => oldest_headers(&self.extra, f), - HeaderStatus::Ready => oldest_headers(&self.ready, f), - HeaderStatus::Incomplete => oldest_headers(&self.incomplete, f), - HeaderStatus::Submitted => oldest_headers(&self.submitted, f), - } - } - - /// Appends new header, received from the source node, to the queue. - pub fn header_response(&mut self, header: P::Header) { - let id = header.id(); - let status = self.status(&id); - if status != HeaderStatus::Unknown { - log::debug!( - target: "bridge", - "Ignoring new {} header: {:?}. Status is {:?}.", - P::SOURCE_NAME, - id, - status, - ); - return; - } - - if id.0 < self.prune_border { - log::debug!( - target: "bridge", - "Ignoring ancient new {} header: {:?}.", - P::SOURCE_NAME, - id, - ); - return; - } - - let parent_id = header.parent_id(); - let parent_status = self.status(&parent_id); - let header = QueuedHeader::new(header); - - let status = match parent_status { - HeaderStatus::Unknown | HeaderStatus::MaybeOrphan => { - insert_header(&mut self.maybe_orphan, id, header); - HeaderStatus::MaybeOrphan - } - HeaderStatus::Orphan => { - insert_header(&mut self.orphan, id, header); - HeaderStatus::Orphan - } - HeaderStatus::MaybeExtra - | HeaderStatus::Extra - | HeaderStatus::Ready - | HeaderStatus::Incomplete - | HeaderStatus::Submitted - | HeaderStatus::Synced => { - insert_header(&mut self.maybe_extra, id, header); - HeaderStatus::MaybeExtra - } - }; - - self.known_headers.entry(id.0).or_default().insert(id.1, status); - log::debug!( - target: "bridge", - "Queueing new {} header: {:?}. Queue: {:?}.", - P::SOURCE_NAME, - id, - status, - ); - } - - /// Receive best header from the target node. 
- pub fn target_best_header_response(&mut self, id: &HeaderIdOf

) { - self.header_synced(id) - } - - /// Receive target node response for MaybeOrphan request. - pub fn maybe_orphan_response(&mut self, id: &HeaderIdOf

, response: bool) { - if !response { - move_header_descendants::

( - &mut [&mut self.maybe_orphan], - &mut self.orphan, - &mut self.known_headers, - HeaderStatus::Orphan, - id, - ); - return; - } - - move_header_descendants::

( - &mut [&mut self.maybe_orphan, &mut self.orphan], - &mut self.maybe_extra, - &mut self.known_headers, - HeaderStatus::MaybeExtra, - id, - ); - } - - /// Receive target node response for MaybeExtra request. - pub fn maybe_extra_response(&mut self, id: &HeaderIdOf

, response: bool) { - let (destination_status, destination_queue) = if response { - (HeaderStatus::Extra, &mut self.extra) - } else if self.is_parent_incomplete(id) { - (HeaderStatus::Incomplete, &mut self.incomplete) - } else { - (HeaderStatus::Ready, &mut self.ready) - }; - - move_header( - &mut self.maybe_extra, - destination_queue, - &mut self.known_headers, - destination_status, - id, - |header| header, - ); - } - - /// Receive extra from source node. - pub fn extra_response(&mut self, id: &HeaderIdOf

, extra: P::Extra) { - let (destination_status, destination_queue) = if self.is_parent_incomplete(id) { - (HeaderStatus::Incomplete, &mut self.incomplete) - } else { - (HeaderStatus::Ready, &mut self.ready) - }; - - // move header itself from extra to ready queue - move_header( - &mut self.extra, - destination_queue, - &mut self.known_headers, - destination_status, - id, - |header| header.set_extra(extra), - ); - } - - /// Receive completion response from source node. - pub fn completion_response(&mut self, id: &HeaderIdOf

, completion: Option) { - let completion = match completion { - Some(completion) => completion, - None => { - log::debug!( - target: "bridge", - "{} Node is still missing completion data for header: {:?}. Will retry later.", - P::SOURCE_NAME, - id, - ); - - return; - } - }; - - // do not remove from `incomplete_headers` here, because otherwise we'll miss - // completion 'notification' - // this could lead to duplicate completion retrieval (if completion transaction isn't mined - // for too long) - // - // instead, we're moving entry to the end of the queue, so that completion data won't be - // refetched instantly - if self.incomplete_headers.remove(id).is_some() { - log::debug!( - target: "bridge", - "Received completion data from {} for header: {:?}", - P::SOURCE_NAME, - id, - ); - - self.completion_data.insert(*id, completion); - self.incomplete_headers.insert(*id, Some(Instant::now())); - } - } - - /// When header is submitted to target node. - pub fn headers_submitted(&mut self, ids: Vec>) { - for id in ids { - move_header( - &mut self.ready, - &mut self.submitted, - &mut self.known_headers, - HeaderStatus::Submitted, - &id, - |header| header, - ); - } - } - - /// When header completion data is sent to target node. - pub fn header_completed(&mut self, id: &HeaderIdOf

) { - if self.completion_data.remove(id).is_some() { - log::debug!( - target: "bridge", - "Sent completion data to {} for header: {:?}", - P::TARGET_NAME, - id, - ); - - // transaction can be dropped by target chain nodes => it would never be mined - // - // in current implementation the sync loop would wait for some time && if best - // **source** header won't change on **target** node, then the sync will be restarted - // => we'll resubmit the same completion data again (the same is true for submitted - // headers) - // - // the other option would be to track emitted transactions at least on target node, - // but it won't give us 100% guarantee anyway - // - // => we're just dropping completion data just after it has been submitted - } - } - - /// Marks given headers incomplete. - pub fn add_incomplete_headers(&mut self, make_header_incomplete: bool, new_incomplete_headers: Vec>) { - for new_incomplete_header in new_incomplete_headers { - if make_header_incomplete { - self.header_synced(&new_incomplete_header); - } - - let move_origins = select_synced_children::

(&self.synced_children, &new_incomplete_header); - let move_origins = move_origins.into_iter().chain(std::iter::once(new_incomplete_header)); - for move_origin in move_origins { - move_header_descendants::

( - &mut [&mut self.ready, &mut self.submitted], - &mut self.incomplete, - &mut self.known_headers, - HeaderStatus::Incomplete, - &move_origin, - ); - } - - if make_header_incomplete { - log::debug!( - target: "bridge", - "Scheduling completion data retrieval for header: {:?}", - new_incomplete_header, - ); - - self.incomplete_headers.insert(new_incomplete_header, None); - } - } - } - - /// When incomplete headers ids are receved from target node. - pub fn incomplete_headers_response(&mut self, ids: HashSet>) { - // all new incomplete headers are marked Synced and all their descendants - // are moved from Ready/Submitted to Incomplete queue - let new_incomplete_headers = ids - .iter() - .filter(|id| !self.incomplete_headers.contains_key(id) && !self.completion_data.contains_key(id)) - .cloned() - .collect::>(); - self.add_incomplete_headers(true, new_incomplete_headers); - - // for all headers that were incompleted previously, but now are completed, we move - // all descendants from incomplete to ready - let just_completed_headers = self - .incomplete_headers - .keys() - .chain(self.completion_data.keys()) - .filter(|id| !ids.contains(id)) - .cloned() - .collect::>(); - for just_completed_header in just_completed_headers { - // sub2eth rejects H if H.Parent is incomplete - // sub2sub allows 'syncing' headers like that - // => let's check if there are some synced children of just completed header - let move_origins = select_synced_children::

(&self.synced_children, &just_completed_header); - let move_origins = move_origins.into_iter().chain(std::iter::once(just_completed_header)); - for move_origin in move_origins { - move_header_descendants::

( - &mut [&mut self.incomplete], - &mut self.ready, - &mut self.known_headers, - HeaderStatus::Ready, - &move_origin, - ); - } - - log::debug!( - target: "bridge", - "Completion data is no longer required for header: {:?}", - just_completed_header, - ); - - self.incomplete_headers.remove(&just_completed_header); - self.completion_data.remove(&just_completed_header); - } - } - - /// Returns true if given header requires completion data. - pub fn requires_completion_data(&self, id: &HeaderIdOf

) -> bool { - self.incomplete_headers.contains_key(id) - } - - /// Returns id of the header for which we want to fetch completion data. - pub fn incomplete_header(&mut self) -> Option> { - queued_incomplete_header(&mut self.incomplete_headers, |last_fetch_time| { - let retry = match *last_fetch_time { - Some(last_fetch_time) => last_fetch_time.elapsed() > RETRY_FETCH_COMPLETION_INTERVAL, - None => true, - }; - - if retry { - *last_fetch_time = Some(Instant::now()); - } - - retry - }) - .map(|(id, _)| id) - } - - /// Returns header completion data to upload to target node. - pub fn header_to_complete(&mut self) -> Option<(HeaderIdOf

, &P::Completion)> { - queued_incomplete_header(&mut self.completion_data, |_| true) - } - - /// Prune and never accept headers before this block. - pub fn prune(&mut self, prune_border: P::Number) { - if prune_border <= self.prune_border { - return; - } - - prune_queue(&mut self.maybe_orphan, prune_border); - prune_queue(&mut self.orphan, prune_border); - prune_queue(&mut self.maybe_extra, prune_border); - prune_queue(&mut self.extra, prune_border); - prune_queue(&mut self.ready, prune_border); - prune_queue(&mut self.submitted, prune_border); - prune_queue(&mut self.incomplete, prune_border); - self.synced_children = self.synced_children.split_off(&prune_border); - prune_known_headers::

(&mut self.known_headers, prune_border); - self.prune_border = prune_border; - } - - /// Forgets all ever known headers. - pub fn clear(&mut self) { - self.maybe_orphan.clear(); - self.orphan.clear(); - self.maybe_extra.clear(); - self.extra.clear(); - self.ready.clear(); - self.incomplete.clear(); - self.submitted.clear(); - self.synced_children.clear(); - self.known_headers.clear(); - self.best_synced_number = Zero::zero(); - self.prune_border = Zero::zero(); - } - - /// Returns true if parent of this header is either incomplete or waiting for - /// its own incomplete ancestor to be completed. - fn is_parent_incomplete(&self, id: &HeaderIdOf

) -> bool { - let status = self.status(id); - let header = match status { - HeaderStatus::MaybeOrphan => header(&self.maybe_orphan, id), - HeaderStatus::Orphan => header(&self.orphan, id), - HeaderStatus::MaybeExtra => header(&self.maybe_extra, id), - HeaderStatus::Extra => header(&self.extra, id), - HeaderStatus::Ready => header(&self.ready, id), - HeaderStatus::Incomplete => header(&self.incomplete, id), - HeaderStatus::Submitted => header(&self.submitted, id), - HeaderStatus::Unknown => return false, - HeaderStatus::Synced => return false, - }; - - match header { - Some(header) => { - let parent_id = header.header().parent_id(); - self.incomplete_headers.contains_key(&parent_id) - || self.completion_data.contains_key(&parent_id) - || self.status(&parent_id) == HeaderStatus::Incomplete - } - None => false, - } - } - - /// When we receive new Synced header from target node. - fn header_synced(&mut self, id: &HeaderIdOf

) { - // update best synced block number - self.best_synced_number = std::cmp::max(self.best_synced_number, id.0); - - // all ancestors of this header are now synced => let's remove them from - // queues - let mut current = *id; - let mut id_processed = false; - let mut previous_current = None; - loop { - let header = match self.status(¤t) { - HeaderStatus::Unknown => break, - HeaderStatus::MaybeOrphan => remove_header(&mut self.maybe_orphan, ¤t), - HeaderStatus::Orphan => remove_header(&mut self.orphan, ¤t), - HeaderStatus::MaybeExtra => remove_header(&mut self.maybe_extra, ¤t), - HeaderStatus::Extra => remove_header(&mut self.extra, ¤t), - HeaderStatus::Ready => remove_header(&mut self.ready, ¤t), - HeaderStatus::Incomplete => remove_header(&mut self.incomplete, ¤t), - HeaderStatus::Submitted => remove_header(&mut self.submitted, ¤t), - HeaderStatus::Synced => break, - } - .expect("header has a given status; given queue has the header; qed"); - - // remember ids of all the children of the current header - let synced_children_entry = self - .synced_children - .entry(current.0) - .or_default() - .entry(current.1) - .or_default(); - let all_queues = [ - &self.maybe_orphan, - &self.orphan, - &self.maybe_extra, - &self.extra, - &self.ready, - &self.incomplete, - &self.submitted, - ]; - for queue in &all_queues { - let children_from_queue = queue - .get(&(current.0 + One::one())) - .map(|potential_children| { - potential_children - .values() - .filter(|potential_child| potential_child.header().parent_id() == current) - .map(|child| child.id()) - .collect::>() - }) - .unwrap_or_default(); - synced_children_entry.extend(children_from_queue); - } - if let Some(previous_current) = previous_current { - synced_children_entry.insert(previous_current); - } - - set_header_status::

(&mut self.known_headers, ¤t, HeaderStatus::Synced); - - previous_current = Some(current); - current = header.parent_id(); - id_processed = true; - } - - // remember that the header itself is synced - // (condition is here to avoid duplicate log messages) - if !id_processed { - set_header_status::

(&mut self.known_headers, id, HeaderStatus::Synced); - } - - // now let's move all descendants from maybe_orphan && orphan queues to - // maybe_extra queue - move_header_descendants::

( - &mut [&mut self.maybe_orphan, &mut self.orphan], - &mut self.maybe_extra, - &mut self.known_headers, - HeaderStatus::MaybeExtra, - id, - ); - } -} - -/// Insert header to the queue. -fn insert_header(queue: &mut HeadersQueue

, id: HeaderIdOf

, header: QueuedHeader

) { - queue.entry(id.0).or_default().insert(id.1, header); -} - -/// Remove header from the queue. -fn remove_header(queue: &mut HeadersQueue

, id: &HeaderIdOf

) -> Option> { - let mut headers_at = match queue.entry(id.0) { - BTreeMapEntry::Occupied(headers_at) => headers_at, - BTreeMapEntry::Vacant(_) => return None, - }; - - let header = headers_at.get_mut().remove(&id.1); - if headers_at.get().is_empty() { - headers_at.remove(); - } - header -} - -/// Get header from the queue. -fn header<'a, P: HeadersSyncPipeline>(queue: &'a HeadersQueue

, id: &HeaderIdOf

) -> Option<&'a QueuedHeader

> { - queue.get(&id.0).and_then(|by_hash| by_hash.get(&id.1)) -} - -/// Move header from source to destination queue. -/// -/// Returns ID of parent header, if header has been moved, or None otherwise. -fn move_header( - source_queue: &mut HeadersQueue

, - destination_queue: &mut HeadersQueue

, - known_headers: &mut KnownHeaders

, - destination_status: HeaderStatus, - id: &HeaderIdOf

, - prepare: impl FnOnce(QueuedHeader

) -> QueuedHeader

, -) -> Option> { - let header = match remove_header(source_queue, id) { - Some(header) => prepare(header), - None => return None, - }; - - let parent_id = header.header().parent_id(); - destination_queue.entry(id.0).or_default().insert(id.1, header); - set_header_status::

(known_headers, id, destination_status); - - Some(parent_id) -} - -/// Move all descendant headers from the source to destination queue. -fn move_header_descendants( - source_queues: &mut [&mut HeadersQueue

], - destination_queue: &mut HeadersQueue

, - known_headers: &mut KnownHeaders

, - destination_status: HeaderStatus, - id: &HeaderIdOf

, -) { - let mut current_number = id.0 + One::one(); - let mut current_parents = HashSet::new(); - current_parents.insert(id.1); - - while !current_parents.is_empty() { - let mut next_parents = HashSet::new(); - for source_queue in source_queues.iter_mut() { - let mut source_entry = match source_queue.entry(current_number) { - BTreeMapEntry::Occupied(source_entry) => source_entry, - BTreeMapEntry::Vacant(_) => continue, - }; - - let mut headers_to_move = Vec::new(); - let children_at_number = source_entry.get().keys().cloned().collect::>(); - for key in children_at_number { - let entry = match source_entry.get_mut().entry(key) { - HashMapEntry::Occupied(entry) => entry, - HashMapEntry::Vacant(_) => unreachable!("iterating existing keys; qed"), - }; - - if current_parents.contains(&entry.get().header().parent_id().1) { - let header_to_move = entry.remove(); - let header_to_move_id = header_to_move.id(); - headers_to_move.push((header_to_move_id, header_to_move)); - set_header_status::

(known_headers, &header_to_move_id, destination_status); - } - } - - if source_entry.get().is_empty() { - source_entry.remove(); - } - - next_parents.extend(headers_to_move.iter().map(|(id, _)| id.1)); - - destination_queue - .entry(current_number) - .or_default() - .extend(headers_to_move.into_iter().map(|(id, h)| (id.1, h))) - } - - current_number = current_number + One::one(); - std::mem::swap(&mut current_parents, &mut next_parents); - } -} - -/// Selects (recursive) all synced children of given header. -fn select_synced_children( - synced_children: &SyncedChildren

, - id: &HeaderIdOf

, -) -> Vec> { - let mut result = Vec::new(); - let mut current_parents = HashSet::new(); - current_parents.insert(*id); - - while !current_parents.is_empty() { - let mut next_parents = HashSet::new(); - for current_parent in ¤t_parents { - let current_parent_synced_children = synced_children - .get(¤t_parent.0) - .and_then(|by_number_entry| by_number_entry.get(¤t_parent.1)); - if let Some(current_parent_synced_children) = current_parent_synced_children { - for current_parent_synced_child in current_parent_synced_children { - result.push(*current_parent_synced_child); - next_parents.insert(*current_parent_synced_child); - } - } - } - - let _ = std::mem::replace(&mut current_parents, next_parents); - } - - result -} - -/// Return oldest header from the queue. -fn oldest_header(queue: &HeadersQueue

) -> Option<&QueuedHeader

> { - queue.values().flat_map(|h| h.values()).next() -} - -/// Return oldest headers from the queue until functor will return false. -fn oldest_headers( - queue: &HeadersQueue

, - mut f: impl FnMut(&QueuedHeader

) -> bool, -) -> Option>> { - let result = queue - .values() - .flat_map(|h| h.values()) - .take_while(|h| f(h)) - .collect::>(); - if result.is_empty() { - None - } else { - Some(result) - } -} - -/// Forget all headers with number less than given. -fn prune_queue(queue: &mut HeadersQueue

, prune_border: P::Number) { - *queue = queue.split_off(&prune_border); -} - -/// Forget all known headers with number less than given. -fn prune_known_headers(known_headers: &mut KnownHeaders

, prune_border: P::Number) { - let new_known_headers = known_headers.split_off(&prune_border); - for (pruned_number, pruned_headers) in &*known_headers { - for pruned_hash in pruned_headers.keys() { - log::debug!(target: "bridge", "Pruning header {:?}.", HeaderId(*pruned_number, *pruned_hash)); - } - } - *known_headers = new_known_headers; -} - -/// Change header status. -fn set_header_status( - known_headers: &mut KnownHeaders

, - id: &HeaderIdOf

, - status: HeaderStatus, -) { - log::debug!( - target: "bridge", - "{} header {:?} is now {:?}", - P::SOURCE_NAME, - id, - status, - ); - *known_headers.entry(id.0).or_default().entry(id.1).or_insert(status) = status; -} - -/// Returns queued incomplete header with maximal elapsed time since last update. -fn queued_incomplete_header( - map: &mut LinkedHashMap, - filter: impl FnMut(&mut T) -> bool, -) -> Option<(Id, &T)> { - // TODO (#84): headers that have been just appended to the end of the queue would have to wait until - // all previous headers will be retried - - let retry_old_header = map - .front() - .map(|(key, _)| key.clone()) - .and_then(|key| map.get_mut(&key).map(filter)) - .unwrap_or(false); - if retry_old_header { - let (header_key, header) = map.pop_front().expect("we have checked that front() exists; qed"); - map.insert(header_key, header); - return map.back().map(|(id, data)| (id.clone(), data)); - } - - None -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use crate::sync_loop_tests::{TestHash, TestHeader, TestHeaderId, TestHeadersSyncPipeline, TestNumber}; - use crate::sync_types::QueuedHeader; - - pub(crate) fn header(number: TestNumber) -> QueuedHeader { - QueuedHeader::new(TestHeader { - number, - hash: hash(number), - parent_hash: hash(number - 1), - }) - } - - pub(crate) fn hash(number: TestNumber) -> TestHash { - number - } - - pub(crate) fn id(number: TestNumber) -> TestHeaderId { - HeaderId(number, hash(number)) - } - - #[test] - fn total_headers_works() { - // total headers just sums up number of headers in every queue - let mut queue = QueuedHeaders::::default(); - queue.maybe_orphan.entry(1).or_default().insert( - hash(1), - QueuedHeader::::new(Default::default()), - ); - queue.maybe_orphan.entry(1).or_default().insert( - hash(2), - QueuedHeader::::new(Default::default()), - ); - queue.maybe_orphan.entry(2).or_default().insert( - hash(3), - QueuedHeader::::new(Default::default()), - ); - 
queue.orphan.entry(3).or_default().insert( - hash(4), - QueuedHeader::::new(Default::default()), - ); - queue.maybe_extra.entry(4).or_default().insert( - hash(5), - QueuedHeader::::new(Default::default()), - ); - queue.ready.entry(5).or_default().insert( - hash(6), - QueuedHeader::::new(Default::default()), - ); - queue.incomplete.entry(6).or_default().insert( - hash(7), - QueuedHeader::::new(Default::default()), - ); - assert_eq!(queue.total_headers(), 7); - } - - #[test] - fn best_queued_number_works() { - // initially there are headers in MaybeOrphan queue only - let mut queue = QueuedHeaders::::default(); - queue.maybe_orphan.entry(1).or_default().insert( - hash(1), - QueuedHeader::::new(Default::default()), - ); - queue.maybe_orphan.entry(1).or_default().insert( - hash(2), - QueuedHeader::::new(Default::default()), - ); - queue.maybe_orphan.entry(3).or_default().insert( - hash(3), - QueuedHeader::::new(Default::default()), - ); - assert_eq!(queue.best_queued_number(), 3); - // and then there's better header in Orphan - queue.orphan.entry(10).or_default().insert( - hash(10), - QueuedHeader::::new(Default::default()), - ); - assert_eq!(queue.best_queued_number(), 10); - // and then there's better header in MaybeExtra - queue.maybe_extra.entry(20).or_default().insert( - hash(20), - QueuedHeader::::new(Default::default()), - ); - assert_eq!(queue.best_queued_number(), 20); - // and then there's better header in Ready - queue.ready.entry(30).or_default().insert( - hash(30), - QueuedHeader::::new(Default::default()), - ); - assert_eq!(queue.best_queued_number(), 30); - // and then there's better header in MaybeOrphan again - queue.maybe_orphan.entry(40).or_default().insert( - hash(40), - QueuedHeader::::new(Default::default()), - ); - assert_eq!(queue.best_queued_number(), 40); - // and then there's some header in Incomplete - queue.incomplete.entry(50).or_default().insert( - hash(50), - QueuedHeader::::new(Default::default()), - ); - 
assert_eq!(queue.best_queued_number(), 50); - } - - #[test] - fn status_works() { - // all headers are unknown initially - let mut queue = QueuedHeaders::::default(); - assert_eq!(queue.status(&id(10)), HeaderStatus::Unknown); - // and status is read from the KnownHeaders - queue - .known_headers - .entry(10) - .or_default() - .insert(hash(10), HeaderStatus::Ready); - assert_eq!(queue.status(&id(10)), HeaderStatus::Ready); - } - - #[test] - fn header_works() { - // initially we have oldest header #10 - let mut queue = QueuedHeaders::::default(); - queue.maybe_orphan.entry(10).or_default().insert(hash(1), header(100)); - assert_eq!( - queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, - hash(100) - ); - // inserting #20 changes nothing - queue.maybe_orphan.entry(20).or_default().insert(hash(1), header(101)); - assert_eq!( - queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, - hash(100) - ); - // inserting #5 makes it oldest - queue.maybe_orphan.entry(5).or_default().insert(hash(1), header(102)); - assert_eq!( - queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, - hash(102) - ); - } - - #[test] - fn header_response_works() { - // when parent is Synced, we insert to MaybeExtra - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Synced); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeExtra); - - // when parent is Ready, we insert to MaybeExtra - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Ready); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeExtra); - - // when parent is Receipts, we insert to MaybeExtra - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), 
HeaderStatus::Extra); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeExtra); - - // when parent is MaybeExtra, we insert to MaybeExtra - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeExtra); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeExtra); - - // when parent is Orphan, we insert to Orphan - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Orphan); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::Orphan); - - // when parent is MaybeOrphan, we insert to MaybeOrphan - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeOrphan); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeOrphan); - - // when parent is unknown, we insert to MaybeOrphan - let mut queue = QueuedHeaders::::default(); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeOrphan); - } - - #[test] - fn ancestors_are_synced_on_substrate_best_header_response() { - // let's say someone else has submitted transaction to bridge that changes - // its best block to #100. 
At this time we have: - // #100 in MaybeOrphan - // #99 in Orphan - // #98 in MaybeExtra - // #97 in Receipts - // #96 in Ready - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeOrphan); - queue - .maybe_orphan - .entry(100) - .or_default() - .insert(hash(100), header(100)); - queue - .known_headers - .entry(99) - .or_default() - .insert(hash(99), HeaderStatus::Orphan); - queue.orphan.entry(99).or_default().insert(hash(99), header(99)); - queue - .known_headers - .entry(98) - .or_default() - .insert(hash(98), HeaderStatus::MaybeExtra); - queue.maybe_extra.entry(98).or_default().insert(hash(98), header(98)); - queue - .known_headers - .entry(97) - .or_default() - .insert(hash(97), HeaderStatus::Extra); - queue.extra.entry(97).or_default().insert(hash(97), header(97)); - queue - .known_headers - .entry(96) - .or_default() - .insert(hash(96), HeaderStatus::Ready); - queue.ready.entry(96).or_default().insert(hash(96), header(96)); - queue.target_best_header_response(&id(100)); - - // then the #100 and all ancestors of #100 (#96..#99) are treated as synced - assert!(queue.maybe_orphan.is_empty()); - assert!(queue.orphan.is_empty()); - assert!(queue.maybe_extra.is_empty()); - assert!(queue.extra.is_empty()); - assert!(queue.ready.is_empty()); - assert_eq!(queue.known_headers.len(), 5); - assert!(queue - .known_headers - .values() - .all(|s| s.values().all(|s| *s == HeaderStatus::Synced))); - - // children of synced headers are stored - assert_eq!( - vec![id(97)], - queue.synced_children[&96][&hash(96)] - .iter() - .cloned() - .collect::>() - ); - assert_eq!( - vec![id(98)], - queue.synced_children[&97][&hash(97)] - .iter() - .cloned() - .collect::>() - ); - assert_eq!( - vec![id(99)], - queue.synced_children[&98][&hash(98)] - .iter() - .cloned() - .collect::>() - ); - assert_eq!( - vec![id(100)], - queue.synced_children[&99][&hash(99)] - .iter() - .cloned() - .collect::>() - ); - 
assert_eq!(0, queue.synced_children[&100][&hash(100)].len()); - } - - #[test] - fn descendants_are_moved_on_substrate_best_header_response() { - // let's say someone else has submitted transaction to bridge that changes - // its best block to #100. At this time we have: - // #101 in Orphan - // #102 in MaybeOrphan - // #103 in Orphan - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Orphan); - queue.orphan.entry(101).or_default().insert(hash(101), header(101)); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::MaybeOrphan); - queue - .maybe_orphan - .entry(102) - .or_default() - .insert(hash(102), header(102)); - queue - .known_headers - .entry(103) - .or_default() - .insert(hash(103), HeaderStatus::Orphan); - queue.orphan.entry(103).or_default().insert(hash(103), header(103)); - queue.target_best_header_response(&id(100)); - - // all descendants are moved to MaybeExtra - assert!(queue.maybe_orphan.is_empty()); - assert!(queue.orphan.is_empty()); - assert_eq!(queue.maybe_extra.len(), 3); - assert_eq!(queue.known_headers[&101][&hash(101)], HeaderStatus::MaybeExtra); - assert_eq!(queue.known_headers[&102][&hash(102)], HeaderStatus::MaybeExtra); - assert_eq!(queue.known_headers[&103][&hash(103)], HeaderStatus::MaybeExtra); - } - - #[test] - fn positive_maybe_orphan_response_works() { - // let's say we have: - // #100 in MaybeOrphan - // #101 in Orphan - // #102 in MaybeOrphan - // and we have asked for MaybeOrphan status of #100.parent (i.e. 
#99) - // and the response is: YES, #99 is known to the Substrate runtime - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeOrphan); - queue - .maybe_orphan - .entry(100) - .or_default() - .insert(hash(100), header(100)); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Orphan); - queue.orphan.entry(101).or_default().insert(hash(101), header(101)); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::MaybeOrphan); - queue - .maybe_orphan - .entry(102) - .or_default() - .insert(hash(102), header(102)); - queue.maybe_orphan_response(&id(99), true); - - // then all headers (#100..#103) are moved to the MaybeExtra queue - assert!(queue.orphan.is_empty()); - assert!(queue.maybe_orphan.is_empty()); - assert_eq!(queue.maybe_extra.len(), 3); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::MaybeExtra); - assert_eq!(queue.known_headers[&101][&hash(101)], HeaderStatus::MaybeExtra); - assert_eq!(queue.known_headers[&102][&hash(102)], HeaderStatus::MaybeExtra); - } - - #[test] - fn negative_maybe_orphan_response_works() { - // let's say we have: - // #100 in MaybeOrphan - // #101 in MaybeOrphan - // and we have asked for MaybeOrphan status of #100.parent (i.e. 
#99) - // and the response is: NO, #99 is NOT known to the Substrate runtime - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeOrphan); - queue - .maybe_orphan - .entry(100) - .or_default() - .insert(hash(100), header(100)); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::MaybeOrphan); - queue - .maybe_orphan - .entry(101) - .or_default() - .insert(hash(101), header(101)); - queue.maybe_orphan_response(&id(99), false); - - // then all headers (#100..#101) are moved to the Orphan queue - assert!(queue.maybe_orphan.is_empty()); - assert_eq!(queue.orphan.len(), 2); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Orphan); - assert_eq!(queue.known_headers[&101][&hash(101)], HeaderStatus::Orphan); - } - - #[test] - fn positive_maybe_extra_response_works() { - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeExtra); - queue.maybe_extra.entry(100).or_default().insert(hash(100), header(100)); - queue.maybe_extra_response(&id(100), true); - assert!(queue.maybe_extra.is_empty()); - assert_eq!(queue.extra.len(), 1); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Extra); - } - - #[test] - fn negative_maybe_extra_response_works() { - // when parent header is complete - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeExtra); - queue.maybe_extra.entry(100).or_default().insert(hash(100), header(100)); - queue.maybe_extra_response(&id(100), false); - assert!(queue.maybe_extra.is_empty()); - assert_eq!(queue.ready.len(), 1); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Ready); - - // when parent header is incomplete - queue.incomplete_headers.insert(id(200), None); - queue - .known_headers - .entry(201) - .or_default() 
- .insert(hash(201), HeaderStatus::MaybeExtra); - queue.maybe_extra.entry(201).or_default().insert(hash(201), header(201)); - queue.maybe_extra_response(&id(201), false); - assert!(queue.maybe_extra.is_empty()); - assert_eq!(queue.incomplete.len(), 1); - assert_eq!(queue.known_headers[&201][&hash(201)], HeaderStatus::Incomplete); - } - - #[test] - fn receipts_response_works() { - // when parent header is complete - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Extra); - queue.extra.entry(100).or_default().insert(hash(100), header(100)); - queue.extra_response(&id(100), 100_100); - assert!(queue.extra.is_empty()); - assert_eq!(queue.ready.len(), 1); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Ready); - - // when parent header is incomplete - queue.incomplete_headers.insert(id(200), None); - queue - .known_headers - .entry(201) - .or_default() - .insert(hash(201), HeaderStatus::Extra); - queue.extra.entry(201).or_default().insert(hash(201), header(201)); - queue.extra_response(&id(201), 201_201); - assert!(queue.extra.is_empty()); - assert_eq!(queue.incomplete.len(), 1); - assert_eq!(queue.known_headers[&201][&hash(201)], HeaderStatus::Incomplete); - } - - #[test] - fn header_submitted_works() { - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Ready); - queue.ready.entry(100).or_default().insert(hash(100), header(100)); - queue.headers_submitted(vec![id(100)]); - assert!(queue.ready.is_empty()); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Submitted); - } - - #[test] - fn incomplete_header_works() { - let mut queue = QueuedHeaders::::default(); - - // nothing to complete if queue is empty - assert_eq!(queue.incomplete_header(), None); - - // when there's new header to complete => ask for completion data - queue.incomplete_headers.insert(id(100), None); 
- assert_eq!(queue.incomplete_header(), Some(id(100))); - - // we have just asked for completion data => nothing to request - assert_eq!(queue.incomplete_header(), None); - - // enough time have passed => ask again - queue.incomplete_headers.clear(); - queue.incomplete_headers.insert( - id(100), - Some(Instant::now() - RETRY_FETCH_COMPLETION_INTERVAL - RETRY_FETCH_COMPLETION_INTERVAL), - ); - assert_eq!(queue.incomplete_header(), Some(id(100))); - } - - #[test] - fn completion_response_works() { - let mut queue = QueuedHeaders::::default(); - queue.incomplete_headers.insert(id(100), None); - queue.incomplete_headers.insert(id(200), Some(Instant::now())); - queue.incomplete_headers.insert(id(300), Some(Instant::now())); - - // when header isn't incompete, nothing changes - queue.completion_response(&id(400), None); - assert_eq!(queue.incomplete_headers.len(), 3); - assert_eq!(queue.completion_data.len(), 0); - assert_eq!(queue.header_to_complete(), None); - - // when response is None, nothing changes - queue.completion_response(&id(100), None); - assert_eq!(queue.incomplete_headers.len(), 3); - assert_eq!(queue.completion_data.len(), 0); - assert_eq!(queue.header_to_complete(), None); - - // when response is Some, we're scheduling completion - queue.completion_response(&id(200), Some(200_200)); - assert_eq!(queue.completion_data.len(), 1); - assert!(queue.completion_data.contains_key(&id(200))); - assert_eq!(queue.header_to_complete(), Some((id(200), &200_200))); - assert_eq!( - queue.incomplete_headers.keys().collect::>(), - vec![&id(100), &id(300), &id(200)], - ); - } - - #[test] - fn header_completed_works() { - let mut queue = QueuedHeaders::::default(); - queue.completion_data.insert(id(100), 100_100); - - // when unknown header is completed - queue.header_completed(&id(200)); - assert_eq!(queue.completion_data.len(), 1); - - // when known header is completed - queue.header_completed(&id(100)); - assert_eq!(queue.completion_data.len(), 0); - } - - #[test] - fn 
incomplete_headers_response_works() { - let mut queue = QueuedHeaders::::default(); - - // when we have already submitted #101 and #102 is ready - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Submitted); - queue.submitted.entry(101).or_default().insert(hash(101), header(101)); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::Ready); - queue.submitted.entry(102).or_default().insert(hash(102), header(102)); - - // AND now we know that the #100 is incomplete - queue.incomplete_headers_response(vec![id(100)].into_iter().collect()); - - // => #101 and #102 are moved to the Incomplete and #100 is now synced - assert_eq!(queue.status(&id(100)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(101)), HeaderStatus::Incomplete); - assert_eq!(queue.status(&id(102)), HeaderStatus::Incomplete); - assert_eq!(queue.submitted.len(), 0); - assert_eq!(queue.ready.len(), 0); - assert!(queue.incomplete.entry(101).or_default().contains_key(&hash(101))); - assert!(queue.incomplete.entry(102).or_default().contains_key(&hash(102))); - assert!(queue.incomplete_headers.contains_key(&id(100))); - assert!(queue.completion_data.is_empty()); - - // and then header #100 is no longer incomplete - queue.incomplete_headers_response(vec![].into_iter().collect()); - - // => #101 and #102 are moved to the Ready queue and #100 if now forgotten - assert_eq!(queue.status(&id(100)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(101)), HeaderStatus::Ready); - assert_eq!(queue.status(&id(102)), HeaderStatus::Ready); - assert_eq!(queue.incomplete.len(), 0); - assert_eq!(queue.submitted.len(), 0); - assert!(queue.ready.entry(101).or_default().contains_key(&hash(101))); - assert!(queue.ready.entry(102).or_default().contains_key(&hash(102))); - assert!(queue.incomplete_headers.is_empty()); - assert!(queue.completion_data.is_empty()); - } - - #[test] - fn is_parent_incomplete_works() { - let mut queue = 
QueuedHeaders::::default(); - - // when we do not know header itself - assert!(!queue.is_parent_incomplete(&id(50))); - - // when we do not know parent - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Incomplete); - queue.incomplete.entry(100).or_default().insert(hash(100), header(100)); - assert!(!queue.is_parent_incomplete(&id(100))); - - // when parent is inside incomplete queue (i.e. some other ancestor is actually incomplete) - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Submitted); - queue.submitted.entry(101).or_default().insert(hash(101), header(101)); - assert!(queue.is_parent_incomplete(&id(101))); - - // when parent is the incomplete header and we do not have completion data - queue.incomplete_headers.insert(id(199), None); - queue - .known_headers - .entry(200) - .or_default() - .insert(hash(200), HeaderStatus::Submitted); - queue.submitted.entry(200).or_default().insert(hash(200), header(200)); - assert!(queue.is_parent_incomplete(&id(200))); - - // when parent is the incomplete header and we have completion data - queue.completion_data.insert(id(299), 299_299); - queue - .known_headers - .entry(300) - .or_default() - .insert(hash(300), HeaderStatus::Submitted); - queue.submitted.entry(300).or_default().insert(hash(300), header(300)); - assert!(queue.is_parent_incomplete(&id(300))); - } - - #[test] - fn prune_works() { - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(105) - .or_default() - .insert(hash(105), HeaderStatus::Incomplete); - queue.incomplete.entry(105).or_default().insert(hash(105), header(105)); - queue - .known_headers - .entry(104) - .or_default() - .insert(hash(104), HeaderStatus::MaybeOrphan); - queue - .maybe_orphan - .entry(104) - .or_default() - .insert(hash(104), header(104)); - queue - .known_headers - .entry(103) - .or_default() - .insert(hash(103), HeaderStatus::Orphan); - 
queue.orphan.entry(103).or_default().insert(hash(103), header(103)); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::MaybeExtra); - queue.maybe_extra.entry(102).or_default().insert(hash(102), header(102)); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Extra); - queue.extra.entry(101).or_default().insert(hash(101), header(101)); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Ready); - queue.ready.entry(100).or_default().insert(hash(100), header(100)); - queue - .synced_children - .entry(100) - .or_default() - .insert(hash(100), vec![id(101)].into_iter().collect()); - queue - .synced_children - .entry(102) - .or_default() - .insert(hash(102), vec![id(102)].into_iter().collect()); - - queue.prune(102); - - assert_eq!(queue.ready.len(), 0); - assert_eq!(queue.extra.len(), 0); - assert_eq!(queue.maybe_extra.len(), 1); - assert_eq!(queue.orphan.len(), 1); - assert_eq!(queue.maybe_orphan.len(), 1); - assert_eq!(queue.incomplete.len(), 1); - assert_eq!(queue.synced_children.len(), 1); - assert_eq!(queue.known_headers.len(), 4); - - queue.prune(110); - - assert_eq!(queue.ready.len(), 0); - assert_eq!(queue.extra.len(), 0); - assert_eq!(queue.maybe_extra.len(), 0); - assert_eq!(queue.orphan.len(), 0); - assert_eq!(queue.maybe_orphan.len(), 0); - assert_eq!(queue.incomplete.len(), 0); - assert_eq!(queue.synced_children.len(), 0); - assert_eq!(queue.known_headers.len(), 0); - - queue.header_response(header(109).header().clone()); - assert_eq!(queue.known_headers.len(), 0); - - queue.header_response(header(110).header().clone()); - assert_eq!(queue.known_headers.len(), 1); - } - - #[test] - fn incomplete_headers_are_still_incomplete_after_advance() { - let mut queue = QueuedHeaders::::default(); - - // relay#1 knows that header#100 is incomplete && it has headers 101..104 in incomplete queue - queue.incomplete_headers.insert(id(100), None); - 
queue.incomplete.entry(101).or_default().insert(hash(101), header(101)); - queue.incomplete.entry(102).or_default().insert(hash(102), header(102)); - queue.incomplete.entry(103).or_default().insert(hash(103), header(103)); - queue.incomplete.entry(104).or_default().insert(hash(104), header(104)); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Synced); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Incomplete); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::Incomplete); - queue - .known_headers - .entry(103) - .or_default() - .insert(hash(103), HeaderStatus::Incomplete); - queue - .known_headers - .entry(104) - .or_default() - .insert(hash(104), HeaderStatus::Incomplete); - - // let's say relay#2 completes header#100 and then submits header#101+header#102 and it turns - // out that header#102 is also incomplete - queue.incomplete_headers_response(vec![id(102)].into_iter().collect()); - - // then the header#103 and the header#104 must have Incomplete status - assert_eq!(queue.status(&id(101)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(102)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(103)), HeaderStatus::Incomplete); - assert_eq!(queue.status(&id(104)), HeaderStatus::Incomplete); - } - - #[test] - fn incomplete_headers_response_moves_synced_headers() { - let mut queue = QueuedHeaders::::default(); - - // we have submitted two headers - 100 and 101. 
102 is ready - queue.submitted.entry(100).or_default().insert(hash(100), header(100)); - queue.submitted.entry(101).or_default().insert(hash(101), header(101)); - queue.ready.entry(102).or_default().insert(hash(102), header(102)); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Submitted); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Submitted); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::Ready); - - // both headers are accepted - queue.target_best_header_response(&id(101)); - - // but header 100 is incomplete - queue.incomplete_headers_response(vec![id(100)].into_iter().collect()); - assert_eq!(queue.status(&id(100)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(101)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(102)), HeaderStatus::Incomplete); - assert!(queue.incomplete_headers.contains_key(&id(100))); - assert!(queue.incomplete[&102].contains_key(&hash(102))); - - // when header 100 is completed, 101 is synced and 102 is ready - queue.incomplete_headers_response(HashSet::new()); - assert_eq!(queue.status(&id(100)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(101)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(102)), HeaderStatus::Ready); - assert!(queue.ready[&102].contains_key(&hash(102))); - } -} diff --git a/polkadot/bridges/relays/headers/src/sync.rs b/polkadot/bridges/relays/headers/src/sync.rs deleted file mode 100644 index e992b1f8e583c14a109a240da8c3c7dc676d6192..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/headers/src/sync.rs +++ /dev/null @@ -1,523 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Headers synchronization context. This structure wraps headers queue and is -//! able to choose: which headers to read from the source chain? Which headers -//! to submit to the target chain? The context makes decisions basing on parameters -//! passed using `HeadersSyncParams` structure. - -use crate::headers::QueuedHeaders; -use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader}; -use num_traits::{One, Saturating, Zero}; - -/// Common sync params. -#[derive(Debug, Clone)] -pub struct HeadersSyncParams { - /// Maximal number of ethereum headers to pre-download. - pub max_future_headers_to_download: usize, - /// Maximal number of active (we believe) submit header transactions. - pub max_headers_in_submitted_status: usize, - /// Maximal number of headers in single submit request. - pub max_headers_in_single_submit: usize, - /// Maximal total headers size in single submit request. - pub max_headers_size_in_single_submit: usize, - /// We only may store and accept (from Ethereum node) headers that have - /// number >= than best_substrate_header.number - prune_depth. - pub prune_depth: u32, - /// Target transactions mode. - pub target_tx_mode: TargetTransactionMode, -} - -/// Target transaction mode. 
-#[derive(Debug, PartialEq, Clone)] -pub enum TargetTransactionMode { - /// Submit new headers using signed transactions. - Signed, - /// Submit new headers using unsigned transactions. - Unsigned, - /// Submit new headers using signed transactions, but only when we - /// believe that sync has stalled. - Backup, -} - -/// Headers synchronization context. -#[derive(Debug)] -pub struct HeadersSync { - /// Synchronization parameters. - params: HeadersSyncParams, - /// Best header number known to source node. - source_best_number: Option, - /// Best header known to target node. - target_best_header: Option>, - /// Headers queue. - headers: QueuedHeaders

, - /// Pause headers submission. - pause_submit: bool, -} - -impl HeadersSync

{ - /// Creates new headers synchronizer. - pub fn new(params: HeadersSyncParams) -> Self { - HeadersSync { - headers: QueuedHeaders::default(), - params, - source_best_number: None, - target_best_header: None, - pause_submit: false, - } - } - - /// Return best header number known to source node. - pub fn source_best_number(&self) -> Option { - self.source_best_number - } - - /// Best header known to target node. - pub fn target_best_header(&self) -> Option> { - self.target_best_header - } - - /// Returns true if we have synced almost all known headers. - pub fn is_almost_synced(&self) -> bool { - match self.source_best_number { - Some(source_best_number) => self - .target_best_header - .map(|best| source_best_number.saturating_sub(best.0) < 4.into()) - .unwrap_or(false), - None => true, - } - } - - /// Returns synchronization status. - pub fn status(&self) -> (&Option>, &Option) { - (&self.target_best_header, &self.source_best_number) - } - - /// Returns reference to the headers queue. - pub fn headers(&self) -> &QueuedHeaders

{ - &self.headers - } - - /// Returns mutable reference to the headers queue. - pub fn headers_mut(&mut self) -> &mut QueuedHeaders

{ - &mut self.headers - } - - /// Select header that needs to be downloaded from the source node. - pub fn select_new_header_to_download(&self) -> Option { - // if we haven't received best header from source node yet, there's nothing we can download - let source_best_number = self.source_best_number?; - - // if we haven't received known best header from target node yet, there's nothing we can download - let target_best_header = self.target_best_header.as_ref()?; - - // if there's too many headers in the queue, stop downloading - let in_memory_headers = self.headers.total_headers(); - if in_memory_headers >= self.params.max_future_headers_to_download { - return None; - } - - // if queue is empty and best header on target is > than best header on source, - // then we shoud reorg - let best_queued_number = self.headers.best_queued_number(); - if best_queued_number.is_zero() && source_best_number < target_best_header.0 { - return Some(source_best_number); - } - - // we assume that there were no reorgs if we have already downloaded best header - let best_downloaded_number = std::cmp::max( - std::cmp::max(best_queued_number, self.headers.best_synced_number()), - target_best_header.0, - ); - if best_downloaded_number >= source_best_number { - return None; - } - - // download new header - Some(best_downloaded_number + One::one()) - } - - /// Selech orphan header to downoload. - pub fn select_orphan_header_to_download(&self) -> Option<&QueuedHeader

> { - let orphan_header = self.headers.header(HeaderStatus::Orphan)?; - - // we consider header orphan until we'll find it ancestor that is known to the target node - // => we may get orphan header while we ask target node whether it knows its parent - // => let's avoid fetching duplicate headers - let parent_id = orphan_header.parent_id(); - if self.headers.status(&parent_id) != HeaderStatus::Unknown { - return None; - } - - Some(orphan_header) - } - - /// Select headers that need to be submitted to the target node. - pub fn select_headers_to_submit(&self, stalled: bool) -> Option>> { - // maybe we have paused new headers submit? - if self.pause_submit { - return None; - } - - // if we operate in backup mode, we only submit headers when sync has stalled - if self.params.target_tx_mode == TargetTransactionMode::Backup && !stalled { - return None; - } - - let headers_in_submit_status = self.headers.headers_in_status(HeaderStatus::Submitted); - let headers_to_submit_count = self - .params - .max_headers_in_submitted_status - .checked_sub(headers_in_submit_status)?; - - let mut total_size = 0; - let mut total_headers = 0; - self.headers.headers(HeaderStatus::Ready, |header| { - if total_headers == headers_to_submit_count { - return false; - } - if total_headers == self.params.max_headers_in_single_submit { - return false; - } - - let encoded_size = P::estimate_size(header); - if total_headers != 0 && total_size + encoded_size > self.params.max_headers_size_in_single_submit { - return false; - } - - total_size += encoded_size; - total_headers += 1; - - true - }) - } - - /// Receive new target header number from the source node. - pub fn source_best_header_number_response(&mut self, best_header_number: P::Number) { - log::debug!( - target: "bridge", - "Received best header number from {} node: {}", - P::SOURCE_NAME, - best_header_number, - ); - self.source_best_number = Some(best_header_number); - } - - /// Receive new best header from the target node. 
- /// Returns true if it is different from the previous block known to us. - pub fn target_best_header_response(&mut self, best_header: HeaderIdOf

) -> bool { - log::debug!( - target: "bridge", - "Received best known header from {}: {:?}", - P::TARGET_NAME, - best_header, - ); - - // early return if it is still the same - if self.target_best_header == Some(best_header) { - return false; - } - - // remember that this header is now known to the Substrate runtime - self.headers.target_best_header_response(&best_header); - - // prune ancient headers - self.headers - .prune(best_header.0.saturating_sub(self.params.prune_depth.into())); - - // finally remember the best header itself - self.target_best_header = Some(best_header); - - // we are ready to submit headers again - if self.pause_submit { - log::debug!( - target: "bridge", - "Ready to submit {} headers to {} node again!", - P::SOURCE_NAME, - P::TARGET_NAME, - ); - - self.pause_submit = false; - } - - true - } - - /// Pause headers submit until best header will be updated on target node. - pub fn pause_submit(&mut self) { - log::debug!( - target: "bridge", - "Stopping submitting {} headers to {} node. Waiting for {} submitted headers to be accepted", - P::SOURCE_NAME, - P::TARGET_NAME, - self.headers.headers_in_status(HeaderStatus::Submitted), - ); - - self.pause_submit = true; - } - - /// Restart synchronization. 
- pub fn restart(&mut self) { - self.source_best_number = None; - self.target_best_header = None; - self.headers.clear(); - self.pause_submit = false; - } -} - -#[cfg(test)] -pub mod tests { - use super::*; - use crate::headers::tests::{header, id}; - use crate::sync_loop_tests::{TestHash, TestHeadersSyncPipeline, TestNumber}; - use crate::sync_types::HeaderStatus; - use relay_utils::HeaderId; - - fn side_hash(number: TestNumber) -> TestHash { - 1000 + number - } - - pub fn default_sync_params() -> HeadersSyncParams { - HeadersSyncParams { - max_future_headers_to_download: 128, - max_headers_in_submitted_status: 128, - max_headers_in_single_submit: 32, - max_headers_size_in_single_submit: 131_072, - prune_depth: 4096, - target_tx_mode: TargetTransactionMode::Signed, - } - } - - #[test] - fn select_new_header_to_download_works() { - let mut eth_sync = HeadersSync::::new(default_sync_params()); - - // both best && target headers are unknown - assert_eq!(eth_sync.select_new_header_to_download(), None); - - // best header is known, target header is unknown - eth_sync.target_best_header = Some(HeaderId(0, Default::default())); - assert_eq!(eth_sync.select_new_header_to_download(), None); - - // target header is known, best header is unknown - eth_sync.target_best_header = None; - eth_sync.source_best_number = Some(100); - assert_eq!(eth_sync.select_new_header_to_download(), None); - - // when our best block has the same number as the target - eth_sync.target_best_header = Some(HeaderId(100, Default::default())); - assert_eq!(eth_sync.select_new_header_to_download(), None); - - // when we actually need a new header - eth_sync.source_best_number = Some(101); - assert_eq!(eth_sync.select_new_header_to_download(), Some(101)); - - // when we have to reorganize to longer fork - eth_sync.source_best_number = Some(100); - eth_sync.target_best_header = Some(HeaderId(200, Default::default())); - assert_eq!(eth_sync.select_new_header_to_download(), Some(100)); - - // when there 
are too many headers scheduled for submitting - for i in 1..1000 { - eth_sync.headers.header_response(header(i).header().clone()); - } - assert_eq!(eth_sync.select_new_header_to_download(), None); - } - - #[test] - fn select_new_header_to_download_works_with_empty_queue() { - let mut eth_sync = HeadersSync::::new(default_sync_params()); - eth_sync.source_best_header_number_response(100); - - // when queue is not empty => everything goes as usually - eth_sync.target_best_header_response(header(10).id()); - eth_sync.headers_mut().header_response(header(11).header().clone()); - eth_sync.headers_mut().maybe_extra_response(&header(11).id(), false); - assert_eq!(eth_sync.select_new_header_to_download(), Some(12)); - - // but then queue is drained - eth_sync.headers_mut().target_best_header_response(&header(11).id()); - - // even though it's empty, we know that header#11 is synced - assert_eq!(eth_sync.headers().best_queued_number(), 0); - assert_eq!(eth_sync.headers().best_synced_number(), 11); - assert_eq!(eth_sync.select_new_header_to_download(), Some(12)); - } - - #[test] - fn sync_without_reorgs_works() { - let mut eth_sync = HeadersSync::new(default_sync_params()); - eth_sync.params.max_headers_in_submitted_status = 1; - - // ethereum reports best header #102 - eth_sync.source_best_header_number_response(102); - - // substrate reports that it is at block #100 - eth_sync.target_best_header_response(id(100)); - - // block #101 is downloaded first - assert_eq!(eth_sync.select_new_header_to_download(), Some(101)); - eth_sync.headers.header_response(header(101).header().clone()); - - // now header #101 is ready to be submitted - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeExtra), Some(&header(101))); - eth_sync.headers.maybe_extra_response(&id(101), false); - assert_eq!(eth_sync.headers.header(HeaderStatus::Ready), Some(&header(101))); - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(101)])); - - // and header #102 is ready to be 
downloaded - assert_eq!(eth_sync.select_new_header_to_download(), Some(102)); - eth_sync.headers.header_response(header(102).header().clone()); - - // receive submission confirmation - eth_sync.headers.headers_submitted(vec![id(101)]); - - // we have nothing to submit because previous header hasn't been confirmed yet - // (and we allow max 1 submit transaction in the wild) - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeExtra), Some(&header(102))); - eth_sync.headers.maybe_extra_response(&id(102), false); - assert_eq!(eth_sync.headers.header(HeaderStatus::Ready), Some(&header(102))); - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // substrate reports that it has imported block #101 - eth_sync.target_best_header_response(id(101)); - - // and we are ready to submit #102 - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(102)])); - eth_sync.headers.headers_submitted(vec![id(102)]); - - // substrate reports that it has imported block #102 - eth_sync.target_best_header_response(id(102)); - - // and we have nothing to download - assert_eq!(eth_sync.select_new_header_to_download(), None); - } - - #[test] - fn sync_with_orphan_headers_work() { - let mut eth_sync = HeadersSync::new(default_sync_params()); - - // ethereum reports best header #102 - eth_sync.source_best_header_number_response(102); - - // substrate reports that it is at block #100, but it isn't part of best chain - eth_sync.target_best_header_response(HeaderId(100, side_hash(100))); - - // block #101 is downloaded first - assert_eq!(eth_sync.select_new_header_to_download(), Some(101)); - eth_sync.headers.header_response(header(101).header().clone()); - - // we can't submit header #101, because its parent status is unknown - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // instead we are trying to determine status of its parent (#100) - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeOrphan), Some(&header(101))); - - // and the status is 
still unknown - eth_sync.headers.maybe_orphan_response(&id(100), false); - - // so we consider #101 orphaned now && will download its parent - #100 - assert_eq!(eth_sync.headers.header(HeaderStatus::Orphan), Some(&header(101))); - eth_sync.headers.header_response(header(100).header().clone()); - - // #101 is now Orphan and #100 is MaybeOrphan => we do not want to retrieve - // header #100 again - assert_eq!(eth_sync.headers.header(HeaderStatus::Orphan), Some(&header(101))); - assert_eq!(eth_sync.select_orphan_header_to_download(), None); - - // we can't submit header #100, because its parent status is unknown - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // instead we are trying to determine status of its parent (#99) - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeOrphan), Some(&header(100))); - - // and the status is known, so we move previously orphaned #100 and #101 to ready queue - eth_sync.headers.maybe_orphan_response(&id(99), true); - - // and we are ready to submit #100 - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeExtra), Some(&header(100))); - eth_sync.headers.maybe_extra_response(&id(100), false); - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(100)])); - eth_sync.headers.headers_submitted(vec![id(100)]); - - // and we are ready to submit #101 - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeExtra), Some(&header(101))); - eth_sync.headers.maybe_extra_response(&id(101), false); - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(101)])); - eth_sync.headers.headers_submitted(vec![id(101)]); - } - - #[test] - fn pruning_happens_on_target_best_header_response() { - let mut eth_sync = HeadersSync::::new(default_sync_params()); - eth_sync.params.prune_depth = 50; - eth_sync.target_best_header_response(id(100)); - assert_eq!(eth_sync.headers.prune_border(), 50); - } - - #[test] - fn only_submitting_headers_in_backup_mode_when_stalled() { - let mut eth_sync = 
HeadersSync::new(default_sync_params()); - eth_sync.params.target_tx_mode = TargetTransactionMode::Backup; - - // ethereum reports best header #102 - eth_sync.source_best_header_number_response(102); - - // substrate reports that it is at block #100 - eth_sync.target_best_header_response(id(100)); - - // block #101 is downloaded first - eth_sync.headers.header_response(header(101).header().clone()); - eth_sync.headers.maybe_extra_response(&id(101), false); - - // ensure that headers are not submitted when sync is not stalled - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // ensure that headers are not submitted when sync is stalled - assert_eq!(eth_sync.select_headers_to_submit(true), Some(vec![&header(101)])); - } - - #[test] - fn does_not_select_new_headers_to_submit_when_submit_is_paused() { - let mut eth_sync = HeadersSync::new(default_sync_params()); - eth_sync.params.max_headers_in_submitted_status = 1; - - // ethereum reports best header #102 and substrate is at #100 - eth_sync.source_best_header_number_response(102); - eth_sync.target_best_header_response(id(100)); - - // let's prepare #101 and #102 for submitting - eth_sync.headers.header_response(header(101).header().clone()); - eth_sync.headers.maybe_extra_response(&id(101), false); - eth_sync.headers.header_response(header(102).header().clone()); - eth_sync.headers.maybe_extra_response(&id(102), false); - - // when submit is not paused, we're ready to submit #101 - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(101)])); - - // when submit is paused, we're not ready to submit anything - eth_sync.pause_submit(); - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // if best header on substrate node isn't updated, we still not submitting anything - eth_sync.target_best_header_response(id(100)); - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // but after it is actually updated, we are ready to submit - 
eth_sync.target_best_header_response(id(101)); - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(102)])); - } -} diff --git a/polkadot/bridges/relays/headers/src/sync_loop.rs b/polkadot/bridges/relays/headers/src/sync_loop.rs deleted file mode 100644 index b204932056509a4891788a1cc284f30ebd3a9b8d..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/headers/src/sync_loop.rs +++ /dev/null @@ -1,637 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Entrypoint for running headers synchronization loop. 
- -use crate::sync::{HeadersSync, HeadersSyncParams}; -use crate::sync_loop_metrics::SyncLoopMetrics; -use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SubmittedHeaders}; - -use async_trait::async_trait; -use futures::{future::FutureExt, stream::StreamExt}; -use num_traits::{Saturating, Zero}; -use relay_utils::{ - format_ids, interval, - metrics::{GlobalMetrics, MetricsParams}, - process_future_result, - relay_loop::Client as RelayClient, - retry_backoff, FailedClient, MaybeConnectionError, StringifiedMaybeConnectionError, -}; -use std::{ - collections::HashSet, - future::Future, - time::{Duration, Instant}, -}; - -/// When we submit headers to target node, but see no updates of best -/// source block known to target node during STALL_SYNC_TIMEOUT seconds, -/// we consider that our headers are rejected because there has been reorg in target chain. -/// This reorg could invalidate our knowledge about sync process (i.e. we have asked if -/// HeaderA is known to target, but then reorg happened and the answer is different -/// now) => we need to reset sync. -/// The other option is to receive **EVERY** best target header and check if it is -/// direct child of previous best header. But: (1) subscription doesn't guarantee that -/// the subscriber will receive every best header (2) reorg won't always lead to sync -/// stall and restart is a heavy operation (we forget all in-memory headers). -const STALL_SYNC_TIMEOUT: Duration = Duration::from_secs(5 * 60); -/// Delay after we have seen update of best source header at target node, -/// for us to treat sync stalled. ONLY when relay operates in backup mode. -const BACKUP_STALL_SYNC_TIMEOUT: Duration = Duration::from_secs(10 * 60); -/// Interval between calling sync maintain procedure. -const MAINTAIN_INTERVAL: Duration = Duration::from_secs(30); - -/// Source client trait. -#[async_trait] -pub trait SourceClient: RelayClient { - /// Get best block number. 
- async fn best_block_number(&self) -> Result; - - /// Get header by hash. - async fn header_by_hash(&self, hash: P::Hash) -> Result; - - /// Get canonical header by number. - async fn header_by_number(&self, number: P::Number) -> Result; - - /// Get completion data by header hash. - async fn header_completion(&self, id: HeaderIdOf

) - -> Result<(HeaderIdOf

, Option), Self::Error>; - - /// Get extra data by header hash. - async fn header_extra( - &self, - id: HeaderIdOf

, - header: QueuedHeader

, - ) -> Result<(HeaderIdOf

, P::Extra), Self::Error>; -} - -/// Target client trait. -#[async_trait] -pub trait TargetClient: RelayClient { - /// Returns ID of best header known to the target node. - async fn best_header_id(&self) -> Result, Self::Error>; - - /// Returns true if header is known to the target node. - async fn is_known_header(&self, id: HeaderIdOf

) -> Result<(HeaderIdOf

, bool), Self::Error>; - - /// Submit headers. - async fn submit_headers(&self, headers: Vec>) -> SubmittedHeaders, Self::Error>; - - /// Returns ID of headers that require to be 'completed' before children can be submitted. - async fn incomplete_headers_ids(&self) -> Result>, Self::Error>; - - /// Submit completion data for header. - async fn complete_header(&self, id: HeaderIdOf

, completion: P::Completion) - -> Result, Self::Error>; - - /// Returns true if header requires extra data to be submitted. - async fn requires_extra(&self, header: QueuedHeader

) -> Result<(HeaderIdOf

, bool), Self::Error>; -} - -/// Synchronization maintain procedure. -#[async_trait] -pub trait SyncMaintain: 'static + Clone + Send + Sync { - /// Run custom maintain procedures. This is guaranteed to be called when both source and target - /// clients are unoccupied. - async fn maintain(&self, _sync: &mut HeadersSync

) {} -} - -impl SyncMaintain

for () {} - -/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs sync loop. -pub fn metrics_prefix() -> String { - format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME) -} - -/// Run headers synchronization. -#[allow(clippy::too_many_arguments)] -pub async fn run>( - source_client: impl SourceClient

, - source_tick: Duration, - target_client: TC, - target_tick: Duration, - sync_maintain: impl SyncMaintain

, - sync_params: HeadersSyncParams, - metrics_params: MetricsParams, - exit_signal: impl Future + 'static + Send, -) -> Result<(), String> { - let exit_signal = exit_signal.shared(); - relay_utils::relay_loop(source_client, target_client) - .with_metrics(Some(metrics_prefix::

()), metrics_params) - .loop_metric(|registry, prefix| SyncLoopMetrics::new(registry, prefix))? - .standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))? - .expose() - .await? - .run(metrics_prefix::

(), move |source_client, target_client, metrics| { - run_until_connection_lost( - source_client, - source_tick, - target_client, - target_tick, - sync_maintain.clone(), - sync_params.clone(), - metrics, - exit_signal.clone(), - ) - }) - .await -} - -/// Run headers synchronization. -#[allow(clippy::too_many_arguments)] -async fn run_until_connection_lost>( - source_client: impl SourceClient

, - source_tick: Duration, - target_client: TC, - target_tick: Duration, - sync_maintain: impl SyncMaintain

, - sync_params: HeadersSyncParams, - metrics_sync: Option, - exit_signal: impl Future + Send, -) -> Result<(), FailedClient> { - let mut progress_context = (Instant::now(), None, None); - - let mut sync = HeadersSync::

::new(sync_params); - let mut stall_countdown = None; - let mut last_update_time = Instant::now(); - - let mut source_retry_backoff = retry_backoff(); - let mut source_client_is_online = false; - let mut source_best_block_number_required = false; - let source_best_block_number_future = source_client.best_block_number().fuse(); - let source_new_header_future = futures::future::Fuse::terminated(); - let source_orphan_header_future = futures::future::Fuse::terminated(); - let source_extra_future = futures::future::Fuse::terminated(); - let source_completion_future = futures::future::Fuse::terminated(); - let source_go_offline_future = futures::future::Fuse::terminated(); - let source_tick_stream = interval(source_tick).fuse(); - - let mut target_retry_backoff = retry_backoff(); - let mut target_client_is_online = false; - let mut target_best_block_required = false; - let mut target_incomplete_headers_required = true; - let target_best_block_future = target_client.best_header_id().fuse(); - let target_incomplete_headers_future = futures::future::Fuse::terminated(); - let target_extra_check_future = futures::future::Fuse::terminated(); - let target_existence_status_future = futures::future::Fuse::terminated(); - let target_submit_header_future = futures::future::Fuse::terminated(); - let target_complete_header_future = futures::future::Fuse::terminated(); - let target_go_offline_future = futures::future::Fuse::terminated(); - let target_tick_stream = interval(target_tick).fuse(); - - let mut maintain_required = false; - let maintain_stream = interval(MAINTAIN_INTERVAL).fuse(); - - let exit_signal = exit_signal.fuse(); - - futures::pin_mut!( - source_best_block_number_future, - source_new_header_future, - source_orphan_header_future, - source_extra_future, - source_completion_future, - source_go_offline_future, - source_tick_stream, - target_best_block_future, - target_incomplete_headers_future, - target_extra_check_future, - target_existence_status_future, - 
target_submit_header_future, - target_complete_header_future, - target_go_offline_future, - target_tick_stream, - maintain_stream, - exit_signal - ); - - loop { - futures::select! { - source_best_block_number = source_best_block_number_future => { - source_best_block_number_required = false; - - source_client_is_online = process_future_result( - source_best_block_number, - &mut source_retry_backoff, - |source_best_block_number| sync.source_best_header_number_response(source_best_block_number), - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving best header number from {}", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - source_new_header = source_new_header_future => { - source_client_is_online = process_future_result( - source_new_header, - &mut source_retry_backoff, - |source_new_header| sync.headers_mut().header_response(source_new_header), - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving header from {} node", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - source_orphan_header = source_orphan_header_future => { - source_client_is_online = process_future_result( - source_orphan_header, - &mut source_retry_backoff, - |source_orphan_header| sync.headers_mut().header_response(source_orphan_header), - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving orphan header from {} node", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - source_extra = source_extra_future => { - source_client_is_online = process_future_result( - source_extra, - &mut source_retry_backoff, - |(header, extra)| sync.headers_mut().extra_response(&header, extra), - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving extra data from {} node", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - source_completion = source_completion_future => 
{ - source_client_is_online = process_future_result( - source_completion, - &mut source_retry_backoff, - |(header, completion)| sync.headers_mut().completion_response(&header, completion), - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving completion data from {} node", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - _ = source_go_offline_future => { - source_client_is_online = true; - }, - _ = source_tick_stream.next() => { - if sync.is_almost_synced() { - source_best_block_number_required = true; - } - }, - target_best_block = target_best_block_future => { - target_best_block_required = false; - - target_client_is_online = process_future_result( - target_best_block, - &mut target_retry_backoff, - |target_best_block| { - let head_updated = sync.target_best_header_response(target_best_block); - if head_updated { - last_update_time = Instant::now(); - } - match head_updated { - // IF head is updated AND there are still our transactions: - // => restart stall countdown timer - true if sync.headers().headers_in_status(HeaderStatus::Submitted) != 0 => - stall_countdown = Some(Instant::now()), - // IF head is updated AND there are no our transactions: - // => stop stall countdown timer - true => stall_countdown = None, - // IF head is not updated AND stall countdown is not yet completed - // => do nothing - false if stall_countdown - .map(|stall_countdown| stall_countdown.elapsed() < STALL_SYNC_TIMEOUT) - .unwrap_or(true) - => (), - // IF head is not updated AND stall countdown has completed - // => restart sync - false => { - log::info!( - target: "bridge", - "Sync has stalled. 
Restarting {} headers synchronization.", - P::SOURCE_NAME, - ); - stall_countdown = None; - sync.restart(); - }, - } - }, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving best known {} header from {} node", P::SOURCE_NAME, P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - incomplete_headers_ids = target_incomplete_headers_future => { - target_incomplete_headers_required = false; - - target_client_is_online = process_future_result( - incomplete_headers_ids, - &mut target_retry_backoff, - |incomplete_headers_ids| sync.headers_mut().incomplete_headers_response(incomplete_headers_ids), - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving incomplete headers from {} node", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - target_existence_status = target_existence_status_future => { - target_client_is_online = process_future_result( - target_existence_status, - &mut target_retry_backoff, - |(target_header, target_existence_status)| sync - .headers_mut() - .maybe_orphan_response(&target_header, target_existence_status), - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving existence status from {} node", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - submitted_headers = target_submit_header_future => { - // following line helps Rust understand the type of `submitted_headers` :/ - let submitted_headers: SubmittedHeaders, TC::Error> = submitted_headers; - let submitted_headers_str = format!("{}", submitted_headers); - let all_headers_rejected = submitted_headers.submitted.is_empty() - && submitted_headers.incomplete.is_empty(); - let has_submitted_headers = sync.headers().headers_in_status(HeaderStatus::Submitted) != 0; - - let maybe_fatal_error = match submitted_headers.fatal_error { - Some(fatal_error) => Err(StringifiedMaybeConnectionError::new( - 
fatal_error.is_connection_error(), - format!("{:?}", fatal_error), - )), - None if all_headers_rejected && !has_submitted_headers => - Err(StringifiedMaybeConnectionError::new(false, "All headers were rejected".into())), - None => Ok(()), - }; - - let no_fatal_error = maybe_fatal_error.is_ok(); - target_client_is_online = process_future_result( - maybe_fatal_error, - &mut target_retry_backoff, - |_| {}, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error submitting headers to {} node", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - - log::debug!(target: "bridge", "Header submit result: {}", submitted_headers_str); - - sync.headers_mut().headers_submitted(submitted_headers.submitted); - sync.headers_mut().add_incomplete_headers(false, submitted_headers.incomplete); - - // when there's no fatal error, but node has rejected all our headers we may - // want to pause until our submitted headers will be accepted - if no_fatal_error && all_headers_rejected && has_submitted_headers { - sync.pause_submit(); - } - }, - target_complete_header_result = target_complete_header_future => { - target_client_is_online = process_future_result( - target_complete_header_result, - &mut target_retry_backoff, - |completed_header| sync.headers_mut().header_completed(&completed_header), - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error completing headers at {}", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - target_extra_check_result = target_extra_check_future => { - target_client_is_online = process_future_result( - target_extra_check_result, - &mut target_retry_backoff, - |(header, extra_check_result)| sync - .headers_mut() - .maybe_extra_response(&header, extra_check_result), - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving receipts requirement from {} node", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - _ 
= target_go_offline_future => { - target_client_is_online = true; - }, - _ = target_tick_stream.next() => { - target_best_block_required = true; - target_incomplete_headers_required = true; - }, - - _ = maintain_stream.next() => { - maintain_required = true; - }, - _ = exit_signal => { - return Ok(()); - } - } - - // update metrics - if let Some(ref metrics_sync) = metrics_sync { - metrics_sync.update(&sync); - } - - // print progress - progress_context = print_sync_progress(progress_context, &sync); - - // run maintain procedures - if maintain_required && source_client_is_online && target_client_is_online { - log::debug!(target: "bridge", "Maintaining headers sync loop"); - maintain_required = false; - sync_maintain.maintain(&mut sync).await; - } - - // If the target client is accepting requests we update the requests that - // we want it to run - if !maintain_required && target_client_is_online { - // NOTE: Is is important to reset this so that we only have one - // request being processed by the client at a time. This prevents - // race conditions like receiving two transactions with the same - // nonce from the client. - target_client_is_online = false; - - // The following is how we prioritize requests: - // - // 1. Get best block - // - Stops us from downloading or submitting new blocks - // - Only called rarely - // - // 2. Get incomplete headers - // - Stops us from submitting new blocks - // - Only called rarely - // - // 3. Get complete headers - // - Stops us from submitting new blocks - // - // 4. Check if we need extra data from source - // - Stops us from downloading or submitting new blocks - // - // 5. Check existence of header - // - Stops us from submitting new blocks - // - // 6. 
Submit header - - if target_best_block_required { - log::debug!(target: "bridge", "Asking {} about best block", P::TARGET_NAME); - target_best_block_future.set(target_client.best_header_id().fuse()); - } else if target_incomplete_headers_required { - log::debug!(target: "bridge", "Asking {} about incomplete headers", P::TARGET_NAME); - target_incomplete_headers_future.set(target_client.incomplete_headers_ids().fuse()); - } else if let Some((id, completion)) = sync.headers_mut().header_to_complete() { - log::debug!( - target: "bridge", - "Going to complete header: {:?}", - id, - ); - - target_complete_header_future.set(target_client.complete_header(id, completion.clone()).fuse()); - } else if let Some(header) = sync.headers().header(HeaderStatus::MaybeExtra) { - log::debug!( - target: "bridge", - "Checking if header submission requires extra: {:?}", - header.id(), - ); - - target_extra_check_future.set(target_client.requires_extra(header.clone()).fuse()); - } else if let Some(header) = sync.headers().header(HeaderStatus::MaybeOrphan) { - // for MaybeOrphan we actually ask for parent' header existence - let parent_id = header.parent_id(); - - log::debug!( - target: "bridge", - "Asking {} node for existence of: {:?}", - P::TARGET_NAME, - parent_id, - ); - - target_existence_status_future.set(target_client.is_known_header(parent_id).fuse()); - } else if let Some(headers) = - sync.select_headers_to_submit(last_update_time.elapsed() > BACKUP_STALL_SYNC_TIMEOUT) - { - log::debug!( - target: "bridge", - "Submitting {} header(s) to {} node: {:?}", - headers.len(), - P::TARGET_NAME, - format_ids(headers.iter().map(|header| header.id())), - ); - - let headers = headers.into_iter().cloned().collect(); - target_submit_header_future.set(target_client.submit_headers(headers).fuse()); - - // remember that we have submitted some headers - if stall_countdown.is_none() { - stall_countdown = Some(Instant::now()); - } - } else { - target_client_is_online = true; - } - } - - // If the 
source client is accepting requests we update the requests that - // we want it to run - if !maintain_required && source_client_is_online { - // NOTE: Is is important to reset this so that we only have one - // request being processed by the client at a time. This prevents - // race conditions like receiving two transactions with the same - // nonce from the client. - source_client_is_online = false; - - // The following is how we prioritize requests: - // - // 1. Get best block - // - Stops us from downloading or submitting new blocks - // - Only called rarely - // - // 2. Download completion data - // - Stops us from submitting new blocks - // - // 3. Download extra data - // - Stops us from submitting new blocks - // - // 4. Download missing headers - // - Stops us from downloading or submitting new blocks - // - // 5. Downloading new headers - - if source_best_block_number_required { - log::debug!(target: "bridge", "Asking {} node about best block number", P::SOURCE_NAME); - source_best_block_number_future.set(source_client.best_block_number().fuse()); - } else if let Some(id) = sync.headers_mut().incomplete_header() { - log::debug!( - target: "bridge", - "Retrieving completion data for header: {:?}", - id, - ); - source_completion_future.set(source_client.header_completion(id).fuse()); - } else if let Some(header) = sync.headers().header(HeaderStatus::Extra) { - let id = header.id(); - log::debug!( - target: "bridge", - "Retrieving extra data for header: {:?}", - id, - ); - source_extra_future.set(source_client.header_extra(id, header.clone()).fuse()); - } else if let Some(header) = sync.select_orphan_header_to_download() { - // for Orphan we actually ask for parent' header - let parent_id = header.parent_id(); - - // if we have end up with orphan header#0, then we are misconfigured - if parent_id.0.is_zero() { - log::error!( - target: "bridge", - "Misconfiguration. 
Genesis {} header is considered orphan by {} node", - P::SOURCE_NAME, - P::TARGET_NAME, - ); - return Ok(()); - } - - log::debug!( - target: "bridge", - "Going to download orphan header from {} node: {:?}", - P::SOURCE_NAME, - parent_id, - ); - - source_orphan_header_future.set(source_client.header_by_hash(parent_id.1).fuse()); - } else if let Some(id) = sync.select_new_header_to_download() { - log::debug!( - target: "bridge", - "Going to download new header from {} node: {:?}", - P::SOURCE_NAME, - id, - ); - - source_new_header_future.set(source_client.header_by_number(id).fuse()); - } else { - source_client_is_online = true; - } - } - } -} - -/// Print synchronization progress. -fn print_sync_progress( - progress_context: (Instant, Option, Option), - eth_sync: &HeadersSync

, -) -> (Instant, Option, Option) { - let (prev_time, prev_best_header, prev_target_header) = progress_context; - let now_time = Instant::now(); - let (now_best_header, now_target_header) = eth_sync.status(); - - let need_update = now_time - prev_time > Duration::from_secs(10) - || match (prev_best_header, now_best_header) { - (Some(prev_best_header), Some(now_best_header)) => { - now_best_header.0.saturating_sub(prev_best_header) > 10.into() - } - _ => false, - }; - if !need_update { - return (prev_time, prev_best_header, prev_target_header); - } - - log::info!( - target: "bridge", - "Synced {:?} of {:?} headers", - now_best_header.map(|id| id.0), - now_target_header, - ); - (now_time, (*now_best_header).map(|id| id.0), *now_target_header) -} diff --git a/polkadot/bridges/relays/headers/src/sync_loop_metrics.rs b/polkadot/bridges/relays/headers/src/sync_loop_metrics.rs deleted file mode 100644 index 37dae1134042890420f43fc19dc8d7ca016e58b2..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/headers/src/sync_loop_metrics.rs +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Metrics for headers synchronization relay loop. 
- -use crate::sync::HeadersSync; -use crate::sync_types::{HeaderStatus, HeadersSyncPipeline}; - -use num_traits::Zero; -use relay_utils::metrics::{metric_name, register, GaugeVec, Opts, PrometheusError, Registry, U64}; - -/// Headers sync metrics. -#[derive(Clone)] -pub struct SyncLoopMetrics { - /// Best syncing headers at "source" and "target" nodes. - best_block_numbers: GaugeVec, - /// Number of headers in given states (see `HeaderStatus`). - blocks_in_state: GaugeVec, -} - -impl SyncLoopMetrics { - /// Create and register headers loop metrics. - pub fn new(registry: &Registry, prefix: Option<&str>) -> Result { - Ok(SyncLoopMetrics { - best_block_numbers: register( - GaugeVec::new( - Opts::new( - metric_name(prefix, "best_block_numbers"), - "Best block numbers on source and target nodes", - ), - &["node"], - )?, - registry, - )?, - blocks_in_state: register( - GaugeVec::new( - Opts::new( - metric_name(prefix, "blocks_in_state"), - "Number of blocks in given state", - ), - &["state"], - )?, - registry, - )?, - }) - } -} - -impl SyncLoopMetrics { - /// Update best block number at source. - pub fn update_best_block_at_source>(&self, source_best_number: Number) { - self.best_block_numbers - .with_label_values(&["source"]) - .set(source_best_number.into()); - } - - /// Update best block number at target. - pub fn update_best_block_at_target>(&self, target_best_number: Number) { - self.best_block_numbers - .with_label_values(&["target"]) - .set(target_best_number.into()); - } - - /// Update metrics. - pub fn update(&self, sync: &HeadersSync

) { - let headers = sync.headers(); - let source_best_number = sync.source_best_number().unwrap_or_else(Zero::zero); - let target_best_number = sync.target_best_header().map(|id| id.0).unwrap_or_else(Zero::zero); - - self.update_best_block_at_source(source_best_number); - self.update_best_block_at_target(target_best_number); - - self.blocks_in_state - .with_label_values(&["maybe_orphan"]) - .set(headers.headers_in_status(HeaderStatus::MaybeOrphan) as _); - self.blocks_in_state - .with_label_values(&["orphan"]) - .set(headers.headers_in_status(HeaderStatus::Orphan) as _); - self.blocks_in_state - .with_label_values(&["maybe_extra"]) - .set(headers.headers_in_status(HeaderStatus::MaybeExtra) as _); - self.blocks_in_state - .with_label_values(&["extra"]) - .set(headers.headers_in_status(HeaderStatus::Extra) as _); - self.blocks_in_state - .with_label_values(&["ready"]) - .set(headers.headers_in_status(HeaderStatus::Ready) as _); - self.blocks_in_state - .with_label_values(&["incomplete"]) - .set(headers.headers_in_status(HeaderStatus::Incomplete) as _); - self.blocks_in_state - .with_label_values(&["submitted"]) - .set(headers.headers_in_status(HeaderStatus::Submitted) as _); - } -} diff --git a/polkadot/bridges/relays/headers/src/sync_loop_tests.rs b/polkadot/bridges/relays/headers/src/sync_loop_tests.rs deleted file mode 100644 index 11f15778873b9dc56ab7f2a0740c6cc3646d7963..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/headers/src/sync_loop_tests.rs +++ /dev/null @@ -1,594 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg(test)] - -use crate::sync_loop::{run, SourceClient, TargetClient}; -use crate::sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders}; - -use async_trait::async_trait; -use backoff::backoff::Backoff; -use futures::{future::FutureExt, stream::StreamExt}; -use parking_lot::Mutex; -use relay_utils::{ - metrics::MetricsParams, process_future_result, relay_loop::Client as RelayClient, retry_backoff, HeaderId, - MaybeConnectionError, -}; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, - time::Duration, -}; - -pub type TestNumber = u64; -pub type TestHash = u64; -pub type TestHeaderId = HeaderId; -pub type TestExtra = u64; -pub type TestCompletion = u64; -pub type TestQueuedHeader = QueuedHeader; - -#[derive(Default, Debug, Clone, PartialEq)] -pub struct TestHeader { - pub hash: TestHash, - pub number: TestNumber, - pub parent_hash: TestHash, -} - -impl SourceHeader for TestHeader { - fn id(&self) -> TestHeaderId { - HeaderId(self.number, self.hash) - } - - fn parent_id(&self) -> TestHeaderId { - HeaderId(self.number - 1, self.parent_hash) - } -} - -#[derive(Debug, Clone)] -struct TestError(bool); - -impl MaybeConnectionError for TestError { - fn is_connection_error(&self) -> bool { - self.0 - } -} - -#[derive(Debug, Clone, Copy, PartialEq)] -pub struct TestHeadersSyncPipeline; - -impl HeadersSyncPipeline for TestHeadersSyncPipeline { - const SOURCE_NAME: &'static str = "Source"; - const TARGET_NAME: &'static str = "Target"; - - type Hash = TestHash; - type Number = TestNumber; - type Header = TestHeader; - type Extra = TestExtra; - type Completion 
= TestCompletion; - - fn estimate_size(_: &TestQueuedHeader) -> usize { - 0 - } -} - -enum SourceMethod { - BestBlockNumber, - HeaderByHash(TestHash), - HeaderByNumber(TestNumber), - HeaderCompletion(TestHeaderId), - HeaderExtra(TestHeaderId, TestQueuedHeader), -} - -#[derive(Clone)] -struct Source { - data: Arc>, - on_method_call: Arc, -} - -struct SourceData { - best_block_number: Result, - header_by_hash: HashMap, - header_by_number: HashMap, - provides_completion: bool, - provides_extra: bool, -} - -impl Source { - pub fn new( - best_block_id: TestHeaderId, - headers: Vec<(bool, TestHeader)>, - on_method_call: impl Fn(SourceMethod, &mut SourceData) + Send + Sync + 'static, - ) -> Self { - Source { - data: Arc::new(Mutex::new(SourceData { - best_block_number: Ok(best_block_id.0), - header_by_hash: headers - .iter() - .map(|(_, header)| (header.hash, header.clone())) - .collect(), - header_by_number: headers - .iter() - .filter_map(|(is_canonical, header)| { - if *is_canonical { - Some((header.hash, header.clone())) - } else { - None - } - }) - .collect(), - provides_completion: true, - provides_extra: true, - })), - on_method_call: Arc::new(on_method_call), - } - } -} - -#[async_trait] -impl RelayClient for Source { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - unimplemented!() - } -} - -#[async_trait] -impl SourceClient for Source { - async fn best_block_number(&self) -> Result { - let mut data = self.data.lock(); - (self.on_method_call)(SourceMethod::BestBlockNumber, &mut *data); - data.best_block_number.clone() - } - - async fn header_by_hash(&self, hash: TestHash) -> Result { - let mut data = self.data.lock(); - (self.on_method_call)(SourceMethod::HeaderByHash(hash), &mut *data); - data.header_by_hash.get(&hash).cloned().ok_or(TestError(false)) - } - - async fn header_by_number(&self, number: TestNumber) -> Result { - let mut data = self.data.lock(); - (self.on_method_call)(SourceMethod::HeaderByNumber(number), &mut 
*data); - data.header_by_number.get(&number).cloned().ok_or(TestError(false)) - } - - async fn header_completion(&self, id: TestHeaderId) -> Result<(TestHeaderId, Option), TestError> { - let mut data = self.data.lock(); - (self.on_method_call)(SourceMethod::HeaderCompletion(id), &mut *data); - if data.provides_completion { - Ok((id, Some(test_completion(id)))) - } else { - Ok((id, None)) - } - } - - async fn header_extra( - &self, - id: TestHeaderId, - header: TestQueuedHeader, - ) -> Result<(TestHeaderId, TestExtra), TestError> { - let mut data = self.data.lock(); - (self.on_method_call)(SourceMethod::HeaderExtra(id, header), &mut *data); - if data.provides_extra { - Ok((id, test_extra(id))) - } else { - Err(TestError(false)) - } - } -} - -enum TargetMethod { - BestHeaderId, - IsKnownHeader(TestHeaderId), - SubmitHeaders(Vec), - IncompleteHeadersIds, - CompleteHeader(TestHeaderId, TestCompletion), - RequiresExtra(TestQueuedHeader), -} - -#[derive(Clone)] -struct Target { - data: Arc>, - on_method_call: Arc, -} - -struct TargetData { - best_header_id: Result, - is_known_header_by_hash: HashMap, - submitted_headers: HashMap, - submit_headers_result: Option>, - completed_headers: HashMap, - requires_completion: bool, - requires_extra: bool, -} - -impl Target { - pub fn new( - best_header_id: TestHeaderId, - headers: Vec, - on_method_call: impl Fn(TargetMethod, &mut TargetData) + Send + Sync + 'static, - ) -> Self { - Target { - data: Arc::new(Mutex::new(TargetData { - best_header_id: Ok(best_header_id), - is_known_header_by_hash: headers.iter().map(|header| (header.1, true)).collect(), - submitted_headers: HashMap::new(), - submit_headers_result: None, - completed_headers: HashMap::new(), - requires_completion: false, - requires_extra: false, - })), - on_method_call: Arc::new(on_method_call), - } - } -} - -#[async_trait] -impl RelayClient for Target { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - unimplemented!() - } -} - 
-#[async_trait] -impl TargetClient for Target { - async fn best_header_id(&self) -> Result { - let mut data = self.data.lock(); - (self.on_method_call)(TargetMethod::BestHeaderId, &mut *data); - data.best_header_id.clone() - } - - async fn is_known_header(&self, id: TestHeaderId) -> Result<(TestHeaderId, bool), TestError> { - let mut data = self.data.lock(); - (self.on_method_call)(TargetMethod::IsKnownHeader(id), &mut *data); - data.is_known_header_by_hash - .get(&id.1) - .cloned() - .map(|is_known_header| Ok((id, is_known_header))) - .unwrap_or(Ok((id, false))) - } - - async fn submit_headers(&self, headers: Vec) -> SubmittedHeaders { - let mut data = self.data.lock(); - (self.on_method_call)(TargetMethod::SubmitHeaders(headers.clone()), &mut *data); - data.submitted_headers - .extend(headers.iter().map(|header| (header.id().1, header.clone()))); - data.submit_headers_result.take().expect("test must accept headers") - } - - async fn incomplete_headers_ids(&self) -> Result, TestError> { - let mut data = self.data.lock(); - (self.on_method_call)(TargetMethod::IncompleteHeadersIds, &mut *data); - if data.requires_completion { - Ok(data - .submitted_headers - .iter() - .filter(|(hash, _)| !data.completed_headers.contains_key(hash)) - .map(|(_, header)| header.id()) - .collect()) - } else { - Ok(HashSet::new()) - } - } - - async fn complete_header(&self, id: TestHeaderId, completion: TestCompletion) -> Result { - let mut data = self.data.lock(); - (self.on_method_call)(TargetMethod::CompleteHeader(id, completion), &mut *data); - data.completed_headers.insert(id.1, completion); - Ok(id) - } - - async fn requires_extra(&self, header: TestQueuedHeader) -> Result<(TestHeaderId, bool), TestError> { - let mut data = self.data.lock(); - (self.on_method_call)(TargetMethod::RequiresExtra(header.clone()), &mut *data); - if data.requires_extra { - Ok((header.id(), true)) - } else { - Ok((header.id(), false)) - } - } -} - -fn test_tick() -> Duration { - // in ideal world that 
should have been Duration::from_millis(0), because we do not want - // to sleep in tests at all, but that could lead to `select! {}` always waking on tick - // => not doing actual job - Duration::from_millis(10) -} - -fn test_id(number: TestNumber) -> TestHeaderId { - HeaderId(number, number) -} - -fn test_header(number: TestNumber) -> TestHeader { - let id = test_id(number); - TestHeader { - hash: id.1, - number: id.0, - parent_hash: if number == 0 { - TestHash::default() - } else { - test_id(number - 1).1 - }, - } -} - -fn test_forked_id(number: TestNumber, forked_from: TestNumber) -> TestHeaderId { - const FORK_OFFSET: TestNumber = 1000; - - if number == forked_from { - HeaderId(number, number) - } else { - HeaderId(number, FORK_OFFSET + number) - } -} - -fn test_forked_header(number: TestNumber, forked_from: TestNumber) -> TestHeader { - let id = test_forked_id(number, forked_from); - TestHeader { - hash: id.1, - number: id.0, - parent_hash: if number == 0 { - TestHash::default() - } else { - test_forked_id(number - 1, forked_from).1 - }, - } -} - -fn test_completion(id: TestHeaderId) -> TestCompletion { - id.0 -} - -fn test_extra(id: TestHeaderId) -> TestExtra { - id.0 -} - -fn source_reject_completion(method: &SourceMethod) { - if let SourceMethod::HeaderCompletion(_) = method { - unreachable!("HeaderCompletion request is not expected") - } -} - -fn source_reject_extra(method: &SourceMethod) { - if let SourceMethod::HeaderExtra(_, _) = method { - unreachable!("HeaderExtra request is not expected") - } -} - -fn target_accept_all_headers(method: &TargetMethod, data: &mut TargetData, requires_extra: bool) { - if let TargetMethod::SubmitHeaders(ref submitted) = method { - assert_eq!(submitted.iter().all(|header| header.extra().is_some()), requires_extra); - - data.submit_headers_result = Some(SubmittedHeaders { - submitted: submitted.iter().map(|header| header.id()).collect(), - ..Default::default() - }); - } -} - -fn target_signal_exit_when_header_submitted( - 
method: &TargetMethod, - header_id: TestHeaderId, - exit_signal: &futures::channel::mpsc::UnboundedSender<()>, -) { - if let TargetMethod::SubmitHeaders(ref submitted) = method { - if submitted.iter().any(|header| header.id() == header_id) { - exit_signal.unbounded_send(()).unwrap(); - } - } -} - -fn target_signal_exit_when_header_completed( - method: &TargetMethod, - header_id: TestHeaderId, - exit_signal: &futures::channel::mpsc::UnboundedSender<()>, -) { - if let TargetMethod::CompleteHeader(completed_id, _) = method { - if *completed_id == header_id { - exit_signal.unbounded_send(()).unwrap(); - } - } -} - -fn run_backoff_test(result: Result<(), TestError>) -> (Duration, Duration) { - let mut backoff = retry_backoff(); - - // no randomness in tests (otherwise intervals may overlap => asserts are failing) - backoff.randomization_factor = 0f64; - - // increase backoff's current interval - let interval1 = backoff.next_backoff().unwrap(); - let interval2 = backoff.next_backoff().unwrap(); - assert!(interval2 > interval1); - - // successful future result leads to backoff's reset - let go_offline_future = futures::future::Fuse::terminated(); - futures::pin_mut!(go_offline_future); - - process_future_result( - result, - &mut backoff, - |_| {}, - &mut go_offline_future, - async_std::task::sleep, - || "Test error".into(), - ); - - (interval2, backoff.next_backoff().unwrap()) -} - -#[test] -fn process_future_result_resets_backoff_on_success() { - let (interval2, interval_after_reset) = run_backoff_test(Ok(())); - assert!(interval2 > interval_after_reset); -} - -#[test] -fn process_future_result_resets_backoff_on_connection_error() { - let (interval2, interval_after_reset) = run_backoff_test(Err(TestError(true))); - assert!(interval2 > interval_after_reset); -} - -#[test] -fn process_future_result_does_not_reset_backoff_on_non_connection_error() { - let (interval2, interval_after_reset) = run_backoff_test(Err(TestError(false))); - assert!(interval2 < 
interval_after_reset); -} - -struct SyncLoopTestParams { - best_source_header: TestHeader, - headers_on_source: Vec<(bool, TestHeader)>, - best_target_header: TestHeader, - headers_on_target: Vec, - target_requires_extra: bool, - target_requires_completion: bool, - stop_at: TestHeaderId, -} - -fn run_sync_loop_test(params: SyncLoopTestParams) { - let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded(); - let target_requires_extra = params.target_requires_extra; - let target_requires_completion = params.target_requires_completion; - let stop_at = params.stop_at; - let source = Source::new( - params.best_source_header.id(), - params.headers_on_source, - move |method, _| { - if !target_requires_extra { - source_reject_extra(&method); - } - if !target_requires_completion { - source_reject_completion(&method); - } - }, - ); - let target = Target::new( - params.best_target_header.id(), - params.headers_on_target.into_iter().map(|header| header.id()).collect(), - move |method, data| { - target_accept_all_headers(&method, data, target_requires_extra); - if target_requires_completion { - target_signal_exit_when_header_completed(&method, stop_at, &exit_sender); - } else { - target_signal_exit_when_header_submitted(&method, stop_at, &exit_sender); - } - }, - ); - target.data.lock().requires_extra = target_requires_extra; - target.data.lock().requires_completion = target_requires_completion; - - let _ = async_std::task::block_on(run( - source, - test_tick(), - target, - test_tick(), - (), - crate::sync::tests::default_sync_params(), - MetricsParams::disabled(), - exit_receiver.into_future().map(|(_, _)| ()), - )); -} - -#[test] -fn sync_loop_is_able_to_synchronize_single_header() { - run_sync_loop_test(SyncLoopTestParams { - best_source_header: test_header(1), - headers_on_source: vec![(true, test_header(1))], - best_target_header: test_header(0), - headers_on_target: vec![test_header(0)], - target_requires_extra: false, - target_requires_completion: false, - 
stop_at: test_id(1), - }); -} - -#[test] -fn sync_loop_is_able_to_synchronize_single_header_with_extra() { - run_sync_loop_test(SyncLoopTestParams { - best_source_header: test_header(1), - headers_on_source: vec![(true, test_header(1))], - best_target_header: test_header(0), - headers_on_target: vec![test_header(0)], - target_requires_extra: true, - target_requires_completion: false, - stop_at: test_id(1), - }); -} - -#[test] -fn sync_loop_is_able_to_synchronize_single_header_with_completion() { - run_sync_loop_test(SyncLoopTestParams { - best_source_header: test_header(1), - headers_on_source: vec![(true, test_header(1))], - best_target_header: test_header(0), - headers_on_target: vec![test_header(0)], - target_requires_extra: false, - target_requires_completion: true, - stop_at: test_id(1), - }); -} - -#[test] -fn sync_loop_is_able_to_reorganize_from_shorter_fork() { - run_sync_loop_test(SyncLoopTestParams { - best_source_header: test_header(3), - headers_on_source: vec![ - (true, test_header(1)), - (true, test_header(2)), - (true, test_header(3)), - (false, test_forked_header(1, 0)), - (false, test_forked_header(2, 0)), - ], - best_target_header: test_forked_header(2, 0), - headers_on_target: vec![test_header(0), test_forked_header(1, 0), test_forked_header(2, 0)], - target_requires_extra: false, - target_requires_completion: false, - stop_at: test_id(3), - }); -} - -#[test] -fn sync_loop_is_able_to_reorganize_from_longer_fork() { - run_sync_loop_test(SyncLoopTestParams { - best_source_header: test_header(3), - headers_on_source: vec![ - (true, test_header(1)), - (true, test_header(2)), - (true, test_header(3)), - (false, test_forked_header(1, 0)), - (false, test_forked_header(2, 0)), - (false, test_forked_header(3, 0)), - (false, test_forked_header(4, 0)), - (false, test_forked_header(5, 0)), - ], - best_target_header: test_forked_header(5, 0), - headers_on_target: vec![ - test_header(0), - test_forked_header(1, 0), - test_forked_header(2, 0), - 
test_forked_header(3, 0), - test_forked_header(4, 0), - test_forked_header(5, 0), - ], - target_requires_extra: false, - target_requires_completion: false, - stop_at: test_id(3), - }); -} diff --git a/polkadot/bridges/relays/headers/src/sync_types.rs b/polkadot/bridges/relays/headers/src/sync_types.rs deleted file mode 100644 index 5809ebab59e1c47b1b9e8a02c690ee3523bc3521..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/headers/src/sync_types.rs +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types that are used by headers synchronization components. - -use relay_utils::{format_ids, HeaderId}; -use std::{ops::Deref, sync::Arc}; - -/// Ethereum header synchronization status. -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum HeaderStatus { - /// Header is unknown. - Unknown, - /// Header is in MaybeOrphan queue. - MaybeOrphan, - /// Header is in Orphan queue. - Orphan, - /// Header is in MaybeExtra queue. - MaybeExtra, - /// Header is in Extra queue. - Extra, - /// Header is in Ready queue. - Ready, - /// Header is in Incomplete queue. - Incomplete, - /// Header has been recently submitted to the target node. - Submitted, - /// Header is known to the target node. 
- Synced, -} - -/// Headers synchronization pipeline. -pub trait HeadersSyncPipeline: 'static + Clone + Send + Sync { - /// Name of the headers source. - const SOURCE_NAME: &'static str; - /// Name of the headers target. - const TARGET_NAME: &'static str; - - /// Headers we're syncing are identified by this hash. - type Hash: Eq + Clone + Copy + Send + Sync + std::fmt::Debug + std::fmt::Display + std::hash::Hash; - /// Headers we're syncing are identified by this number. - type Number: relay_utils::BlockNumberBase; - /// Type of header that we're syncing. - type Header: SourceHeader; - /// Type of extra data for the header that we're receiving from the source node: - /// 1) extra data is required for some headers; - /// 2) target node may answer if it'll require extra data before header is submitted; - /// 3) extra data available since the header creation time; - /// 4) header and extra data are submitted in single transaction. - /// - /// Example: Ethereum transactions receipts. - type Extra: Clone + Send + Sync + PartialEq + std::fmt::Debug; - /// Type of data required to 'complete' header that we're receiving from the source node: - /// 1) completion data is required for some headers; - /// 2) target node can't answer if it'll require completion data before header is accepted; - /// 3) completion data may be generated after header generation; - /// 4) header and completion data are submitted in separate transactions. - /// - /// Example: Substrate GRANDPA justifications. - type Completion: Clone + Send + Sync + std::fmt::Debug; - - /// Function used to estimate size of target-encoded header. - fn estimate_size(source: &QueuedHeader) -> usize; -} - -/// A HeaderId for `HeaderSyncPipeline`. -pub type HeaderIdOf

= HeaderId<

::Hash,

::Number>; - -/// Header that we're receiving from source node. -pub trait SourceHeader: Clone + std::fmt::Debug + PartialEq + Send + Sync { - /// Returns ID of header. - fn id(&self) -> HeaderId; - /// Returns ID of parent header. - /// - /// Panics if called for genesis header. - fn parent_id(&self) -> HeaderId; -} - -/// Header how it's stored in the synchronization queue. -#[derive(Clone, Debug, PartialEq)] -pub struct QueuedHeader(Arc>); - -impl QueuedHeader

{ - /// Creates new queued header. - pub fn new(header: P::Header) -> Self { - QueuedHeader(Arc::new(QueuedHeaderData { header, extra: None })) - } - - /// Set associated extra data. - pub fn set_extra(self, extra: P::Extra) -> Self { - QueuedHeader(Arc::new(QueuedHeaderData { - header: Arc::try_unwrap(self.0) - .map(|data| data.header) - .unwrap_or_else(|data| data.header.clone()), - extra: Some(extra), - })) - } -} - -impl Deref for QueuedHeader

{ - type Target = QueuedHeaderData

; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -/// Header how it's stored in the synchronization queue. -#[derive(Clone, Debug, Default, PartialEq)] -pub struct QueuedHeaderData { - header: P::Header, - extra: Option, -} - -impl QueuedHeader

{ - /// Returns ID of header. - pub fn id(&self) -> HeaderId { - self.header.id() - } - - /// Returns ID of parent header. - pub fn parent_id(&self) -> HeaderId { - self.header.parent_id() - } - - /// Returns reference to header. - pub fn header(&self) -> &P::Header { - &self.header - } - - /// Returns reference to associated extra data. - pub fn extra(&self) -> &Option { - &self.extra - } -} - -/// Headers submission result. -#[derive(Debug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct SubmittedHeaders { - /// IDs of headers that have been submitted to target node. - pub submitted: Vec, - /// IDs of incomplete headers. These headers were submitted (so this id is also in `submitted` vec), - /// but all descendants are not. - pub incomplete: Vec, - /// IDs of ignored headers that we have decided not to submit (they're either rejected by - /// target node immediately, or they're descendants of incomplete headers). - pub rejected: Vec, - /// Fatal target node error, if it has occured during submission. 
- pub fatal_error: Option, -} - -impl Default for SubmittedHeaders { - fn default() -> Self { - SubmittedHeaders { - submitted: Vec::new(), - incomplete: Vec::new(), - rejected: Vec::new(), - fatal_error: None, - } - } -} - -impl std::fmt::Display for SubmittedHeaders { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let submitted = format_ids(self.submitted.iter()); - let incomplete = format_ids(self.incomplete.iter()); - let rejected = format_ids(self.rejected.iter()); - - write!( - f, - "Submitted: {}, Incomplete: {}, Rejected: {}", - submitted, incomplete, rejected - ) - } -} diff --git a/polkadot/bridges/relays/lib-substrate-relay/Cargo.toml b/polkadot/bridges/relays/lib-substrate-relay/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..5bee10856daa3c64adf32fef6cb62107bcd6ce3b --- /dev/null +++ b/polkadot/bridges/relays/lib-substrate-relay/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "substrate-relay-helper" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +anyhow = "1.0" +thiserror = "1.0.26" +async-std = "1.9.0" +async-trait = "0.1.42" +codec = { package = "parity-scale-codec", version = "2.2.0" } +futures = "0.3.12" +num-traits = "0.2" +log = "0.4.14" + + +# Bridge dependencies + +bp-header-chain = { path = "../../primitives/header-chain" } +bridge-runtime-common = { path = "../../bin/runtime-common" } + +finality-grandpa = { version = "0.14.0" } +finality-relay = { path = "../finality" } +relay-utils = { path = "../utils" } +messages-relay = { path = "../messages" } +relay-substrate-client = { path = "../client-substrate" } + +pallet-bridge-messages = { path = "../../modules/messages" } + +bp-runtime = { path = "../../primitives/runtime" } +bp-messages = { path = "../../primitives/messages" } + +# Substrate Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" 
} +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[dev-dependencies] +bp-millau = { path = "../../primitives/chain-millau" } +bp-rococo = { path = "../../primitives/chain-rococo" } +bp-wococo = { path = "../../primitives/chain-wococo" } +relay-rococo-client = { path = "../client-rococo" } +relay-wococo-client = { path = "../client-wococo" } +rialto-runtime = { path = "../../bin/rialto/runtime" } diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/conversion_rate_update.rs b/polkadot/bridges/relays/lib-substrate-relay/src/conversion_rate_update.rs new file mode 100644 index 0000000000000000000000000000000000000000..93458457d34c9dc4213aa71817d8cb6f73ef6a76 --- /dev/null +++ b/polkadot/bridges/relays/lib-substrate-relay/src/conversion_rate_update.rs @@ -0,0 +1,243 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Tools for updating conversion rate that is stored in the runtime storage. + +use relay_utils::metrics::F64SharedRef; +use std::{future::Future, time::Duration}; + +/// Duration between updater iterations. 
+const SLEEP_DURATION: Duration = Duration::from_secs(60); + +/// Update-conversion-rate transaction status. +#[derive(Debug, Clone, Copy, PartialEq)] +enum TransactionStatus { + /// We have not submitted any transaction recently. + Idle, + /// We have recently submitted transaction that should update conversion rate. + Submitted(f64), +} + +/// Run infinite conversion rate updater loop. +/// +/// The loop is maintaining the Left -> Right conversion rate, used as `RightTokens = LeftTokens * +/// Rate`. +pub fn run_conversion_rate_update_loop< + SubmitConversionRateFuture: Future> + Send + 'static, +>( + left_to_right_stored_conversion_rate: F64SharedRef, + left_to_base_conversion_rate: F64SharedRef, + right_to_base_conversion_rate: F64SharedRef, + max_difference_ratio: f64, + submit_conversion_rate: impl Fn(f64) -> SubmitConversionRateFuture + Send + 'static, +) { + async_std::task::spawn(async move { + let mut transaction_status = TransactionStatus::Idle; + loop { + async_std::task::sleep(SLEEP_DURATION).await; + let maybe_new_conversion_rate = maybe_select_new_conversion_rate( + &mut transaction_status, + &left_to_right_stored_conversion_rate, + &left_to_base_conversion_rate, + &right_to_base_conversion_rate, + max_difference_ratio, + ) + .await; + if let Some((prev_conversion_rate, new_conversion_rate)) = maybe_new_conversion_rate { + let submit_conversion_rate_future = submit_conversion_rate(new_conversion_rate); + match submit_conversion_rate_future.await { + Ok(()) => { + transaction_status = TransactionStatus::Submitted(prev_conversion_rate); + }, + Err(error) => { + log::trace!(target: "bridge", "Failed to submit conversion rate update transaction: {:?}", error); + }, + } + } + } + }); +} + +/// Select new conversion rate to submit to the node. 
+async fn maybe_select_new_conversion_rate( + transaction_status: &mut TransactionStatus, + left_to_right_stored_conversion_rate: &F64SharedRef, + left_to_base_conversion_rate: &F64SharedRef, + right_to_base_conversion_rate: &F64SharedRef, + max_difference_ratio: f64, +) -> Option<(f64, f64)> { + let left_to_right_stored_conversion_rate = + (*left_to_right_stored_conversion_rate.read().await)?; + match *transaction_status { + TransactionStatus::Idle => (), + TransactionStatus::Submitted(previous_left_to_right_stored_conversion_rate) => { + // we can't compare float values from different sources directly, so we only care + // whether the stored rate has been changed or not. If it has been changed, then we + // assume that our proposal has been accepted. + // + // float comparison is ok here, because we compare same-origin (stored in runtime + // storage) values and if they are different, it means that the value has actually been + // updated + #[allow(clippy::float_cmp)] + if previous_left_to_right_stored_conversion_rate == left_to_right_stored_conversion_rate + { + // the rate has not been changed => we won't submit any transactions until it is + // accepted, or the rate is changed by someone else + return None + } + + *transaction_status = TransactionStatus::Idle; + }, + } + + let left_to_base_conversion_rate = (*left_to_base_conversion_rate.read().await)?; + let right_to_base_conversion_rate = (*right_to_base_conversion_rate.read().await)?; + let actual_left_to_right_conversion_rate = + right_to_base_conversion_rate / left_to_base_conversion_rate; + + let rate_difference = + (actual_left_to_right_conversion_rate - left_to_right_stored_conversion_rate).abs(); + let rate_difference_ratio = rate_difference / left_to_right_stored_conversion_rate; + if rate_difference_ratio < max_difference_ratio { + return None + } + + Some((left_to_right_stored_conversion_rate, actual_left_to_right_conversion_rate)) +} + +#[cfg(test)] +mod tests { + use super::*; + use 
async_std::sync::{Arc, RwLock}; + + fn test_maybe_select_new_conversion_rate( + mut transaction_status: TransactionStatus, + stored_conversion_rate: Option<f64>, + left_to_base_conversion_rate: Option<f64>, + right_to_base_conversion_rate: Option<f64>, + max_difference_ratio: f64, + ) -> (Option<(f64, f64)>, TransactionStatus) { + let stored_conversion_rate = Arc::new(RwLock::new(stored_conversion_rate)); + let left_to_base_conversion_rate = Arc::new(RwLock::new(left_to_base_conversion_rate)); + let right_to_base_conversion_rate = Arc::new(RwLock::new(right_to_base_conversion_rate)); + let result = async_std::task::block_on(maybe_select_new_conversion_rate( + &mut transaction_status, + &stored_conversion_rate, + &left_to_base_conversion_rate, + &right_to_base_conversion_rate, + max_difference_ratio, + )); + (result, transaction_status) + } + + #[test] + fn rate_is_not_updated_when_transaction_is_submitted() { + assert_eq!( + test_maybe_select_new_conversion_rate( + TransactionStatus::Submitted(10.0), + Some(10.0), + Some(1.0), + Some(1.0), + 0.0 + ), + (None, TransactionStatus::Submitted(10.0)), + ); + } + + #[test] + fn transaction_state_is_changed_to_idle_when_stored_rate_changes() { + assert_eq!( + test_maybe_select_new_conversion_rate( + TransactionStatus::Submitted(1.0), + Some(10.0), + Some(1.0), + Some(1.0), + 100.0 + ), + (None, TransactionStatus::Idle), + ); + } + + #[test] + fn transaction_is_not_submitted_when_left_to_base_rate_is_unknown() { + assert_eq!( + test_maybe_select_new_conversion_rate( + TransactionStatus::Idle, + Some(10.0), + None, + Some(1.0), + 0.0 + ), + (None, TransactionStatus::Idle), + ); + } + + #[test] + fn transaction_is_not_submitted_when_right_to_base_rate_is_unknown() { + assert_eq!( + test_maybe_select_new_conversion_rate( + TransactionStatus::Idle, + Some(10.0), + Some(1.0), + None, + 0.0 + ), + (None, TransactionStatus::Idle), + ); + } + + #[test] + fn transaction_is_not_submitted_when_stored_rate_is_unknown() { + assert_eq!( + 
test_maybe_select_new_conversion_rate( + TransactionStatus::Idle, + None, + Some(1.0), + Some(1.0), + 0.0 + ), + (None, TransactionStatus::Idle), + ); + } + + #[test] + fn transaction_is_not_submitted_when_difference_is_below_threshold() { + assert_eq!( + test_maybe_select_new_conversion_rate( + TransactionStatus::Idle, + Some(1.0), + Some(1.0), + Some(1.01), + 0.02 + ), + (None, TransactionStatus::Idle), + ); + } + + #[test] + fn transaction_is_submitted_when_difference_is_above_threshold() { + assert_eq!( + test_maybe_select_new_conversion_rate( + TransactionStatus::Idle, + Some(1.0), + Some(1.0), + Some(1.03), + 0.02 + ), + (Some((1.0, 1.03)), TransactionStatus::Idle), + ); + } +} diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/error.rs b/polkadot/bridges/relays/lib-substrate-relay/src/error.rs new file mode 100644 index 0000000000000000000000000000000000000000..802499503563dcbee09a6fa058520880bdc8d918 --- /dev/null +++ b/polkadot/bridges/relays/lib-substrate-relay/src/error.rs @@ -0,0 +1,58 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Relay errors. + +use relay_substrate_client as client; +use sp_finality_grandpa::AuthorityList; +use sp_runtime::traits::MaybeDisplay; +use std::fmt::Debug; +use thiserror::Error; + +/// Relay errors. 
+#[derive(Error, Debug)] +pub enum Error { + /// Failed to submit signed extrinsic to the target chain. + #[error("Failed to submit {0} transaction: {1:?}")] + SubmitTransaction(&'static str, client::Error), + /// Failed to subscribe to the justification stream of the source chain. + #[error("Failed to subscribe to {0} justifications: {1:?}")] + Subscribe(&'static str, client::Error), + /// Failed to read a justification from the source chain (client error). + #[error("Failed to read {0} justification from the stream: {1}")] + ReadJustification(&'static str, client::Error), + /// Failed to read a justification from the source chain (stream ended). + #[error("Failed to read {0} justification from the stream: stream has ended unexpectedly")] + ReadJustificationStreamEnded(&'static str), + /// Failed to decode a justification from the source chain. + #[error("Failed to decode {0} justification: {1:?}")] + DecodeJustification(&'static str, codec::Error), + /// GRANDPA authorities read from the source chain are invalid. + #[error("Read invalid {0} authorities set: {1:?}")] + ReadInvalidAuthorities(&'static str, AuthorityList), + /// Failed to guess initial GRANDPA authorities at the given header of the source chain. + #[error("Failed to guess initial {0} GRANDPA authorities set id: checked all possible ids in range [0; {1}]")] + GuessInitialAuthorities(&'static str, HeaderNumber), + /// Failed to retrieve GRANDPA authorities at the given header from the source chain. + #[error("Failed to retrieve {0} GRANDPA authorities set at header {1}: {2:?}")] + RetrieveAuthorities(&'static str, Hash, client::Error), + /// Failed to decode GRANDPA authorities at the given header of the source chain. + #[error("Failed to decode {0} GRANDPA authorities set at header {1}: {2:?}")] + DecodeAuthorities(&'static str, Hash, codec::Error), + /// Failed to retrieve header by the hash from the source chain. 
+ #[error("Failed to retrieve {0} header with hash {1}: {2:?}")] + RetrieveHeader(&'static str, Hash, client::Error), +} diff --git a/polkadot/bridges/relays/bin-substrate/src/finality_pipeline.rs b/polkadot/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs similarity index 72% rename from polkadot/bridges/relays/bin-substrate/src/finality_pipeline.rs rename to polkadot/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs index 19fa0917df3904448aa014c0918cd1cb71bb9088..cdfbb3354d27412853992e9fd0a9e7c3d66cc88d 100644 --- a/polkadot/bridges/relays/bin-substrate/src/finality_pipeline.rs +++ b/polkadot/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs @@ -16,17 +16,18 @@ //! Substrate-to-Substrate headers sync entrypoint. -use crate::finality_target::SubstrateFinalityTarget; +use crate::{finality_target::SubstrateFinalityTarget, STALL_TIMEOUT}; use bp_header_chain::justification::GrandpaJustification; +use bp_runtime::AccountIdOf; use finality_relay::{FinalitySyncParams, FinalitySyncPipeline}; -use relay_substrate_client::{finality_source::FinalitySource, BlockNumberOf, Chain, Client, HashOf, SyncHeader}; +use relay_substrate_client::{ + finality_source::FinalitySource, BlockNumberOf, Chain, Client, HashOf, SyncHeader, +}; use relay_utils::{metrics::MetricsParams, BlockNumberBase}; use sp_core::Bytes; -use std::{fmt::Debug, marker::PhantomData, time::Duration}; +use std::{fmt::Debug, marker::PhantomData}; -/// Default synchronization loop timeout. -pub(crate) const STALL_TIMEOUT: Duration = Duration::from_secs(120); /// Default limit of recent finality proofs. /// /// Finality delay of 4096 blocks is unlikely to happen in practice in @@ -34,7 +35,10 @@ pub(crate) const STALL_TIMEOUT: Duration = Duration::from_secs(120); pub(crate) const RECENT_FINALITY_PROOFS_LIMIT: usize = 4096; /// Headers sync pipeline for Substrate <-> Substrate relays. 
-pub trait SubstrateFinalitySyncPipeline: FinalitySyncPipeline { +pub trait SubstrateFinalitySyncPipeline: 'static + Clone + Debug + Send + Sync { + /// Pipeline for syncing finalized Source chain headers to Target chain. + type FinalitySyncPipeline: FinalitySyncPipeline; + /// Name of the runtime method that returns id of best finalized source header at target chain. const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str; @@ -49,19 +53,20 @@ pub trait SubstrateFinalitySyncPipeline: FinalitySyncPipeline { /// Start finality relay guards. /// /// Different finality bridges may have different set of guards - e.g. on ephemeral chains we - /// don't need version guards, on test chains we don't care that much about relayer account + /// don't need a version guards, on test chains we don't care that much about relayer account /// balance, ... So the implementation is left to the specific bridges. fn start_relay_guards(&self) {} /// Returns id of account that we're using to sign transactions at target chain. - fn transactions_author(&self) -> ::AccountId; + fn transactions_author(&self) -> AccountIdOf; /// Make submit header transaction. fn make_submit_finality_proof_transaction( &self, - transaction_nonce: ::Index, - header: Self::Header, - proof: Self::FinalityProof, + era: bp_runtime::TransactionEraOf, + transaction_nonce: bp_runtime::IndexOf, + header: ::Header, + proof: ::FinalityProof, ) -> Bytes; } @@ -69,9 +74,9 @@ pub trait SubstrateFinalitySyncPipeline: FinalitySyncPipeline { #[derive(Clone)] pub struct SubstrateFinalityToSubstrate { /// Client for the target chain. - pub(crate) target_client: Client, + pub target_client: Client, /// Data required to sign target chain transactions. - pub(crate) target_sign: TargetSign, + pub target_sign: TargetSign, /// Unused generic arguments dump. 
_marker: PhantomData, } @@ -86,14 +91,12 @@ impl Debug } } -impl SubstrateFinalityToSubstrate { +impl + SubstrateFinalityToSubstrate +{ /// Create new Substrate-to-Substrate headers pipeline. pub fn new(target_client: Client, target_sign: TargetSign) -> Self { - SubstrateFinalityToSubstrate { - target_client, - target_sign, - _marker: Default::default(), - } + SubstrateFinalityToSubstrate { target_client, target_sign, _marker: Default::default() } } } @@ -120,15 +123,16 @@ pub async fn run( source_client: Client, target_client: Client, only_mandatory_headers: bool, + transactions_mortality: Option, metrics_params: MetricsParams, ) -> anyhow::Result<()> where - P: SubstrateFinalitySyncPipeline< + P: SubstrateFinalitySyncPipeline, + P::FinalitySyncPipeline: FinalitySyncPipeline< Hash = HashOf, Number = BlockNumberOf, Header = SyncHeader, FinalityProof = GrandpaJustification, - TargetChain = TargetChain, >, SourceChain: Clone + Chain, BlockNumberOf: BlockNumberBase, @@ -143,11 +147,18 @@ where finality_relay::run( FinalitySource::new(source_client, None), - SubstrateFinalityTarget::new(target_client, pipeline), + SubstrateFinalityTarget::new(target_client, pipeline, transactions_mortality), FinalitySyncParams { - tick: std::cmp::max(SourceChain::AVERAGE_BLOCK_INTERVAL, TargetChain::AVERAGE_BLOCK_INTERVAL), + tick: std::cmp::max( + SourceChain::AVERAGE_BLOCK_INTERVAL, + TargetChain::AVERAGE_BLOCK_INTERVAL, + ), recent_finality_proofs_limit: RECENT_FINALITY_PROOFS_LIMIT, - stall_timeout: STALL_TIMEOUT, + stall_timeout: relay_substrate_client::transaction_stall_timeout( + transactions_mortality, + TargetChain::AVERAGE_BLOCK_INTERVAL, + STALL_TIMEOUT, + ), only_mandatory_headers, }, metrics_params, diff --git a/polkadot/bridges/relays/bin-substrate/src/finality_target.rs b/polkadot/bridges/relays/lib-substrate-relay/src/finality_target.rs similarity index 58% rename from polkadot/bridges/relays/bin-substrate/src/finality_target.rs rename to 
polkadot/bridges/relays/lib-substrate-relay/src/finality_target.rs index ffa10cabacbfa53822841fbf419deefa78a895e4..f50bd103f4300c0e78024b6cd50c81f5f83233cf 100644 --- a/polkadot/bridges/relays/bin-substrate/src/finality_target.rs +++ b/polkadot/bridges/relays/lib-substrate-relay/src/finality_target.rs @@ -22,7 +22,7 @@ use crate::finality_pipeline::SubstrateFinalitySyncPipeline; use async_trait::async_trait; use codec::Decode; -use finality_relay::TargetClient; +use finality_relay::{FinalitySyncPipeline, TargetClient}; use relay_substrate_client::{Chain, Client, Error as SubstrateError}; use relay_utils::relay_loop::Client as RelayClient; @@ -30,12 +30,13 @@ use relay_utils::relay_loop::Client as RelayClient; pub struct SubstrateFinalityTarget { client: Client, pipeline: P, + transactions_mortality: Option, } impl SubstrateFinalityTarget { /// Create new Substrate headers target. - pub fn new(client: Client, pipeline: P) -> Self { - SubstrateFinalityTarget { client, pipeline } + pub fn new(client: Client, pipeline: P, transactions_mortality: Option) -> Self { + SubstrateFinalityTarget { client, pipeline, transactions_mortality } } } @@ -44,6 +45,7 @@ impl Clone for SubstrateFinalityTarg SubstrateFinalityTarget { client: self.client.clone(), pipeline: self.pipeline.clone(), + transactions_mortality: self.transactions_mortality, } } } @@ -58,33 +60,53 @@ impl RelayClient for SubstrateFinali } #[async_trait] -impl TargetClient

for SubstrateFinalityTarget +impl TargetClient for SubstrateFinalityTarget where C: Chain, - P::Number: Decode, - P::Hash: Decode, P: SubstrateFinalitySyncPipeline, + ::Number: Decode, + ::Hash: Decode, { - async fn best_finalized_source_block_number(&self) -> Result { + async fn best_finalized_source_block_number( + &self, + ) -> Result<::Number, SubstrateError> { // we can't continue to relay finality if target node is out of sync, because // it may have already received (some of) headers that we're going to relay self.client.ensure_synced().await?; - Ok(crate::messages_source::read_client_state::( - &self.client, - P::BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET, - ) + Ok(crate::messages_source::read_client_state::< + C, + ::Hash, + ::Number, + >(&self.client, P::BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET) .await? .best_finalized_peer_at_best_self .0) } - async fn submit_finality_proof(&self, header: P::Header, proof: P::FinalityProof) -> Result<(), SubstrateError> { + async fn submit_finality_proof( + &self, + header: ::Header, + proof: ::FinalityProof, + ) -> Result<(), SubstrateError> { + let transactions_author = self.pipeline.transactions_author(); + let pipeline = self.pipeline.clone(); + let transactions_mortality = self.transactions_mortality; self.client - .submit_signed_extrinsic(self.pipeline.transactions_author(), move |transaction_nonce| { - self.pipeline - .make_submit_finality_proof_transaction(transaction_nonce, header, proof) - }) + .submit_signed_extrinsic( + transactions_author, + move |best_block_id, transaction_nonce| { + pipeline.make_submit_finality_proof_transaction( + relay_substrate_client::TransactionEra::new( + best_block_id, + transactions_mortality, + ), + transaction_nonce, + header, + proof, + ) + }, + ) .await .map(drop) } diff --git a/polkadot/bridges/relays/bin-substrate/src/headers_initialize.rs b/polkadot/bridges/relays/lib-substrate-relay/src/headers_initialize.rs similarity index 72% rename from 
polkadot/bridges/relays/bin-substrate/src/headers_initialize.rs rename to polkadot/bridges/relays/lib-substrate-relay/src/headers_initialize.rs index c2eab1bd3534e8f3be8285f09bcc0ac57fab6ac5..2e802c4cb215078cb121449d18093c90d7580d2b 100644 --- a/polkadot/bridges/relays/bin-substrate/src/headers_initialize.rs +++ b/polkadot/bridges/relays/lib-substrate-relay/src/headers_initialize.rs @@ -21,10 +21,12 @@ //! and authorities set from source to target chain. The headers sync starts //! with this header. -use bp_header_chain::InitializationData; +use crate::error::Error; + use bp_header_chain::{ find_grandpa_authorities_scheduled_change, justification::{verify_justification, GrandpaJustification}, + InitializationData, }; use codec::Decode; use finality_grandpa::voter_set::VoterSet; @@ -39,7 +41,9 @@ pub async fn initialize( source_client: Client, target_client: Client, target_transactions_signer: TargetChain::AccountId, - prepare_initialize_transaction: impl FnOnce(TargetChain::Index, InitializationData) -> Bytes, + prepare_initialize_transaction: impl FnOnce(TargetChain::Index, InitializationData) -> Bytes + + Send + + 'static, ) { let result = do_initialize( source_client, @@ -72,8 +76,10 @@ async fn do_initialize( source_client: Client, target_client: Client, target_transactions_signer: TargetChain::AccountId, - prepare_initialize_transaction: impl FnOnce(TargetChain::Index, InitializationData) -> Bytes, -) -> Result { + prepare_initialize_transaction: impl FnOnce(TargetChain::Index, InitializationData) -> Bytes + + Send + + 'static, +) -> Result::Number>> { let initialization_data = prepare_initialization_data(source_client).await?; log::info!( target: "bridge", @@ -84,40 +90,44 @@ async fn do_initialize( ); let initialization_tx_hash = target_client - .submit_signed_extrinsic(target_transactions_signer, move |transaction_nonce| { + .submit_signed_extrinsic(target_transactions_signer, move |_, transaction_nonce| { prepare_initialize_transaction(transaction_nonce, 
initialization_data) }) .await - .map_err(|err| format!("Failed to submit {} transaction: {:?}", TargetChain::NAME, err))?; + .map_err(|err| Error::SubmitTransaction(TargetChain::NAME, err))?; Ok(initialization_tx_hash) } /// Prepare initialization data for the GRANDPA verifier pallet. async fn prepare_initialization_data( source_client: Client, -) -> Result, String> { +) -> Result< + InitializationData, + Error::Number>, +> { // In ideal world we just need to get best finalized header and then to read GRANDPA authorities // set (`pallet_grandpa::CurrentSetId` + `GrandpaApi::grandpa_authorities()`) at this header. // - // But now there are problems with this approach - `CurrentSetId` may return invalid value. So here - // we're waiting for the next justification, read the authorities set and then try to figure out - // the set id with bruteforce. - let mut justifications = source_client + // But now there are problems with this approach - `CurrentSetId` may return invalid value. So + // here we're waiting for the next justification, read the authorities set and then try to + // figure out the set id with bruteforce. + let justifications = source_client .subscribe_justifications() .await - .map_err(|err| format!("Failed to subscribe to {} justifications: {:?}", SourceChain::NAME, err))?; - + .map_err(|err| Error::Subscribe(SourceChain::NAME, err))?; // Read next justification - the header that it finalizes will be used as initial header. - let justification = justifications.next().await.ok_or_else(|| { - format!( - "Failed to read {} justification from the stream: stream has ended unexpectedly", - SourceChain::NAME, - ) - })?; + let justification = justifications + .next() + .await + .map_err(|e| Error::ReadJustification(SourceChain::NAME, e)) + .and_then(|justification| { + justification.ok_or(Error::ReadJustificationStreamEnded(SourceChain::NAME)) + })?; // Read initial header. 
- let justification: GrandpaJustification = Decode::decode(&mut &justification.0[..]) - .map_err(|err| format!("Failed to decode {} justification: {:?}", SourceChain::NAME, err))?; + let justification: GrandpaJustification = + Decode::decode(&mut &justification.0[..]) + .map_err(|err| Error::DecodeJustification(SourceChain::NAME, err))?; let (initial_header_hash, initial_header_number) = (justification.commit.target_hash, justification.commit.target_number); @@ -130,7 +140,8 @@ async fn prepare_initialization_data( ); // Read GRANDPA authorities set at initial header. - let initial_authorities_set = source_authorities_set(&source_client, initial_header_hash).await?; + let initial_authorities_set = + source_authorities_set(&source_client, initial_header_hash).await?; log::trace!(target: "bridge", "Selected {} initial authorities set: {:?}", SourceChain::NAME, initial_authorities_set, @@ -149,7 +160,8 @@ async fn prepare_initialization_data( ); let schedules_change = scheduled_change.is_some(); if schedules_change { - authorities_for_verification = source_authorities_set(&source_client, *initial_header.parent_hash()).await?; + authorities_for_verification = + source_authorities_set(&source_client, *initial_header.parent_hash()).await?; log::trace!( target: "bridge", "Selected {} header is scheduling GRANDPA authorities set changes. Using previous set: {:?}", @@ -161,13 +173,8 @@ async fn prepare_initialization_data( // Now let's try to guess authorities set id by verifying justification. 
let mut initial_authorities_set_id = 0; let mut min_possible_block_number = SourceChain::BlockNumber::zero(); - let authorities_for_verification = VoterSet::new(authorities_for_verification.clone()).ok_or_else(|| { - format!( - "Read invalid {} authorities set: {:?}", - SourceChain::NAME, - authorities_for_verification, - ) - })?; + let authorities_for_verification = VoterSet::new(authorities_for_verification.clone()) + .ok_or(Error::ReadInvalidAuthorities(SourceChain::NAME, authorities_for_verification))?; loop { log::trace!( target: "bridge", "Trying {} GRANDPA authorities set id: {}", @@ -184,26 +191,21 @@ async fn prepare_initialization_data( .is_ok(); if is_valid_set_id { - break; + break } initial_authorities_set_id += 1; min_possible_block_number += One::one(); if min_possible_block_number > initial_header_number { - // there can't be more authorities set changes than headers => if we have reached `initial_block_number` - // and still have not found correct value of `initial_authorities_set_id`, then something - // else is broken => fail - return Err(format!( - "Failed to guess initial {} GRANDPA authorities set id: checked all\ - possible ids in range [0; {}]", - SourceChain::NAME, - initial_header_number - )); + // there can't be more authorities set changes than headers => if we have reached + // `initial_block_number` and still have not found correct value of + // `initial_authorities_set_id`, then something else is broken => fail + return Err(Error::GuessInitialAuthorities(SourceChain::NAME, initial_header_number)) } } Ok(InitializationData { - header: initial_header, + header: Box::new(initial_header), authority_list: initial_authorities_set, set_id: if schedules_change { initial_authorities_set_id + 1 @@ -218,39 +220,24 @@ async fn prepare_initialization_data( async fn source_header( source_client: &Client, header_hash: SourceChain::Hash, -) -> Result { - source_client.header_by_hash(header_hash).await.map_err(|err| { - format!( - "Failed to retrive 
{} header with hash {}: {:?}", - SourceChain::NAME, - header_hash, - err, - ) - }) +) -> Result::Number>> +{ + source_client + .header_by_hash(header_hash) + .await + .map_err(|err| Error::RetrieveHeader(SourceChain::NAME, header_hash, err)) } /// Read GRANDPA authorities set at given header. async fn source_authorities_set( source_client: &Client, header_hash: SourceChain::Hash, -) -> Result { +) -> Result::Number>> +{ let raw_authorities_set = source_client .grandpa_authorities_set(header_hash) .await - .map_err(|err| { - format!( - "Failed to retrive {} GRANDPA authorities set at header {}: {:?}", - SourceChain::NAME, - header_hash, - err, - ) - })?; - GrandpaAuthoritiesSet::decode(&mut &raw_authorities_set[..]).map_err(|err| { - format!( - "Failed to decode {} GRANDPA authorities set at header {}: {:?}", - SourceChain::NAME, - header_hash, - err, - ) - }) + .map_err(|err| Error::RetrieveAuthorities(SourceChain::NAME, header_hash, err))?; + GrandpaAuthoritiesSet::decode(&mut &raw_authorities_set[..]) + .map_err(|err| Error::DecodeAuthorities(SourceChain::NAME, header_hash, err)) } diff --git a/polkadot/bridges/relays/client-ethereum/src/lib.rs b/polkadot/bridges/relays/lib-substrate-relay/src/helpers.rs similarity index 55% rename from polkadot/bridges/relays/client-ethereum/src/lib.rs rename to polkadot/bridges/relays/lib-substrate-relay/src/helpers.rs index 8b3c6d8f8e733198ede31cc4de9aa95c5d41ea18..f95a8e0aba3ab43f011c2d431f3fb8027609e5f2 100644 --- a/polkadot/bridges/relays/client-ethereum/src/lib.rs +++ b/polkadot/bridges/relays/lib-substrate-relay/src/helpers.rs @@ -14,35 +14,16 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Tools to interact with (Open) Ethereum node using RPC methods. 
- -#![warn(missing_docs)] - -mod client; -mod error; -mod rpc; -mod sign; - -pub use crate::client::Client; -pub use crate::error::{Error, Result}; -pub use crate::sign::{sign_and_submit_transaction, SigningParams}; - -pub mod types; - -/// Ethereum-over-websocket connection params. -#[derive(Debug, Clone)] -pub struct ConnectionParams { - /// Websocket server hostname. - pub host: String, - /// Websocket server TCP port. - pub port: u16, -} - -impl Default for ConnectionParams { - fn default() -> Self { - ConnectionParams { - host: "localhost".into(), - port: 8546, - } - } +//! Substrate relay helpers + +use relay_utils::metrics::{FloatJsonValueMetric, PrometheusError}; + +/// Creates standalone token price metric. +pub fn token_price_metric(token_id: &str) -> Result { + FloatJsonValueMetric::new( + format!("https://api.coingecko.com/api/v3/simple/price?ids={}&vs_currencies=btc", token_id), + format!("$.{}.btc", token_id), + format!("{}_to_base_conversion_rate", token_id.replace("-", "_")), + format!("Rate used to convert from {} to some BASE tokens", token_id.to_uppercase()), + ) } diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/lib.rs b/polkadot/bridges/relays/lib-substrate-relay/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..cc066bf501ac6048730809b5c795e077d65dcd3b --- /dev/null +++ b/polkadot/bridges/relays/lib-substrate-relay/src/lib.rs @@ -0,0 +1,41 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! The library of substrate relay. contains some public codes to provide to substrate relay. + +#![warn(missing_docs)] + +use std::time::Duration; + +pub mod conversion_rate_update; +pub mod error; +pub mod finality_pipeline; +pub mod finality_target; +pub mod headers_initialize; +pub mod helpers; +pub mod messages_lane; +pub mod messages_source; +pub mod messages_target; +pub mod on_demand_headers; + +/// Default relay loop stall timeout. If transactions generated by relay are immortal, then +/// this timeout is used. +/// +/// There are no any strict requirements on block time in Substrate. But we assume here that all +/// Substrate-based chains will be designed to produce relatively fast (compared to the slowest +/// blockchains) blocks. So 1 hour seems to be a good guess for (even congested) chains to mine +/// transaction, or remove it from the pool. +pub const STALL_TIMEOUT: Duration = Duration::from_secs(60 * 60); diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/messages_lane.rs b/polkadot/bridges/relays/lib-substrate-relay/src/messages_lane.rs new file mode 100644 index 0000000000000000000000000000000000000000..6cadb64754a511661627f9eae79a1295a0249e07 --- /dev/null +++ b/polkadot/bridges/relays/lib-substrate-relay/src/messages_lane.rs @@ -0,0 +1,426 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Tools for supporting message lanes between two Substrate-based chains. + +use crate::{ + messages_source::SubstrateMessagesProof, messages_target::SubstrateMessagesReceivingProof, + on_demand_headers::OnDemandHeadersRelay, +}; + +use async_trait::async_trait; +use bp_messages::{LaneId, MessageNonce}; +use bp_runtime::{AccountIdOf, IndexOf}; +use frame_support::weights::Weight; +use messages_relay::{ + message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, + relay_strategy::RelayStrategy, +}; +use relay_substrate_client::{ + metrics::{FloatStorageValueMetric, StorageProofOverheadMetric}, + BlockNumberOf, Chain, Client, HashOf, +}; +use relay_utils::{ + metrics::{ + FloatJsonValueMetric, GlobalMetrics, MetricsParams, PrometheusError, StandaloneMetric, + }, + BlockNumberBase, +}; +use sp_core::{storage::StorageKey, Bytes}; +use sp_runtime::FixedU128; +use std::ops::RangeInclusive; + +/// Substrate <-> Substrate messages relay parameters. +pub struct MessagesRelayParams { + /// Messages source client. + pub source_client: Client, + /// Sign parameters for messages source chain. + pub source_sign: SS, + /// Mortality of source transactions. + pub source_transactions_mortality: Option, + /// Messages target client. + pub target_client: Client, + /// Sign parameters for messages target chain. + pub target_sign: TS, + /// Mortality of target transactions. + pub target_transactions_mortality: Option, + /// Optional on-demand source to target headers relay. + pub source_to_target_headers_relay: Option>, + /// Optional on-demand target to source headers relay. 
+ pub target_to_source_headers_relay: Option>, + /// Identifier of lane that needs to be served. + pub lane_id: LaneId, + /// Metrics parameters. + pub metrics_params: MetricsParams, + /// Pre-registered standalone metrics. + pub standalone_metrics: Option>, + /// Relay strategy + pub relay_strategy: Strategy, +} + +/// Message sync pipeline for Substrate <-> Substrate relays. +#[async_trait] +pub trait SubstrateMessageLane: 'static + Clone + Send + Sync { + /// Underlying generic message lane. + type MessageLane: MessageLane; + + /// Name of the runtime method that returns dispatch weight of outbound messages at the source + /// chain. + const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str; + /// Name of the runtime method that returns latest generated nonce at the source chain. + const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str; + /// Name of the runtime method that returns latest received (confirmed) nonce at the the source + /// chain. + const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str; + + /// Name of the runtime method that returns latest received nonce at the target chain. + const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str; + /// Name of the runtime method that returns the latest confirmed (reward-paid) nonce at the + /// target chain. + const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str; + /// Number of the runtime method that returns state of "unrewarded relayers" set at the target + /// chain. + const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str; + + /// Name of the runtime method that returns id of best finalized source header at target chain. + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str; + /// Name of the runtime method that returns id of best finalized target header at source chain. + const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str; + + /// Name of the messages pallet as it is declared in the `construct_runtime!()` at source chain. 
+ const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str; + /// Name of the messages pallet as it is declared in the `construct_runtime!()` at target chain. + const MESSAGE_PALLET_NAME_AT_TARGET: &'static str; + + /// Extra weight of the delivery transaction at the target chain, that is paid to cover + /// dispatch fee payment. + /// + /// If dispatch fee is paid at the source chain, then this weight is refunded by the + /// delivery transaction. + const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight; + + /// Source chain. + type SourceChain: Chain; + /// Target chain. + type TargetChain: Chain; + + /// Returns id of account that we're using to sign transactions at target chain (messages + /// proof). + fn target_transactions_author(&self) -> AccountIdOf; + + /// Make messages delivery transaction. + fn make_messages_delivery_transaction( + &self, + best_block_id: TargetHeaderIdOf, + transaction_nonce: IndexOf, + generated_at_header: SourceHeaderIdOf, + nonces: RangeInclusive, + proof: ::MessagesProof, + ) -> Bytes; + + /// Returns id of account that we're using to sign transactions at source chain (delivery + /// proof). + fn source_transactions_author(&self) -> AccountIdOf; + + /// Make messages receiving proof transaction. + fn make_messages_receiving_proof_transaction( + &self, + best_block_id: SourceHeaderIdOf, + transaction_nonce: IndexOf, + generated_at_header: TargetHeaderIdOf, + proof: ::MessagesReceivingProof, + ) -> Bytes; +} + +/// Substrate-to-Substrate message lane. +#[derive(Debug)] +pub struct SubstrateMessageLaneToSubstrate< + Source: Chain, + SourceSignParams, + Target: Chain, + TargetSignParams, +> { + /// Client for the source Substrate chain. + pub source_client: Client, + /// Parameters required to sign transactions for source chain. + pub source_sign: SourceSignParams, + /// Source transactions mortality. + pub source_transactions_mortality: Option, + /// Client for the target Substrate chain. 
+ pub target_client: Client, + /// Parameters required to sign transactions for target chain. + pub target_sign: TargetSignParams, + /// Target transactions mortality. + pub target_transactions_mortality: Option, + /// Account id of relayer at the source chain. + pub relayer_id_at_source: Source::AccountId, +} + +impl Clone + for SubstrateMessageLaneToSubstrate +{ + fn clone(&self) -> Self { + Self { + source_client: self.source_client.clone(), + source_sign: self.source_sign.clone(), + source_transactions_mortality: self.source_transactions_mortality, + target_client: self.target_client.clone(), + target_sign: self.target_sign.clone(), + target_transactions_mortality: self.target_transactions_mortality, + relayer_id_at_source: self.relayer_id_at_source.clone(), + } + } +} + +impl MessageLane + for SubstrateMessageLaneToSubstrate +where + SourceSignParams: Clone + Send + Sync + 'static, + TargetSignParams: Clone + Send + Sync + 'static, + BlockNumberOf: BlockNumberBase, + BlockNumberOf: BlockNumberBase, +{ + const SOURCE_NAME: &'static str = Source::NAME; + const TARGET_NAME: &'static str = Target::NAME; + + type MessagesProof = SubstrateMessagesProof; + type MessagesReceivingProof = SubstrateMessagesReceivingProof; + + type SourceChainBalance = Source::Balance; + type SourceHeaderNumber = BlockNumberOf; + type SourceHeaderHash = HashOf; + + type TargetHeaderNumber = BlockNumberOf; + type TargetHeaderHash = HashOf; +} + +/// Returns maximal number of messages and their maximal cumulative dispatch weight, based +/// on given chain parameters. +pub fn select_delivery_transaction_limits( + max_extrinsic_weight: Weight, + max_unconfirmed_messages_at_inbound_lane: MessageNonce, +) -> (MessageNonce, Weight) { + // We may try to guess accurate value, based on maximal number of messages and per-message + // weight overhead, but the relay loop isn't using this info in a super-accurate way anyway. 
+ // So just a rough guess: let's say 1/3 of max tx weight is for tx itself and the rest is + // for messages dispatch. + + // Another thing to keep in mind is that our runtimes (when this code was written) accept + // messages with dispatch weight <= max_extrinsic_weight/2. So we can't reserve less than + // that for dispatch. + + let weight_for_delivery_tx = max_extrinsic_weight / 3; + let weight_for_messages_dispatch = max_extrinsic_weight - weight_for_delivery_tx; + + let delivery_tx_base_weight = W::receive_messages_proof_overhead() + + W::receive_messages_proof_outbound_lane_state_overhead(); + let delivery_tx_weight_rest = weight_for_delivery_tx - delivery_tx_base_weight; + let max_number_of_messages = std::cmp::min( + delivery_tx_weight_rest / W::receive_messages_proof_messages_overhead(1), + max_unconfirmed_messages_at_inbound_lane, + ); + + assert!( + max_number_of_messages > 0, + "Relay should fit at least one message in every delivery transaction", + ); + assert!( + weight_for_messages_dispatch >= max_extrinsic_weight / 2, + "Relay shall be able to deliver messages with dispatch weight = max_extrinsic_weight / 2", + ); + + (max_number_of_messages, weight_for_messages_dispatch) +} + +/// Shared references to the standalone metrics of the message lane relay loop. +#[derive(Debug, Clone)] +pub struct StandaloneMessagesMetrics { + /// Global metrics. + pub global: GlobalMetrics, + /// Storage chain proof overhead metric. + pub source_storage_proof_overhead: StorageProofOverheadMetric, + /// Target chain proof overhead metric. + pub target_storage_proof_overhead: StorageProofOverheadMetric, + /// Source tokens to base conversion rate metric. + pub source_to_base_conversion_rate: Option, + /// Target tokens to base conversion rate metric. + pub target_to_base_conversion_rate: Option, + /// Source tokens to target tokens conversion rate metric. This rate is stored by the target + /// chain. 
+ pub source_to_target_conversion_rate: + Option>, + /// Target tokens to source tokens conversion rate metric. This rate is stored by the source + /// chain. + pub target_to_source_conversion_rate: + Option>, +} + +impl StandaloneMessagesMetrics { + /// Swap source and target sides. + pub fn reverse(self) -> StandaloneMessagesMetrics { + StandaloneMessagesMetrics { + global: self.global, + source_storage_proof_overhead: self.target_storage_proof_overhead, + target_storage_proof_overhead: self.source_storage_proof_overhead, + source_to_base_conversion_rate: self.target_to_base_conversion_rate, + target_to_base_conversion_rate: self.source_to_base_conversion_rate, + source_to_target_conversion_rate: self.target_to_source_conversion_rate, + target_to_source_conversion_rate: self.source_to_target_conversion_rate, + } + } + + /// Register all metrics in the registry. + pub fn register_and_spawn( + self, + metrics: MetricsParams, + ) -> Result { + self.global.register_and_spawn(&metrics.registry)?; + self.source_storage_proof_overhead.register_and_spawn(&metrics.registry)?; + self.target_storage_proof_overhead.register_and_spawn(&metrics.registry)?; + if let Some(m) = self.source_to_base_conversion_rate { + m.register_and_spawn(&metrics.registry)?; + } + if let Some(m) = self.target_to_base_conversion_rate { + m.register_and_spawn(&metrics.registry)?; + } + if let Some(m) = self.target_to_source_conversion_rate { + m.register_and_spawn(&metrics.registry)?; + } + Ok(metrics) + } + + /// Return conversion rate from target to source tokens. + pub async fn target_to_source_conversion_rate(&self) -> Option { + Self::compute_target_to_source_conversion_rate( + *self.target_to_base_conversion_rate.as_ref()?.shared_value_ref().read().await, + *self.source_to_base_conversion_rate.as_ref()?.shared_value_ref().read().await, + ) + } + + /// Return conversion rate from target to source tokens, given conversion rates from + /// target/source tokens to some base token. 
+ fn compute_target_to_source_conversion_rate( + target_to_base_conversion_rate: Option, + source_to_base_conversion_rate: Option, + ) -> Option { + Some(source_to_base_conversion_rate? / target_to_base_conversion_rate?) + } +} + +/// Create standalone metrics for the message lane relay loop. +/// +/// All metrics returned by this function are exposed by loops that are serving given lane (`P`) +/// and by loops that are serving reverse lane (`P` with swapped `TargetChain` and `SourceChain`). +pub fn standalone_metrics( + source_client: Client, + target_client: Client, + source_chain_token_id: Option<&str>, + target_chain_token_id: Option<&str>, + source_to_target_conversion_rate_params: Option<(StorageKey, FixedU128)>, + target_to_source_conversion_rate_params: Option<(StorageKey, FixedU128)>, +) -> anyhow::Result> { + Ok(StandaloneMessagesMetrics { + global: GlobalMetrics::new()?, + source_storage_proof_overhead: StorageProofOverheadMetric::new( + source_client.clone(), + format!("{}_storage_proof_overhead", SC::NAME.to_lowercase()), + format!("{} storage proof overhead", SC::NAME), + )?, + target_storage_proof_overhead: StorageProofOverheadMetric::new( + target_client.clone(), + format!("{}_storage_proof_overhead", TC::NAME.to_lowercase()), + format!("{} storage proof overhead", TC::NAME), + )?, + source_to_base_conversion_rate: source_chain_token_id + .map(|source_chain_token_id| { + crate::helpers::token_price_metric(source_chain_token_id).map(Some) + }) + .unwrap_or(Ok(None))?, + target_to_base_conversion_rate: target_chain_token_id + .map(|target_chain_token_id| { + crate::helpers::token_price_metric(target_chain_token_id).map(Some) + }) + .unwrap_or(Ok(None))?, + source_to_target_conversion_rate: source_to_target_conversion_rate_params + .map(|(key, rate)| { + FloatStorageValueMetric::<_, sp_runtime::FixedU128>::new( + target_client, + key, + Some(rate), + format!("{}_{}_to_{}_conversion_rate", TC::NAME, SC::NAME, TC::NAME), + format!( + "{} to {} tokens 
conversion rate (used by {})", + SC::NAME, + TC::NAME, + TC::NAME + ), + ) + .map(Some) + }) + .unwrap_or(Ok(None))?, + target_to_source_conversion_rate: target_to_source_conversion_rate_params + .map(|(key, rate)| { + FloatStorageValueMetric::<_, sp_runtime::FixedU128>::new( + source_client, + key, + Some(rate), + format!("{}_{}_to_{}_conversion_rate", SC::NAME, TC::NAME, SC::NAME), + format!( + "{} to {} tokens conversion rate (used by {})", + TC::NAME, + SC::NAME, + SC::NAME + ), + ) + .map(Some) + }) + .unwrap_or(Ok(None))?, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + + type RialtoToMillauMessagesWeights = + pallet_bridge_messages::weights::RialtoWeight; + + #[test] + fn select_delivery_transaction_limits_works() { + let (max_count, max_weight) = + select_delivery_transaction_limits::( + bp_millau::max_extrinsic_weight(), + bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + ); + assert_eq!( + (max_count, max_weight), + // We don't actually care about these values, so feel free to update them whenever test + // fails. The only thing to do before that is to ensure that new values looks sane: + // i.e. weight reserved for messages dispatch allows dispatch of non-trivial messages. + // + // Any significant change in this values should attract additional attention. 
+ (782, 216_583_333_334), + ); + } + + #[async_std::test] + async fn target_to_source_conversion_rate_works() { + assert_eq!( + StandaloneMessagesMetrics::::compute_target_to_source_conversion_rate(Some(183.15), Some(12.32)), + Some(12.32 / 183.15), + ); + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/messages_source.rs b/polkadot/bridges/relays/lib-substrate-relay/src/messages_source.rs similarity index 53% rename from polkadot/bridges/relays/bin-substrate/src/messages_source.rs rename to polkadot/bridges/relays/lib-substrate-relay/src/messages_source.rs index 88c8b529dcc619f082e9d21ce55e77ccb3c212e5..5f066296e7e71517838f1513e63511dba3eda635 100644 --- a/polkadot/bridges/relays/bin-substrate/src/messages_source.rs +++ b/polkadot/bridges/relays/lib-substrate-relay/src/messages_source.rs @@ -18,26 +18,37 @@ //! runtime that implements `HeaderApi` to allow bridging with //! chain. -use crate::messages_lane::SubstrateMessageLane; -use crate::on_demand_headers::OnDemandHeadersRelay; +use crate::{ + messages_lane::SubstrateMessageLane, messages_target::SubstrateMessagesReceivingProof, + on_demand_headers::OnDemandHeadersRelay, +}; use async_trait::async_trait; -use bp_messages::{LaneId, MessageNonce}; -use bp_runtime::{messages::DispatchFeePayment, ChainId}; -use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState}; +use bridge_runtime_common::messages::{ + source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, +}; use codec::{Decode, Encode}; -use frame_support::{traits::Instance, weights::Weight}; +use frame_support::weights::Weight; use messages_relay::{ - message_lane::{SourceHeaderIdOf, TargetHeaderIdOf}, + message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, message_lane_loop::{ - ClientState, MessageDetails, MessageDetailsMap, MessageProofParameters, SourceClient, SourceClientState, + ClientState, MessageDetails, 
MessageDetailsMap, MessageProofParameters, SourceClient, + SourceClientState, }, }; -use relay_substrate_client::{Chain, Client, Error as SubstrateError, HashOf, HeaderIdOf}; +use num_traits::{Bounded, Zero}; +use relay_substrate_client::{ + BalanceOf, BlockNumberOf, Chain, Client, Error as SubstrateError, HashOf, HeaderIdOf, HeaderOf, + IndexOf, +}; use relay_utils::{relay_loop::Client as RelayClient, BlockNumberBase, HeaderId}; use sp_core::Bytes; -use sp_runtime::{traits::Header as HeaderT, DeserializeOwned}; -use std::{marker::PhantomData, ops::RangeInclusive}; +use sp_runtime::{ + traits::{AtLeast32BitUnsigned, Header as HeaderT}, + DeserializeOwned, +}; +use std::ops::RangeInclusive; /// Intermediate message proof returned by the source Substrate node. Includes everything /// required to submit to the target node: cumulative dispatch weight of bundled messages and @@ -45,55 +56,38 @@ use std::{marker::PhantomData, ops::RangeInclusive}; pub type SubstrateMessagesProof = (Weight, FromBridgedChainMessagesProof>); /// Substrate client as Substrate messages source. -pub struct SubstrateMessagesSource { - client: Client, +pub struct SubstrateMessagesSource { + client: Client, lane: P, lane_id: LaneId, - instance: ChainId, target_to_source_headers_relay: Option>, - _phantom: PhantomData, } -impl SubstrateMessagesSource { +impl SubstrateMessagesSource

{ /// Create new Substrate headers source. pub fn new( - client: Client, + client: Client, lane: P, lane_id: LaneId, - instance: ChainId, target_to_source_headers_relay: Option>, ) -> Self { - SubstrateMessagesSource { - client, - lane, - lane_id, - instance, - target_to_source_headers_relay, - _phantom: Default::default(), - } + SubstrateMessagesSource { client, lane, lane_id, target_to_source_headers_relay } } } -impl Clone for SubstrateMessagesSource { +impl Clone for SubstrateMessagesSource

{ fn clone(&self) -> Self { Self { client: self.client.clone(), lane: self.lane.clone(), lane_id: self.lane_id, - instance: self.instance, target_to_source_headers_relay: self.target_to_source_headers_relay.clone(), - _phantom: Default::default(), } } } #[async_trait] -impl RelayClient for SubstrateMessagesSource -where - C: Chain, - P: SubstrateMessageLane, - I: Send + Sync + Instance, -{ +impl RelayClient for SubstrateMessagesSource

{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -102,40 +96,49 @@ where } #[async_trait] -impl SourceClient

for SubstrateMessagesSource +impl

SourceClient for SubstrateMessagesSource

where - C: Chain, - C::Header: DeserializeOwned, - C::Index: DeserializeOwned, - C::BlockNumber: BlockNumberBase, - P: SubstrateMessageLane< - MessagesProof = SubstrateMessagesProof, - SourceChainBalance = C::Balance, - SourceHeaderNumber = ::Number, - SourceHeaderHash = ::Hash, - SourceChain = C, + P: SubstrateMessageLane, + P::SourceChain: Chain< + Hash = ::SourceHeaderHash, + BlockNumber = ::SourceHeaderNumber, + Balance = ::SourceChainBalance, >, - P::TargetChain: Chain, - P::TargetHeaderNumber: Decode, - P::TargetHeaderHash: Decode, - I: Send + Sync + Instance, + BalanceOf: Decode + Bounded, + IndexOf: DeserializeOwned, + HashOf: Copy, + BlockNumberOf: BlockNumberBase + Copy, + HeaderOf: DeserializeOwned, + P::TargetChain: Chain< + Hash = ::TargetHeaderHash, + BlockNumber = ::TargetHeaderNumber, + >, + + P::MessageLane: MessageLane< + MessagesProof = SubstrateMessagesProof, + MessagesReceivingProof = SubstrateMessagesReceivingProof, + >, + ::TargetHeaderNumber: Decode, + ::TargetHeaderHash: Decode, + ::SourceChainBalance: AtLeast32BitUnsigned, { - async fn state(&self) -> Result, SubstrateError> { + async fn state(&self) -> Result, SubstrateError> { // we can't continue to deliver confirmations if source node is out of sync, because // it may have already received confirmations that we're going to deliver self.client.ensure_synced().await?; - read_client_state::<_, P::TargetHeaderHash, P::TargetHeaderNumber>( - &self.client, - P::BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE, - ) + read_client_state::< + _, + ::TargetHeaderHash, + ::TargetHeaderNumber, + >(&self.client, P::BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE) .await } async fn latest_generated_nonce( &self, - id: SourceHeaderIdOf

, - ) -> Result<(SourceHeaderIdOf

, MessageNonce), SubstrateError> { + id: SourceHeaderIdOf, + ) -> Result<(SourceHeaderIdOf, MessageNonce), SubstrateError> { let encoded_response = self .client .state_call( @@ -144,15 +147,15 @@ where Some(id.1), ) .await?; - let latest_generated_nonce: MessageNonce = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; + let latest_generated_nonce: MessageNonce = Decode::decode(&mut &encoded_response.0[..]) + .map_err(SubstrateError::ResponseParseFailed)?; Ok((id, latest_generated_nonce)) } async fn latest_confirmed_received_nonce( &self, - id: SourceHeaderIdOf

, - ) -> Result<(SourceHeaderIdOf

, MessageNonce), SubstrateError> { + id: SourceHeaderIdOf, + ) -> Result<(SourceHeaderIdOf, MessageNonce), SubstrateError> { let encoded_response = self .client .state_call( @@ -161,16 +164,19 @@ where Some(id.1), ) .await?; - let latest_received_nonce: MessageNonce = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; + let latest_received_nonce: MessageNonce = Decode::decode(&mut &encoded_response.0[..]) + .map_err(SubstrateError::ResponseParseFailed)?; Ok((id, latest_received_nonce)) } async fn generated_message_details( &self, - id: SourceHeaderIdOf

, + id: SourceHeaderIdOf, nonces: RangeInclusive, - ) -> Result, SubstrateError> { + ) -> Result< + MessageDetailsMap<::SourceChainBalance>, + SubstrateError, + > { let encoded_response = self .client .state_call( @@ -180,37 +186,46 @@ where ) .await?; - make_message_details_map::( - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?, + make_message_details_map::( + Decode::decode(&mut &encoded_response.0[..]) + .map_err(SubstrateError::ResponseParseFailed)?, nonces, ) } async fn prove_messages( &self, - id: SourceHeaderIdOf

, + id: SourceHeaderIdOf, nonces: RangeInclusive, proof_parameters: MessageProofParameters, - ) -> Result<(SourceHeaderIdOf

, RangeInclusive, P::MessagesProof), SubstrateError> { - let mut storage_keys = Vec::with_capacity(nonces.end().saturating_sub(*nonces.start()) as usize + 1); + ) -> Result< + ( + SourceHeaderIdOf, + RangeInclusive, + ::MessagesProof, + ), + SubstrateError, + > { + let mut storage_keys = + Vec::with_capacity(nonces.end().saturating_sub(*nonces.start()) as usize + 1); let mut message_nonce = *nonces.start(); while message_nonce <= *nonces.end() { - let message_key = pallet_bridge_messages::storage_keys::message_key::(&self.lane_id, message_nonce); + let message_key = pallet_bridge_messages::storage_keys::message_key( + P::MESSAGE_PALLET_NAME_AT_SOURCE, + &self.lane_id, + message_nonce, + ); storage_keys.push(message_key); message_nonce += 1; } if proof_parameters.outbound_state_proof_required { - storage_keys.push(pallet_bridge_messages::storage_keys::outbound_lane_data_key::( + storage_keys.push(pallet_bridge_messages::storage_keys::outbound_lane_data_key( + P::MESSAGE_PALLET_NAME_AT_SOURCE, &self.lane_id, )); } - let proof = self - .client - .prove_storage(storage_keys, id.1) - .await? - .iter_nodes() - .collect(); + let proof = self.client.prove_storage(storage_keys, id.1).await?.iter_nodes().collect(); let proof = FromBridgedChainMessagesProof { bridged_header_hash: id.1, storage_proof: proof, @@ -223,33 +238,87 @@ where async fn submit_messages_receiving_proof( &self, - generated_at_block: TargetHeaderIdOf

, - proof: P::MessagesReceivingProof, + generated_at_block: TargetHeaderIdOf, + proof: ::MessagesReceivingProof, ) -> Result<(), SubstrateError> { + let lane = self.lane.clone(); self.client - .submit_signed_extrinsic(self.lane.source_transactions_author(), move |transaction_nonce| { - self.lane - .make_messages_receiving_proof_transaction(transaction_nonce, generated_at_block, proof) - }) + .submit_signed_extrinsic( + self.lane.source_transactions_author(), + move |best_block_id, transaction_nonce| { + lane.make_messages_receiving_proof_transaction( + best_block_id, + transaction_nonce, + generated_at_block, + proof, + ) + }, + ) .await?; Ok(()) } - async fn require_target_header_on_source(&self, id: TargetHeaderIdOf

) { + async fn require_target_header_on_source(&self, id: TargetHeaderIdOf) { if let Some(ref target_to_source_headers_relay) = self.target_to_source_headers_relay { target_to_source_headers_relay.require_finalized_header(id).await; } } - async fn estimate_confirmation_transaction(&self) -> P::SourceChainBalance { - num_traits::Zero::zero() // TODO: https://github.com/paritytech/parity-bridges-common/issues/997 + async fn estimate_confirmation_transaction( + &self, + ) -> ::SourceChainBalance { + self.client + .estimate_extrinsic_fee(self.lane.make_messages_receiving_proof_transaction( + HeaderId(Default::default(), Default::default()), + Zero::zero(), + HeaderId(Default::default(), Default::default()), + prepare_dummy_messages_delivery_proof::(), + )) + .await + .map(|fee| fee.inclusion_fee()) + .unwrap_or_else(|_| BalanceOf::::max_value()) } } +/// Prepare 'dummy' messages delivery proof that will compose the delivery confirmation transaction. +/// +/// We don't care about proof actually being the valid proof, because its validity doesn't +/// affect the call weight - we only care about its size. +fn prepare_dummy_messages_delivery_proof( +) -> SubstrateMessagesReceivingProof { + let single_message_confirmation_size = bp_messages::InboundLaneData::<()>::encoded_size_hint( + SC::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, + 1, + 1, + ) + .unwrap_or(u32::MAX); + let proof_size = TC::STORAGE_PROOF_OVERHEAD.saturating_add(single_message_confirmation_size); + ( + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 1, + total_messages: 1, + }, + FromBridgedChainMessagesDeliveryProof { + bridged_header_hash: Default::default(), + storage_proof: vec![vec![0; proof_size as usize]], + lane: Default::default(), + }, + ) +} + +/// Read best blocks from given client. 
+/// +/// This function assumes that the chain that is followed by the `self_client` has +/// bridge GRANDPA pallet deployed and it provides `best_finalized_header_id_method_name` +/// runtime API to read the best finalized Bridged chain header. pub async fn read_client_state( self_client: &Client, best_finalized_header_id_method_name: &str, -) -> Result, HeaderId>, SubstrateError> +) -> Result< + ClientState, HeaderId>, + SubstrateError, +> where SelfChain: Chain, SelfChain::Header: DeserializeOwned, @@ -259,8 +328,10 @@ where { // let's read our state first: we need best finalized header hash on **this** chain let self_best_finalized_header_hash = self_client.best_finalized_header_hash().await?; - let self_best_finalized_header = self_client.header_by_hash(self_best_finalized_header_hash).await?; - let self_best_finalized_id = HeaderId(*self_best_finalized_header.number(), self_best_finalized_header_hash); + let self_best_finalized_header = + self_client.header_by_hash(self_best_finalized_header_hash).await?; + let self_best_finalized_id = + HeaderId(*self_best_finalized_header.number(), self_best_finalized_header_hash); // now let's read our best header on **this** chain let self_best_header = self_client.best_header().await?; @@ -276,11 +347,10 @@ where ) .await?; let decoded_best_finalized_peer_on_self: (BridgedHeaderNumber, BridgedHeaderHash) = - Decode::decode(&mut &encoded_best_finalized_peer_on_self.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - let peer_on_self_best_finalized_id = HeaderId( - decoded_best_finalized_peer_on_self.0, - decoded_best_finalized_peer_on_self.1, - ); + Decode::decode(&mut &encoded_best_finalized_peer_on_self.0[..]) + .map_err(SubstrateError::ResponseParseFailed)?; + let peer_on_self_best_finalized_id = + HeaderId(decoded_best_finalized_peer_on_self.0, decoded_best_finalized_peer_on_self.1); Ok(ClientState { best_self: self_best_id, @@ -295,7 +365,7 @@ fn make_message_details_map( ) -> Result, SubstrateError> { let 
make_missing_nonce_error = |expected_nonce| { Err(SubstrateError::Custom(format!( - "Missing nonce {} in messages_dispatch_weight call result. Expected all nonces from {:?}", + "Missing nonce {} in message_details call result. Expected all nonces from {:?}", expected_nonce, nonces, ))) }; @@ -304,16 +374,14 @@ fn make_message_details_map( // this is actually prevented by external logic if nonces.is_empty() { - return Ok(weights_map); + return Ok(weights_map) } // check if last nonce is missing - loop below is not checking this - let last_nonce_is_missing = weights - .last() - .map(|details| details.nonce != *nonces.end()) - .unwrap_or(true); + let last_nonce_is_missing = + weights.last().map(|details| details.nonce != *nonces.end()).unwrap_or(true); if last_nonce_is_missing { - return make_missing_nonce_error(*nonces.end()); + return make_missing_nonce_error(*nonces.end()) } let mut expected_nonce = *nonces.start(); @@ -325,20 +393,21 @@ fn make_message_details_map( (false, true) => { // this may happen if some messages were already pruned from the source node // - // this is not critical error and will be auto-resolved by messages lane (and target node) + // this is not critical error and will be auto-resolved by messages lane (and target + // node) log::info!( target: "bridge", "Some messages are missing from the {} node: {:?}. 
Target node may be out of sync?", C::NAME, expected_nonce..details.nonce, ); - } + }, (false, false) => { // some nonces are missing from the middle/tail of the range // // this is critical error, because we can't miss any nonces - return make_missing_nonce_error(expected_nonce); - } + return make_missing_nonce_error(expected_nonce) + }, } weights_map.insert( @@ -346,9 +415,8 @@ fn make_message_details_map( MessageDetails { dispatch_weight: details.dispatch_weight, size: details.size as _, - // TODO: https://github.com/paritytech/parity-bridges-common/issues/997 - reward: num_traits::Zero::zero(), - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, + reward: details.delivery_and_dispatch_fee, + dispatch_fee_payment: details.dispatch_fee_payment, }, ); expected_nonce = details.nonce + 1; @@ -361,10 +429,13 @@ fn make_message_details_map( #[cfg(test)] mod tests { use super::*; + use bp_runtime::messages::DispatchFeePayment; + use relay_rococo_client::Rococo; + use relay_wococo_client::Wococo; fn message_details_from_rpc( nonces: RangeInclusive, - ) -> Vec> { + ) -> Vec> { nonces .into_iter() .map(|nonce| bp_messages::MessageDetails { @@ -380,7 +451,7 @@ mod tests { #[test] fn make_message_details_map_succeeds_if_no_messages_are_missing() { assert_eq!( - make_message_details_map::(message_details_from_rpc(1..=3), 1..=3,).unwrap(), + make_message_details_map::(message_details_from_rpc(1..=3), 1..=3,).unwrap(), vec![ ( 1, @@ -418,7 +489,7 @@ mod tests { #[test] fn make_message_details_map_succeeds_if_head_messages_are_missing() { assert_eq!( - make_message_details_map::(message_details_from_rpc(2..=3), 1..=3,).unwrap(), + make_message_details_map::(message_details_from_rpc(2..=3), 1..=3,).unwrap(), vec![ ( 2, @@ -449,7 +520,7 @@ mod tests { let mut message_details_from_rpc = message_details_from_rpc(1..=3); message_details_from_rpc.remove(1); assert!(matches!( - make_message_details_map::(message_details_from_rpc, 1..=3,), + 
make_message_details_map::(message_details_from_rpc, 1..=3,), Err(SubstrateError::Custom(_)) )); } @@ -457,7 +528,7 @@ mod tests { #[test] fn make_message_details_map_fails_if_tail_messages_are_missing() { assert!(matches!( - make_message_details_map::(message_details_from_rpc(1..=2), 1..=3,), + make_message_details_map::(message_details_from_rpc(1..=2), 1..=3,), Err(SubstrateError::Custom(_)) )); } @@ -465,8 +536,21 @@ mod tests { #[test] fn make_message_details_map_fails_if_all_messages_are_missing() { assert!(matches!( - make_message_details_map::(vec![], 1..=3), + make_message_details_map::(vec![], 1..=3), Err(SubstrateError::Custom(_)) )); } + + #[test] + fn prepare_dummy_messages_delivery_proof_works() { + let expected_minimal_size = + Wococo::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE + Rococo::STORAGE_PROOF_OVERHEAD; + let dummy_proof = prepare_dummy_messages_delivery_proof::(); + assert!( + dummy_proof.1.encode().len() as u32 > expected_minimal_size, + "Expected proof size at least {}. Got: {}", + expected_minimal_size, + dummy_proof.1.encode().len(), + ); + } } diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/messages_target.rs b/polkadot/bridges/relays/lib-substrate-relay/src/messages_target.rs new file mode 100644 index 0000000000000000000000000000000000000000..eafc6bd3fc5f7efac68b93fbf16a7860313fc5b6 --- /dev/null +++ b/polkadot/bridges/relays/lib-substrate-relay/src/messages_target.rs @@ -0,0 +1,566 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Substrate client as Substrate messages target. The chain we connect to should have +//! runtime that implements `HeaderApi` to allow bridging with +//! chain. + +use crate::{ + messages_lane::{StandaloneMessagesMetrics, SubstrateMessageLane}, + messages_source::{read_client_state, SubstrateMessagesProof}, + on_demand_headers::OnDemandHeadersRelay, +}; + +use async_trait::async_trait; +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState}; + +use bridge_runtime_common::messages::{ + source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, +}; +use codec::{Decode, Encode}; +use frame_support::weights::{Weight, WeightToFeePolynomial}; +use messages_relay::{ + message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, + message_lane_loop::{TargetClient, TargetClientState}, +}; +use num_traits::{Bounded, Zero}; +use relay_substrate_client::{ + BalanceOf, BlockNumberOf, Chain, Client, Error as SubstrateError, HashOf, HeaderOf, IndexOf, + WeightToFeeOf, +}; +use relay_utils::{relay_loop::Client as RelayClient, BlockNumberBase, HeaderId}; +use sp_core::Bytes; +use sp_runtime::{traits::Saturating, DeserializeOwned, FixedPointNumber, FixedU128}; +use std::{convert::TryFrom, ops::RangeInclusive}; + +/// Message receiving proof returned by the target Substrate node. +pub type SubstrateMessagesReceivingProof = + (UnrewardedRelayersState, FromBridgedChainMessagesDeliveryProof>); + +/// Substrate client as Substrate messages target. 
+pub struct SubstrateMessagesTarget { + client: Client, + lane: P, + lane_id: LaneId, + metric_values: StandaloneMessagesMetrics, + source_to_target_headers_relay: Option>, +} + +impl SubstrateMessagesTarget

{ + /// Create new Substrate headers target. + pub fn new( + client: Client, + lane: P, + lane_id: LaneId, + metric_values: StandaloneMessagesMetrics, + source_to_target_headers_relay: Option>, + ) -> Self { + SubstrateMessagesTarget { + client, + lane, + lane_id, + metric_values, + source_to_target_headers_relay, + } + } +} + +impl Clone for SubstrateMessagesTarget

{ + fn clone(&self) -> Self { + Self { + client: self.client.clone(), + lane: self.lane.clone(), + lane_id: self.lane_id, + metric_values: self.metric_values.clone(), + source_to_target_headers_relay: self.source_to_target_headers_relay.clone(), + } + } +} + +#[async_trait] +impl RelayClient for SubstrateMessagesTarget

{ + type Error = SubstrateError; + + async fn reconnect(&mut self) -> Result<(), SubstrateError> { + self.client.reconnect().await + } +} + +#[async_trait] +impl

TargetClient for SubstrateMessagesTarget

+where + P: SubstrateMessageLane, + P::SourceChain: Chain< + Hash = ::SourceHeaderHash, + BlockNumber = ::SourceHeaderNumber, + Balance = ::SourceChainBalance, + >, + BalanceOf: TryFrom> + Bounded, + P::TargetChain: Chain< + Hash = ::TargetHeaderHash, + BlockNumber = ::TargetHeaderNumber, + >, + IndexOf: DeserializeOwned, + HashOf: Copy, + BlockNumberOf: Copy, + HeaderOf: DeserializeOwned, + BlockNumberOf: BlockNumberBase, + P::MessageLane: MessageLane< + MessagesProof = SubstrateMessagesProof, + MessagesReceivingProof = SubstrateMessagesReceivingProof, + >, + ::SourceHeaderNumber: Decode, + ::SourceHeaderHash: Decode, +{ + async fn state(&self) -> Result, SubstrateError> { + // we can't continue to deliver messages if target node is out of sync, because + // it may have already received (some of) messages that we're going to deliver + self.client.ensure_synced().await?; + + read_client_state::< + _, + ::SourceHeaderHash, + ::SourceHeaderNumber, + >(&self.client, P::BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET) + .await + } + + async fn latest_received_nonce( + &self, + id: TargetHeaderIdOf, + ) -> Result<(TargetHeaderIdOf, MessageNonce), SubstrateError> { + let encoded_response = self + .client + .state_call( + P::INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD.into(), + Bytes(self.lane_id.encode()), + Some(id.1), + ) + .await?; + let latest_received_nonce: MessageNonce = Decode::decode(&mut &encoded_response.0[..]) + .map_err(SubstrateError::ResponseParseFailed)?; + Ok((id, latest_received_nonce)) + } + + async fn latest_confirmed_received_nonce( + &self, + id: TargetHeaderIdOf, + ) -> Result<(TargetHeaderIdOf, MessageNonce), SubstrateError> { + let encoded_response = self + .client + .state_call( + P::INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD.into(), + Bytes(self.lane_id.encode()), + Some(id.1), + ) + .await?; + let latest_received_nonce: MessageNonce = Decode::decode(&mut &encoded_response.0[..]) + .map_err(SubstrateError::ResponseParseFailed)?; + Ok((id, 
latest_received_nonce)) + } + + async fn unrewarded_relayers_state( + &self, + id: TargetHeaderIdOf, + ) -> Result<(TargetHeaderIdOf, UnrewardedRelayersState), SubstrateError> { + let encoded_response = self + .client + .state_call( + P::INBOUND_LANE_UNREWARDED_RELAYERS_STATE.into(), + Bytes(self.lane_id.encode()), + Some(id.1), + ) + .await?; + let unrewarded_relayers_state: UnrewardedRelayersState = + Decode::decode(&mut &encoded_response.0[..]) + .map_err(SubstrateError::ResponseParseFailed)?; + Ok((id, unrewarded_relayers_state)) + } + + async fn prove_messages_receiving( + &self, + id: TargetHeaderIdOf, + ) -> Result< + (TargetHeaderIdOf, ::MessagesReceivingProof), + SubstrateError, + > { + let (id, relayers_state) = self.unrewarded_relayers_state(id).await?; + let inbound_data_key = pallet_bridge_messages::storage_keys::inbound_lane_data_key( + P::MESSAGE_PALLET_NAME_AT_TARGET, + &self.lane_id, + ); + let proof = self + .client + .prove_storage(vec![inbound_data_key], id.1) + .await? 
+ .iter_nodes() + .collect(); + let proof = FromBridgedChainMessagesDeliveryProof { + bridged_header_hash: id.1, + storage_proof: proof, + lane: self.lane_id, + }; + Ok((id, (relayers_state, proof))) + } + + async fn submit_messages_proof( + &self, + generated_at_header: SourceHeaderIdOf, + nonces: RangeInclusive, + proof: ::MessagesProof, + ) -> Result, SubstrateError> { + let lane = self.lane.clone(); + let nonces_clone = nonces.clone(); + self.client + .submit_signed_extrinsic( + self.lane.target_transactions_author(), + move |best_block_id, transaction_nonce| { + lane.make_messages_delivery_transaction( + best_block_id, + transaction_nonce, + generated_at_header, + nonces_clone, + proof, + ) + }, + ) + .await?; + Ok(nonces) + } + + async fn require_source_header_on_target(&self, id: SourceHeaderIdOf) { + if let Some(ref source_to_target_headers_relay) = self.source_to_target_headers_relay { + source_to_target_headers_relay.require_finalized_header(id).await; + } + } + + async fn estimate_delivery_transaction_in_source_tokens( + &self, + nonces: RangeInclusive, + total_prepaid_nonces: MessageNonce, + total_dispatch_weight: Weight, + total_size: u32, + ) -> Result<::SourceChainBalance, SubstrateError> { + let conversion_rate = + self.metric_values.target_to_source_conversion_rate().await.ok_or_else(|| { + SubstrateError::Custom(format!( + "Failed to compute conversion rate from {} to {}", + P::TargetChain::NAME, + P::SourceChain::NAME, + )) + })?; + + // Prepare 'dummy' delivery transaction - we only care about its length and dispatch weight. 
+ let delivery_tx = self.lane.make_messages_delivery_transaction( + HeaderId(Default::default(), Default::default()), + Zero::zero(), + HeaderId(Default::default(), Default::default()), + nonces.clone(), + prepare_dummy_messages_proof::( + nonces.clone(), + total_dispatch_weight, + total_size, + ), + ); + let delivery_tx_fee = self.client.estimate_extrinsic_fee(delivery_tx).await?; + let inclusion_fee_in_target_tokens = delivery_tx_fee.inclusion_fee(); + + // The pre-dispatch cost of delivery transaction includes additional fee to cover dispatch + // fee payment (Currency::transfer in regular deployment). But if message dispatch has + // already been paid at the Source chain, the delivery transaction will refund relayer with + // this additional cost. But `estimate_extrinsic_fee` obviously just returns pre-dispatch + // cost of the transaction. So if transaction delivers prepaid message, then it may happen + // that pre-dispatch cost is larger than reward and `Rational` relayer will refuse to + // deliver this message. + // + // The most obvious solution would be to deduct total weight of dispatch fee payments from + // the `total_dispatch_weight` and use regular `estimate_extrinsic_fee` call. But what if + // `total_dispatch_weight` is less than total dispatch fee payments weight? Weight is + // strictly positive, so we can't use this option. + // + // Instead we'll be directly using `WeightToFee` and `NextFeeMultiplier` of the Target + // chain. This requires more knowledge of the Target chain, but seems there's no better way + // to solve this now. 
+ let expected_refund_in_target_tokens = if total_prepaid_nonces != 0 { + const WEIGHT_DIFFERENCE: Weight = 100; + + let larger_dispatch_weight = total_dispatch_weight.saturating_add(WEIGHT_DIFFERENCE); + let larger_delivery_tx_fee = self + .client + .estimate_extrinsic_fee(self.lane.make_messages_delivery_transaction( + HeaderId(Default::default(), Default::default()), + Zero::zero(), + HeaderId(Default::default(), Default::default()), + nonces.clone(), + prepare_dummy_messages_proof::( + nonces.clone(), + larger_dispatch_weight, + total_size, + ), + )) + .await?; + + compute_prepaid_messages_refund::

( + total_prepaid_nonces, + compute_fee_multiplier::( + delivery_tx_fee.adjusted_weight_fee, + total_dispatch_weight, + larger_delivery_tx_fee.adjusted_weight_fee, + larger_dispatch_weight, + ), + ) + } else { + Zero::zero() + }; + + let delivery_fee_in_source_tokens = + convert_target_tokens_to_source_tokens::( + FixedU128::from_float(conversion_rate), + inclusion_fee_in_target_tokens.saturating_sub(expected_refund_in_target_tokens), + ); + + log::trace!( + target: "bridge", + "Estimated {} -> {} messages delivery transaction.\n\t\ + Total nonces: {:?}\n\t\ + Prepaid messages: {}\n\t\ + Total messages size: {}\n\t\ + Total messages dispatch weight: {}\n\t\ + Inclusion fee (in {1} tokens): {:?}\n\t\ + Expected refund (in {1} tokens): {:?}\n\t\ + {1} -> {0} conversion rate: {:?}\n\t\ + Expected delivery tx fee (in {0} tokens): {:?}", + P::SourceChain::NAME, + P::TargetChain::NAME, + nonces, + total_prepaid_nonces, + total_size, + total_dispatch_weight, + inclusion_fee_in_target_tokens, + expected_refund_in_target_tokens, + conversion_rate, + delivery_fee_in_source_tokens, + ); + + Ok(delivery_fee_in_source_tokens) + } +} + +/// Prepare 'dummy' messages proof that will compose the delivery transaction. +/// +/// We don't care about proof actually being the valid proof, because its validity doesn't +/// affect the call weight - we only care about its size. +fn prepare_dummy_messages_proof( + nonces: RangeInclusive, + total_dispatch_weight: Weight, + total_size: u32, +) -> SubstrateMessagesProof { + ( + total_dispatch_weight, + FromBridgedChainMessagesProof { + bridged_header_hash: Default::default(), + storage_proof: vec![vec![ + 0; + SC::STORAGE_PROOF_OVERHEAD.saturating_add(total_size) as usize + ]], + lane: Default::default(), + nonces_start: *nonces.start(), + nonces_end: *nonces.end(), + }, + ) +} + +/// Given delivery transaction fee in target chain tokens and conversion rate to the source +/// chain tokens, compute transaction cost in source chain tokens. 
+fn convert_target_tokens_to_source_tokens( + target_to_source_conversion_rate: FixedU128, + target_transaction_fee: TC::Balance, +) -> SC::Balance +where + SC::Balance: TryFrom, +{ + SC::Balance::try_from( + target_to_source_conversion_rate.saturating_mul_int(target_transaction_fee), + ) + .unwrap_or_else(|_| SC::Balance::max_value()) +} + +/// Compute fee multiplier that is used by the chain, given a couple of fees for transactions +/// that are only differ in dispatch weights. +/// +/// This function assumes that standard transaction payment pallet is used by the chain. +/// The only fee component that depends on dispatch weight is the `adjusted_weight_fee`. +/// +/// **WARNING**: this functions will only be accurate if weight-to-fee conversion function +/// is linear. For non-linear polynomials the error will grow with `weight_difference` growth. +/// So better to use smaller differences. +fn compute_fee_multiplier( + smaller_adjusted_weight_fee: BalanceOf, + smaller_tx_weight: Weight, + larger_adjusted_weight_fee: BalanceOf, + larger_tx_weight: Weight, +) -> FixedU128 { + let adjusted_weight_fee_difference = + larger_adjusted_weight_fee.saturating_sub(smaller_adjusted_weight_fee); + let smaller_tx_unadjusted_weight_fee = WeightToFeeOf::::calc(&smaller_tx_weight); + let larger_tx_unadjusted_weight_fee = WeightToFeeOf::::calc(&larger_tx_weight); + FixedU128::saturating_from_rational( + adjusted_weight_fee_difference, + larger_tx_unadjusted_weight_fee.saturating_sub(smaller_tx_unadjusted_weight_fee), + ) +} + +/// Compute fee that will be refunded to the relayer because dispatch of `total_prepaid_nonces` +/// messages has been paid at the source chain. 
+fn compute_prepaid_messages_refund( + total_prepaid_nonces: MessageNonce, + fee_multiplier: FixedU128, +) -> BalanceOf { + fee_multiplier.saturating_mul_int(WeightToFeeOf::::calc( + &P::PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN.saturating_mul(total_prepaid_nonces), + )) +} + +#[cfg(test)] +mod tests { + use super::*; + use relay_rococo_client::{Rococo, SigningParams as RococoSigningParams}; + use relay_wococo_client::{SigningParams as WococoSigningParams, Wococo}; + + #[derive(Clone)] + struct TestSubstrateMessageLane; + + impl SubstrateMessageLane for TestSubstrateMessageLane { + type MessageLane = crate::messages_lane::SubstrateMessageLaneToSubstrate< + Rococo, + RococoSigningParams, + Wococo, + WococoSigningParams, + >; + + const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = ""; + const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str = ""; + const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = ""; + + const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = ""; + const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str = ""; + const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = ""; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = ""; + const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = ""; + + const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str = ""; + const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = ""; + + const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight = 100_000; + + type SourceChain = Rococo; + type TargetChain = Wococo; + + fn source_transactions_author(&self) -> bp_rococo::AccountId { + unreachable!() + } + + fn make_messages_receiving_proof_transaction( + &self, + _best_block_id: SourceHeaderIdOf, + _transaction_nonce: IndexOf, + _generated_at_block: TargetHeaderIdOf, + _proof: ::MessagesReceivingProof, + ) -> Bytes { + unreachable!() + } + + fn target_transactions_author(&self) -> bp_wococo::AccountId { + unreachable!() + } + + fn 
make_messages_delivery_transaction( + &self, + _best_block_id: TargetHeaderIdOf, + _transaction_nonce: IndexOf, + _generated_at_header: SourceHeaderIdOf, + _nonces: RangeInclusive, + _proof: ::MessagesProof, + ) -> Bytes { + unreachable!() + } + } + + #[test] + fn prepare_dummy_messages_proof_works() { + const DISPATCH_WEIGHT: Weight = 1_000_000; + const SIZE: u32 = 1_000; + let dummy_proof = prepare_dummy_messages_proof::(1..=10, DISPATCH_WEIGHT, SIZE); + assert_eq!(dummy_proof.0, DISPATCH_WEIGHT); + assert!( + dummy_proof.1.encode().len() as u32 > SIZE, + "Expected proof size at least {}. Got: {}", + SIZE, + dummy_proof.1.encode().len(), + ); + } + + #[test] + fn convert_target_tokens_to_source_tokens_works() { + assert_eq!( + convert_target_tokens_to_source_tokens::((150, 100).into(), 1_000), + 1_500 + ); + assert_eq!( + convert_target_tokens_to_source_tokens::((50, 100).into(), 1_000), + 500 + ); + assert_eq!( + convert_target_tokens_to_source_tokens::((100, 100).into(), 1_000), + 1_000 + ); + } + + #[test] + fn compute_fee_multiplier_returns_sane_results() { + let multiplier = FixedU128::saturating_from_rational(1, 1000); + + let smaller_weight = 1_000_000; + let smaller_adjusted_weight_fee = + multiplier.saturating_mul_int(WeightToFeeOf::::calc(&smaller_weight)); + + let larger_weight = smaller_weight + 200_000; + let larger_adjusted_weight_fee = + multiplier.saturating_mul_int(WeightToFeeOf::::calc(&larger_weight)); + + assert_eq!( + compute_fee_multiplier::( + smaller_adjusted_weight_fee, + smaller_weight, + larger_adjusted_weight_fee, + larger_weight, + ), + multiplier, + ); + } + + #[test] + fn compute_prepaid_messages_refund_returns_sane_results() { + assert!( + compute_prepaid_messages_refund::( + 10, + FixedU128::saturating_from_rational(110, 100), + ) > (10 * TestSubstrateMessageLane::PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN) + .into() + ); + } +} diff --git a/polkadot/bridges/relays/bin-substrate/src/on_demand_headers.rs 
b/polkadot/bridges/relays/lib-substrate-relay/src/on_demand_headers.rs similarity index 78% rename from polkadot/bridges/relays/bin-substrate/src/on_demand_headers.rs rename to polkadot/bridges/relays/lib-substrate-relay/src/on_demand_headers.rs index 4a2b04328b8624a47e9cc515832e0c53d8ddab2b..ee141866eb97d5d4ff3c82874b4d9b5296b1c88c 100644 --- a/polkadot/bridges/relays/bin-substrate/src/on_demand_headers.rs +++ b/polkadot/bridges/relays/lib-substrate-relay/src/on_demand_headers.rs @@ -16,32 +16,38 @@ //! On-demand Substrate -> Substrate headers relay. -use crate::finality_pipeline::{ - SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate, RECENT_FINALITY_PROOFS_LIMIT, STALL_TIMEOUT, -}; -use crate::finality_target::SubstrateFinalityTarget; +use std::fmt::Debug; use async_std::sync::{Arc, Mutex}; -use bp_header_chain::justification::GrandpaJustification; +use futures::{select, FutureExt}; +use num_traits::{CheckedSub, One, Zero}; + use finality_relay::{ FinalitySyncParams, FinalitySyncPipeline, SourceClient as FinalitySourceClient, SourceHeader, TargetClient as FinalityTargetClient, }; -use futures::{select, FutureExt}; -use num_traits::{CheckedSub, One, Zero}; use relay_substrate_client::{ finality_source::{FinalitySource as SubstrateFinalitySource, RequiredHeaderNumberRef}, - BlockNumberOf, Chain, Client, HashOf, HeaderIdOf, SyncHeader, + Chain, Client, HeaderIdOf, SyncHeader, }; use relay_utils::{ - metrics::MetricsParams, relay_loop::Client as RelayClient, BlockNumberBase, FailedClient, MaybeConnectionError, + metrics::MetricsParams, relay_loop::Client as RelayClient, BlockNumberBase, FailedClient, + MaybeConnectionError, +}; + +use crate::{ + finality_pipeline::{ + SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate, RECENT_FINALITY_PROOFS_LIMIT, + }, + finality_target::SubstrateFinalityTarget, + STALL_TIMEOUT, }; -use std::fmt::Debug; /// On-demand Substrate <-> Substrate headers relay. 
/// -/// This relay may be requested to sync more headers, whenever some other relay (e.g. messages relay) needs -/// it to continue its regular work. When enough headers are relayed, on-demand stops syncing headers. +/// This relay may be requested to sync more headers, whenever some other relay (e.g. messages +/// relay) needs it to continue its regular work. When enough headers are relayed, on-demand stops +/// syncing headers. #[derive(Clone)] pub struct OnDemandHeadersRelay { /// Relay task name. @@ -52,11 +58,13 @@ pub struct OnDemandHeadersRelay { impl OnDemandHeadersRelay { /// Create new on-demand headers relay. - pub fn new( + pub fn new( source_client: Client, target_client: Client, - pipeline: SubstrateFinalityToSubstrate, + target_transactions_mortality: Option, + pipeline: P, maximal_headers_difference: SourceChain::BlockNumber, + only_mandatory_headers: bool, ) -> Self where SourceChain: Chain + Debug, @@ -64,15 +72,14 @@ impl OnDemandHeadersRelay { TargetChain: Chain + Debug, TargetChain::BlockNumber: BlockNumberBase, TargetSign: Clone + Send + Sync + 'static, - SubstrateFinalityToSubstrate: SubstrateFinalitySyncPipeline< - Hash = HashOf, - Number = BlockNumberOf, - Header = SyncHeader, - FinalityProof = GrandpaJustification, + P: SubstrateFinalitySyncPipeline< + FinalitySyncPipeline = SubstrateFinalityToSubstrate< + SourceChain, + TargetChain, + TargetSign, + >, TargetChain = TargetChain, >, - SubstrateFinalityTarget>: - FinalityTargetClient>, { let required_header_number = Arc::new(Mutex::new(Zero::zero())); let this = OnDemandHeadersRelay { @@ -83,8 +90,10 @@ impl OnDemandHeadersRelay { background_task( source_client, target_client, + target_transactions_mortality, pipeline, maximal_headers_difference, + only_mandatory_headers, required_header_number, ) .await; @@ -111,11 +120,13 @@ impl OnDemandHeadersRelay { } /// Background task that is responsible for starting headers relay. 
-async fn background_task( +async fn background_task( source_client: Client, target_client: Client, - pipeline: SubstrateFinalityToSubstrate, + target_transactions_mortality: Option, + pipeline: P, maximal_headers_difference: SourceChain::BlockNumber, + only_mandatory_headers: bool, required_header_number: RequiredHeaderNumberRef, ) where SourceChain: Chain + Debug, @@ -123,22 +134,21 @@ async fn background_task( TargetChain: Chain + Debug, TargetChain::BlockNumber: BlockNumberBase, TargetSign: Clone + Send + Sync + 'static, - SubstrateFinalityToSubstrate: SubstrateFinalitySyncPipeline< - Hash = HashOf, - Number = BlockNumberOf, - Header = SyncHeader, - FinalityProof = GrandpaJustification, + P: SubstrateFinalitySyncPipeline< + FinalitySyncPipeline = SubstrateFinalityToSubstrate, TargetChain = TargetChain, >, - SubstrateFinalityTarget>: - FinalityTargetClient>, { let relay_task_name = on_demand_headers_relay_name::(); let mut finality_source = SubstrateFinalitySource::< _, SubstrateFinalityToSubstrate, >::new(source_client.clone(), Some(required_header_number.clone())); - let mut finality_target = SubstrateFinalityTarget::new(target_client.clone(), pipeline.clone()); + let mut finality_target = SubstrateFinalityTarget::new( + target_client.clone(), + pipeline.clone(), + target_transactions_mortality, + ); let mut latest_non_mandatory_at_source = Zero::zero(); let mut restart_relay = true; @@ -165,12 +175,16 @@ async fn background_task( &mut finality_target, ) .await; - continue; + continue } // read best finalized source header number from target - let best_finalized_source_header_at_target = - best_finalized_source_header_at_target::(&finality_target, &relay_task_name).await; + let best_finalized_source_header_at_target = best_finalized_source_header_at_target::< + SourceChain, + _, + _, + >(&finality_target, &relay_task_name) + .await; if matches!(best_finalized_source_header_at_target, Err(ref e) if e.is_connection_error()) { 
relay_utils::relay_loop::reconnect_failed_client( FailedClient::Target, @@ -179,11 +193,12 @@ async fn background_task( &mut finality_target, ) .await; - continue; + continue } // submit mandatory header if some headers are missing - let best_finalized_source_header_at_target_fmt = format!("{:?}", best_finalized_source_header_at_target); + let best_finalized_source_header_at_target_fmt = + format!("{:?}", best_finalized_source_header_at_target); let mandatory_scan_range = mandatory_headers_scan_range::( best_finalized_source_header_at_source.ok(), best_finalized_source_header_at_target.ok(), @@ -209,8 +224,8 @@ async fn background_task( // there are no (or we don't need to relay them) mandatory headers in the range // => to avoid scanning the same headers over and over again, remember that latest_non_mandatory_at_source = mandatory_scan_range.1; - } - Err(e) => { + }, + Err(e) => if e.is_connection_error() { relay_utils::relay_loop::reconnect_failed_client( FailedClient::Source, @@ -219,9 +234,8 @@ async fn background_task( &mut finality_target, ) .await; - continue; - } - } + continue + }, } } @@ -232,10 +246,13 @@ async fn background_task( finality_source.clone(), finality_target.clone(), FinalitySyncParams { - tick: std::cmp::max(SourceChain::AVERAGE_BLOCK_INTERVAL, TargetChain::AVERAGE_BLOCK_INTERVAL), + tick: std::cmp::max( + SourceChain::AVERAGE_BLOCK_INTERVAL, + TargetChain::AVERAGE_BLOCK_INTERVAL, + ), recent_finality_proofs_limit: RECENT_FINALITY_PROOFS_LIMIT, stall_timeout: STALL_TIMEOUT, - only_mandatory_headers: false, + only_mandatory_headers, }, MetricsParams::disabled(), futures::future::pending(), @@ -281,12 +298,12 @@ async fn mandatory_headers_scan_range( .checked_sub(&best_finalized_source_header_at_target) .unwrap_or_else(Zero::zero); if current_headers_difference <= maximal_headers_difference { - return None; + return None } // if relay is already asked to sync headers, don't do anything yet if required_header_number > 
best_finalized_source_header_at_target { - return None; + return None } Some(( @@ -295,7 +312,8 @@ async fn mandatory_headers_scan_range( )) } -/// Try to find mandatory header in the inclusive headers range and, if one is found, ask to relay it. +/// Try to find mandatory header in the inclusive headers range and, if one is found, ask to relay +/// it. /// /// Returns `true` if header was found and (asked to be) relayed and `false` otherwise. async fn relay_mandatory_header_from_range( @@ -310,7 +328,8 @@ where P: FinalitySyncPipeline, { // search for mandatory header first - let mandatory_source_header_number = find_mandatory_header_in_range(finality_source, range).await?; + let mandatory_source_header_number = + find_mandatory_header_in_range(finality_source, range).await?; // if there are no mandatory headers - we have nothing to do let mandatory_source_header_number = match mandatory_source_header_number { @@ -322,7 +341,7 @@ where // less than our `mandatory_source_header_number` before logging anything let mut required_header_number = required_header_number.lock().await; if *required_header_number >= mandatory_source_header_number { - return Ok(false); + return Ok(false) } log::trace!( @@ -350,19 +369,16 @@ where SubstrateFinalitySource: FinalitySourceClient

, P: FinalitySyncPipeline, { - finality_source - .on_chain_best_finalized_block_number() - .await - .map_err(|error| { - log::error!( - target: "bridge", - "Failed to read best finalized source header from source in {} relay: {:?}", - relay_task_name, - error, - ); + finality_source.on_chain_best_finalized_block_number().await.map_err(|error| { + log::error!( + target: "bridge", + "Failed to read best finalized source header from source in {} relay: {:?}", + relay_task_name, + error, + ); - error - }) + error + }) } /// Read best finalized source block number from target client. @@ -373,22 +389,20 @@ async fn best_finalized_source_header_at_target Result as RelayClient>::Error> where - SubstrateFinalityTarget: FinalityTargetClient

, - P: FinalitySyncPipeline, + SubstrateFinalityTarget: FinalityTargetClient, + P: SubstrateFinalitySyncPipeline, + P::FinalitySyncPipeline: FinalitySyncPipeline, { - finality_target - .best_finalized_source_block_number() - .await - .map_err(|error| { - log::error!( - target: "bridge", - "Failed to read best finalized source header from target in {} relay: {:?}", - relay_task_name, - error, - ); + finality_target.best_finalized_source_block_number().await.map_err(|error| { + log::error!( + target: "bridge", + "Failed to read best finalized source header from target in {} relay: {:?}", + relay_task_name, + error, + ); - error - }) + error + }) } /// Read first mandatory header in given inclusive range. @@ -404,9 +418,10 @@ where { let mut current = range.0; while current <= range.1 { - let header: SyncHeader = finality_source.client().header_by_number(current).await?.into(); + let header: SyncHeader = + finality_source.client().header_by_number(current).await?.into(); if header.is_mandatory() { - return Ok(Some(current)); + return Ok(Some(current)) } current += One::one(); @@ -424,15 +439,21 @@ fn on_demand_headers_relay_name() -> Str mod tests { use super::*; - type TestChain = relay_millau_client::Millau; + type TestChain = relay_rococo_client::Rococo; - const AT_SOURCE: Option = Some(10); - const AT_TARGET: Option = Some(1); + const AT_SOURCE: Option = Some(10); + const AT_TARGET: Option = Some(1); #[async_std::test] async fn mandatory_headers_scan_range_selects_range_if_too_many_headers_are_missing() { assert_eq!( - mandatory_headers_scan_range::(AT_SOURCE, AT_TARGET, 5, &Arc::new(Mutex::new(0))).await, + mandatory_headers_scan_range::( + AT_SOURCE, + AT_TARGET, + 5, + &Arc::new(Mutex::new(0)) + ) + .await, Some((AT_TARGET.unwrap() + 1, AT_SOURCE.unwrap())), ); } @@ -440,7 +461,13 @@ mod tests { #[async_std::test] async fn mandatory_headers_scan_range_selects_nothing_if_enough_headers_are_relayed() { assert_eq!( - mandatory_headers_scan_range::(AT_SOURCE, 
AT_TARGET, 10, &Arc::new(Mutex::new(0))).await, + mandatory_headers_scan_range::( + AT_SOURCE, + AT_TARGET, + 10, + &Arc::new(Mutex::new(0)) + ) + .await, None, ); } diff --git a/polkadot/bridges/relays/messages/Cargo.toml b/polkadot/bridges/relays/messages/Cargo.toml index ea5d46845c5a3ccde4e1843126863ac3c89beed3..b11f00b957a42b55c35023900181cca8b11a93fb 100644 --- a/polkadot/bridges/relays/messages/Cargo.toml +++ b/polkadot/bridges/relays/messages/Cargo.toml @@ -19,3 +19,5 @@ parking_lot = "0.11.0" bp-messages = { path = "../../primitives/messages" } bp-runtime = { path = "../../primitives/runtime" } relay-utils = { path = "../utils" } + +sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/messages/src/lib.rs b/polkadot/bridges/relays/messages/src/lib.rs index cdd94bca9541260a4705256f67e9ff4c65fda873..c9e460300342f46973fe631382a05793478a87eb 100644 --- a/polkadot/bridges/relays/messages/src/lib.rs +++ b/polkadot/bridges/relays/messages/src/lib.rs @@ -18,7 +18,7 @@ //! data. Message lane allows sending arbitrary messages between bridged chains. This //! module provides entrypoint that starts reading messages from given message lane //! of source chain and submits proof-of-message-at-source-chain transactions to the -//! target chain. Additionaly, proofs-of-messages-delivery are sent back from the +//! target chain. Additionally, proofs-of-messages-delivery are sent back from the //! target chain to the source chain. // required for futures::select! 
@@ -29,6 +29,7 @@ mod metrics; pub mod message_lane; pub mod message_lane_loop; +pub mod relay_strategy; mod message_race_delivery; mod message_race_loop; diff --git a/polkadot/bridges/relays/messages/src/message_lane.rs b/polkadot/bridges/relays/messages/src/message_lane.rs index 8757e9322ce4473c230cf34a0e631aac94017d30..5c9728ad93abd5aa1ea9b2fc77b2a6f9968539f6 100644 --- a/polkadot/bridges/relays/messages/src/message_lane.rs +++ b/polkadot/bridges/relays/messages/src/message_lane.rs @@ -21,7 +21,8 @@ use num_traits::{SaturatingAdd, Zero}; use relay_utils::{BlockNumberBase, HeaderId}; -use std::fmt::Debug; +use sp_arithmetic::traits::AtLeast32BitUnsigned; +use std::{fmt::Debug, ops::Sub}; /// One-way message lane. pub trait MessageLane: 'static + Clone + Send + Sync { @@ -40,7 +41,16 @@ pub trait MessageLane: 'static + Clone + Send + Sync { /// 1) pay transaction fees; /// 2) pay message delivery and dispatch fee; /// 3) pay relayer rewards. - type SourceChainBalance: Clone + Copy + Debug + PartialOrd + SaturatingAdd + Zero + Send + Sync; + type SourceChainBalance: AtLeast32BitUnsigned + + Clone + + Copy + + Debug + + PartialOrd + + Sub + + SaturatingAdd + + Zero + + Send + + Sync; /// Number of the source header. type SourceHeaderNumber: BlockNumberBase; /// Hash of the source header. @@ -53,7 +63,9 @@ pub trait MessageLane: 'static + Clone + Send + Sync { } /// Source header id within given one-way message lane. -pub type SourceHeaderIdOf

= HeaderId<

::SourceHeaderHash,

::SourceHeaderNumber>; +pub type SourceHeaderIdOf

= + HeaderId<

::SourceHeaderHash,

::SourceHeaderNumber>; /// Target header id within given one-way message lane. -pub type TargetHeaderIdOf

= HeaderId<

::TargetHeaderHash,

::TargetHeaderNumber>; +pub type TargetHeaderIdOf

= + HeaderId<

::TargetHeaderHash,

::TargetHeaderNumber>; diff --git a/polkadot/bridges/relays/messages/src/message_lane_loop.rs b/polkadot/bridges/relays/messages/src/message_lane_loop.rs index 32c24985a447e4cd58085f14aebf2136cc5256ea..1e7dc6e65fd670d37f73840302afa5d4962d8abd 100644 --- a/polkadot/bridges/relays/messages/src/message_lane_loop.rs +++ b/polkadot/bridges/relays/messages/src/message_lane_loop.rs @@ -24,27 +24,29 @@ //! finalized header. I.e. when talking about headers in lane context, we //! only care about finalized headers. -use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}; -use crate::message_race_delivery::run as run_message_delivery_race; -use crate::message_race_receiving::run as run_message_receiving_race; -use crate::metrics::MessageLaneLoopMetrics; +use std::{collections::BTreeMap, fmt::Debug, future::Future, ops::RangeInclusive, time::Duration}; use async_trait::async_trait; +use futures::{channel::mpsc::unbounded, future::FutureExt, stream::StreamExt}; + use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; use bp_runtime::messages::DispatchFeePayment; -use futures::{channel::mpsc::unbounded, future::FutureExt, stream::StreamExt}; use relay_utils::{ - interval, - metrics::{GlobalMetrics, MetricsParams}, - process_future_result, - relay_loop::Client as RelayClient, + interval, metrics::MetricsParams, process_future_result, relay_loop::Client as RelayClient, retry_backoff, FailedClient, }; -use std::{collections::BTreeMap, fmt::Debug, future::Future, ops::RangeInclusive, time::Duration}; + +use crate::{ + message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, + message_race_delivery::run as run_message_delivery_race, + message_race_receiving::run as run_message_receiving_race, + metrics::MessageLaneLoopMetrics, + relay_strategy::RelayStrategy, +}; /// Message lane loop configuration params. #[derive(Debug, Clone)] -pub struct Params { +pub struct Params { /// Id of lane this loop is servicing. 
pub lane: LaneId, /// Interval at which we ask target node about its updates. @@ -56,7 +58,7 @@ pub struct Params { /// The loop will auto-restart if there has been no updates during this period. pub stall_timeout: Duration, /// Message delivery race parameters. - pub delivery_params: MessageDeliveryParams, + pub delivery_params: MessageDeliveryParams, } /// Relayer operating mode. @@ -64,20 +66,22 @@ pub struct Params { pub enum RelayerMode { /// The relayer doesn't care about rewards. Altruistic, - /// The relayer will deliver all messages and confirmations as long as he's not losing any funds. - NoLosses, + /// The relayer will deliver all messages and confirmations as long as he's not losing any + /// funds. + Rational, } /// Message delivery race parameters. #[derive(Debug, Clone)] -pub struct MessageDeliveryParams { - /// Maximal number of unconfirmed relayer entries at the inbound lane. If there's that number of entries - /// in the `InboundLaneData::relayers` set, all new messages will be rejected until reward payment will - /// be proved (by including outbound lane state to the message delivery transaction). +pub struct MessageDeliveryParams { + /// Maximal number of unconfirmed relayer entries at the inbound lane. If there's that number + /// of entries in the `InboundLaneData::relayers` set, all new messages will be rejected until + /// reward payment will be proved (by including outbound lane state to the message delivery + /// transaction). pub max_unrewarded_relayer_entries_at_target: MessageNonce, - /// Message delivery race will stop delivering messages if there are `max_unconfirmed_nonces_at_target` - /// unconfirmed nonces on the target node. The race would continue once they're confirmed by the - /// receiving race. + /// Message delivery race will stop delivering messages if there are + /// `max_unconfirmed_nonces_at_target` unconfirmed nonces on the target node. The race would + /// continue once they're confirmed by the receiving race. 
pub max_unconfirmed_nonces_at_target: MessageNonce, /// Maximal number of relayed messages in single delivery transaction. pub max_messages_in_single_batch: MessageNonce, @@ -85,8 +89,8 @@ pub struct MessageDeliveryParams { pub max_messages_weight_in_single_batch: Weight, /// Maximal cumulative size of relayed messages in single delivery transaction. pub max_messages_size_in_single_batch: u32, - /// Relayer operating mode. - pub relayer_mode: RelayerMode, + /// Relay strategy + pub relay_strategy: Strategy, } /// Message details. @@ -103,7 +107,8 @@ pub struct MessageDetails { } /// Messages details map. -pub type MessageDetailsMap = BTreeMap>; +pub type MessageDetailsMap = + BTreeMap>; /// Message delivery race proof parameters. #[derive(Debug, PartialEq)] @@ -125,6 +130,7 @@ pub trait SourceClient: RelayClient { &self, id: SourceHeaderIdOf

, ) -> Result<(SourceHeaderIdOf

, MessageNonce), Self::Error>; + /// Get nonce of the latest message, which receiving has been confirmed by the target chain. async fn latest_confirmed_received_nonce( &self, @@ -175,11 +181,12 @@ pub trait TargetClient: RelayClient { id: TargetHeaderIdOf

, ) -> Result<(TargetHeaderIdOf

, MessageNonce), Self::Error>; - /// Get nonce of latest confirmed message. + /// Get nonce of the latest confirmed message. async fn latest_confirmed_received_nonce( &self, id: TargetHeaderIdOf

, ) -> Result<(TargetHeaderIdOf

, MessageNonce), Self::Error>; + /// Get state of unrewarded relayers set at the inbound lane. async fn unrewarded_relayers_state( &self, @@ -210,19 +217,21 @@ pub trait TargetClient: RelayClient { async fn estimate_delivery_transaction_in_source_tokens( &self, nonces: RangeInclusive, + total_prepaid_nonces: MessageNonce, total_dispatch_weight: Weight, total_size: u32, - ) -> P::SourceChainBalance; + ) -> Result; } /// State of the client. #[derive(Clone, Debug, Default, PartialEq)] pub struct ClientState { - /// Best header id of this chain. + /// The best header id of this chain. pub best_self: SelfHeaderId, /// Best finalized header id of this chain. pub best_finalized_self: SelfHeaderId, - /// Best finalized header id of the peer chain read at the best block of this chain (at `best_finalized_self`). + /// Best finalized header id of the peer chain read at the best block of this chain (at + /// `best_finalized_self`). pub best_finalized_peer_at_best_self: PeerHeaderId, } @@ -241,50 +250,48 @@ pub struct ClientsState { pub target: Option>, } -/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs sync loop. +/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs +/// sync loop. pub fn metrics_prefix(lane: &LaneId) -> String { - format!( - "{}_to_{}_MessageLane_{}", - P::SOURCE_NAME, - P::TARGET_NAME, - hex::encode(lane) - ) + format!("{}_to_{}_MessageLane_{}", P::SOURCE_NAME, P::TARGET_NAME, hex::encode(lane)) } /// Run message lane service loop. -pub async fn run( - params: Params, +pub async fn run( + params: Params, source_client: impl SourceClient

, target_client: impl TargetClient

, metrics_params: MetricsParams, exit_signal: impl Future + Send + 'static, -) -> Result<(), String> { +) -> Result<(), relay_utils::Error> { let exit_signal = exit_signal.shared(); relay_utils::relay_loop(source_client, target_client) .reconnect_delay(params.reconnect_delay) - .with_metrics(Some(metrics_prefix::

(¶ms.lane)), metrics_params) - .loop_metric(|registry, prefix| MessageLaneLoopMetrics::new(registry, prefix))? - .standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))? + .with_metrics(metrics_params) + .loop_metric(MessageLaneLoopMetrics::new(Some(&metrics_prefix::

(¶ms.lane)))?)? .expose() .await? - .run( - metrics_prefix::

(¶ms.lane), - move |source_client, target_client, metrics| { - run_until_connection_lost( - params.clone(), - source_client, - target_client, - metrics, - exit_signal.clone(), - ) - }, - ) + .run(metrics_prefix::

(¶ms.lane), move |source_client, target_client, metrics| { + run_until_connection_lost( + params.clone(), + source_client, + target_client, + metrics, + exit_signal.clone(), + ) + }) .await } -/// Run one-way message delivery loop until connection with target or source node is lost, or exit signal is received. -async fn run_until_connection_lost, TC: TargetClient

>( - params: Params, +/// Run one-way message delivery loop until connection with target or source node is lost, or exit +/// signal is received. +async fn run_until_connection_lost< + P: MessageLane, + Strategy: RelayStrategy, + SC: SourceClient

, + TC: TargetClient

, +>( + params: Params, source_client: SC, target_client: TC, metrics_msg: Option, @@ -446,11 +453,16 @@ async fn run_until_connection_lost, TC: Targ #[cfg(test)] pub(crate) mod tests { - use super::*; + use std::sync::Arc; + use futures::stream::StreamExt; use parking_lot::Mutex; + use relay_utils::{HeaderId, MaybeConnectionError}; - use std::sync::Arc; + + use crate::relay_strategy::AltruisticStrategy; + + use super::*; pub fn header_id(number: TestSourceHeaderNumber) -> TestSourceHeaderId { HeaderId(number, number) @@ -554,7 +566,7 @@ pub(crate) mod tests { let mut data = self.data.lock(); (self.tick)(&mut *data); if data.is_source_fails { - return Err(TestError); + return Err(TestError) } Ok(data.source_state.clone()) } @@ -566,7 +578,7 @@ pub(crate) mod tests { let mut data = self.data.lock(); (self.tick)(&mut *data); if data.is_source_fails { - return Err(TestError); + return Err(TestError) } Ok((id, data.source_latest_generated_nonce)) } @@ -606,11 +618,7 @@ pub(crate) mod tests { nonces: RangeInclusive, proof_parameters: MessageProofParameters, ) -> Result< - ( - SourceHeaderIdOf, - RangeInclusive, - TestMessagesProof, - ), + (SourceHeaderIdOf, RangeInclusive, TestMessagesProof), TestError, > { let mut data = self.data.lock(); @@ -691,7 +699,7 @@ pub(crate) mod tests { let mut data = self.data.lock(); (self.tick)(&mut *data); if data.is_target_fails { - return Err(TestError); + return Err(TestError) } Ok(data.target_state.clone()) } @@ -703,7 +711,7 @@ pub(crate) mod tests { let mut data = self.data.lock(); (self.tick)(&mut *data); if data.is_target_fails { - return Err(TestError); + return Err(TestError) } Ok((id, data.target_latest_received_nonce)) } @@ -729,7 +737,7 @@ pub(crate) mod tests { let mut data = self.data.lock(); (self.tick)(&mut *data); if data.is_target_fails { - return Err(TestError); + return Err(TestError) } Ok((id, data.target_latest_confirmed_received_nonce)) } @@ -750,14 +758,15 @@ pub(crate) mod tests { let mut data = 
self.data.lock(); (self.tick)(&mut *data); if data.is_target_fails { - return Err(TestError); + return Err(TestError) } data.target_state.best_self = HeaderId(data.target_state.best_self.0 + 1, data.target_state.best_self.1 + 1); data.target_state.best_finalized_self = data.target_state.best_self; data.target_latest_received_nonce = *proof.0.end(); if let Some(target_latest_confirmed_received_nonce) = proof.1 { - data.target_latest_confirmed_received_nonce = target_latest_confirmed_received_nonce; + data.target_latest_confirmed_received_nonce = + target_latest_confirmed_received_nonce; } data.submitted_messages_proofs.push(proof); Ok(nonces) @@ -773,12 +782,13 @@ pub(crate) mod tests { async fn estimate_delivery_transaction_in_source_tokens( &self, nonces: RangeInclusive, + _total_prepaid_nonces: MessageNonce, total_dispatch_weight: Weight, total_size: u32, - ) -> TestSourceChainBalance { - BASE_MESSAGE_DELIVERY_TRANSACTION_COST * (nonces.end() - nonces.start() + 1) - + total_dispatch_weight - + total_size as TestSourceChainBalance + ) -> Result { + Ok(BASE_MESSAGE_DELIVERY_TRANSACTION_COST * (nonces.end() - nonces.start() + 1) + + total_dispatch_weight + + total_size as TestSourceChainBalance) } } @@ -791,14 +801,8 @@ pub(crate) mod tests { async_std::task::block_on(async { let data = Arc::new(Mutex::new(data)); - let source_client = TestSourceClient { - data: data.clone(), - tick: source_tick, - }; - let target_client = TestTargetClient { - data: data.clone(), - tick: target_tick, - }; + let source_client = TestSourceClient { data: data.clone(), tick: source_tick }; + let target_client = TestTargetClient { data: data.clone(), tick: target_tick }; let _ = run( Params { lane: [0, 0, 0, 0], @@ -812,7 +816,7 @@ pub(crate) mod tests { max_messages_in_single_batch: 4, max_messages_weight_in_single_batch: 4, max_messages_size_in_single_batch: 4, - relayer_mode: RelayerMode::Altruistic, + relay_strategy: AltruisticStrategy, }, }, source_client, @@ -901,7 +905,10 @@ 
pub(crate) mod tests { data.source_state.best_finalized_self = data.source_state.best_self; // headers relay must only be started when we need new target headers at source node if data.target_to_source_header_required.is_some() { - assert!(data.source_state.best_finalized_peer_at_best_self.0 < data.target_state.best_self.0); + assert!( + data.source_state.best_finalized_peer_at_best_self.0 < + data.target_state.best_self.0 + ); data.target_to_source_header_required = None; } // syncing target headers -> source chain @@ -918,7 +925,10 @@ pub(crate) mod tests { data.target_state.best_finalized_self = data.target_state.best_self; // headers relay must only be started when we need new source headers at target node if data.source_to_target_header_required.is_some() { - assert!(data.target_state.best_finalized_peer_at_best_self.0 < data.source_state.best_self.0); + assert!( + data.target_state.best_finalized_peer_at_best_self.0 < + data.source_state.best_self.0 + ); data.source_to_target_header_required = None; } // syncing source headers -> target chain diff --git a/polkadot/bridges/relays/messages/src/message_race_delivery.rs b/polkadot/bridges/relays/messages/src/message_race_delivery.rs index bde09af7068fdc834b965b7bc36a33c9ea3ade4d..dc994364f1787989e1ebafca1960429c52c5578a 100644 --- a/polkadot/bridges/relays/messages/src/message_race_delivery.rs +++ b/polkadot/bridges/relays/messages/src/message_race_delivery.rs @@ -11,43 +11,41 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -//! Message delivery race delivers proof-of-messages from lane.source to lane.target. +//! Message delivery race delivers proof-of-messages from "lane.source" to "lane.target". 
-use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}; -use crate::message_lane_loop::{ - MessageDeliveryParams, MessageDetailsMap, MessageProofParameters, RelayerMode, - SourceClient as MessageLaneSourceClient, SourceClientState, TargetClient as MessageLaneTargetClient, - TargetClientState, -}; -use crate::message_race_loop::{ - MessageRace, NoncesRange, RaceState, RaceStrategy, SourceClient, SourceClientNonces, TargetClient, - TargetClientNonces, -}; -use crate::message_race_strategy::{BasicStrategy, SourceRangesQueue}; -use crate::metrics::MessageLaneLoopMetrics; +use std::{collections::VecDeque, marker::PhantomData, ops::RangeInclusive, time::Duration}; use async_trait::async_trait; -use bp_messages::{MessageNonce, UnrewardedRelayersState, Weight}; -use bp_runtime::messages::DispatchFeePayment; use futures::stream::FusedStream; -use num_traits::{SaturatingAdd, Zero}; + +use bp_messages::{MessageNonce, UnrewardedRelayersState, Weight}; use relay_utils::FailedClient; -use std::{ - collections::VecDeque, - marker::PhantomData, - ops::{Range, RangeInclusive}, - time::Duration, + +use crate::{ + message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, + message_lane_loop::{ + MessageDeliveryParams, MessageDetailsMap, MessageProofParameters, + SourceClient as MessageLaneSourceClient, SourceClientState, + TargetClient as MessageLaneTargetClient, TargetClientState, + }, + message_race_loop::{ + MessageRace, NoncesRange, RaceState, RaceStrategy, SourceClient, SourceClientNonces, + TargetClient, TargetClientNonces, + }, + message_race_strategy::BasicStrategy, + metrics::MessageLaneLoopMetrics, + relay_strategy::{EnforcementStrategy, RelayMessagesBatchReference, RelayStrategy}, }; /// Run message delivery race. -pub async fn run( +pub async fn run( source_client: impl MessageLaneSourceClient

, source_state_updates: impl FusedStream>, target_client: impl MessageLaneTargetClient

, target_state_updates: impl FusedStream>, stall_timeout: Duration, metrics_msg: Option, - params: MessageDeliveryParams, + params: MessageDeliveryParams, ) -> Result<(), FailedClient> { crate::message_race_loop::run( MessageDeliveryRaceSource { @@ -63,15 +61,16 @@ pub async fn run( }, target_state_updates, stall_timeout, - MessageDeliveryStrategy:: { + MessageDeliveryStrategy:: { lane_source_client: source_client, lane_target_client: target_client, - max_unrewarded_relayer_entries_at_target: params.max_unrewarded_relayer_entries_at_target, + max_unrewarded_relayer_entries_at_target: params + .max_unrewarded_relayer_entries_at_target, max_unconfirmed_nonces_at_target: params.max_unconfirmed_nonces_at_target, max_messages_in_single_batch: params.max_messages_in_single_batch, max_messages_weight_in_single_batch: params.max_messages_weight_in_single_batch, max_messages_size_in_single_batch: params.max_messages_size_in_single_batch, - relayer_mode: params.relayer_mode, + relay_strategy: params.relay_strategy, latest_confirmed_nonces_at_source: VecDeque::new(), target_nonces: None, strategy: BasicStrategy::new(), @@ -121,8 +120,10 @@ where at_block: SourceHeaderIdOf

, prev_latest_nonce: MessageNonce, ) -> Result<(SourceHeaderIdOf

, SourceClientNonces), Self::Error> { - let (at_block, latest_generated_nonce) = self.client.latest_generated_nonce(at_block).await?; - let (at_block, latest_confirmed_nonce) = self.client.latest_confirmed_received_nonce(at_block).await?; + let (at_block, latest_generated_nonce) = + self.client.latest_generated_nonce(at_block).await?; + let (at_block, latest_confirmed_nonce) = + self.client.latest_confirmed_received_nonce(at_block).await?; if let Some(metrics_msg) = self.metrics_msg.as_ref() { metrics_msg.update_source_latest_generated_nonce::

(latest_generated_nonce); @@ -131,7 +132,10 @@ where let new_nonces = if latest_generated_nonce > prev_latest_nonce { self.client - .generated_message_details(at_block.clone(), prev_latest_nonce + 1..=latest_generated_nonce) + .generated_message_details( + at_block.clone(), + prev_latest_nonce + 1..=latest_generated_nonce, + ) .await? } else { MessageDetailsMap::new() @@ -139,10 +143,7 @@ where Ok(( at_block, - SourceClientNonces { - new_nonces, - confirmed_nonce: Some(latest_confirmed_nonce), - }, + SourceClientNonces { new_nonces, confirmed_nonce: Some(latest_confirmed_nonce) }, )) } @@ -151,7 +152,8 @@ where at_block: SourceHeaderIdOf

, nonces: RangeInclusive, proof_parameters: Self::ProofParameters, - ) -> Result<(SourceHeaderIdOf

, RangeInclusive, P::MessagesProof), Self::Error> { + ) -> Result<(SourceHeaderIdOf

, RangeInclusive, P::MessagesProof), Self::Error> + { self.client.prove_messages(at_block, nonces, proof_parameters).await } } @@ -180,10 +182,13 @@ where &self, at_block: TargetHeaderIdOf

, update_metrics: bool, - ) -> Result<(TargetHeaderIdOf

, TargetClientNonces), Self::Error> { + ) -> Result<(TargetHeaderIdOf

, TargetClientNonces), Self::Error> + { let (at_block, latest_received_nonce) = self.client.latest_received_nonce(at_block).await?; - let (at_block, latest_confirmed_nonce) = self.client.latest_confirmed_received_nonce(at_block).await?; - let (at_block, unrewarded_relayers) = self.client.unrewarded_relayers_state(at_block).await?; + let (at_block, latest_confirmed_nonce) = + self.client.latest_confirmed_received_nonce(at_block).await?; + let (at_block, unrewarded_relayers) = + self.client.unrewarded_relayers_state(at_block).await?; if update_metrics { if let Some(metrics_msg) = self.metrics_msg.as_ref() { @@ -210,16 +215,14 @@ where nonces: RangeInclusive, proof: P::MessagesProof, ) -> Result, Self::Error> { - self.client - .submit_messages_proof(generated_at_block, nonces, proof) - .await + self.client.submit_messages_proof(generated_at_block, nonces, proof).await } } /// Additional nonces data from the target client used by message delivery race. #[derive(Debug, Clone)] struct DeliveryRaceTargetNoncesData { - /// Latest nonce that we know: (1) has been delivered to us (2) has been confirmed + /// The latest nonce that we know: (1) has been delivered to us (2) has been confirmed /// back to the source node (by confirmations race) and (3) relayer has received /// reward for (and this has been confirmed by the message delivery race). confirmed_nonce: MessageNonce, @@ -228,7 +231,7 @@ struct DeliveryRaceTargetNoncesData { } /// Messages delivery strategy. -struct MessageDeliveryStrategy { +struct MessageDeliveryStrategy { /// The client that is connected to the message lane source node. lane_source_client: SC, /// The client that is connected to the message lane target node. @@ -244,8 +247,9 @@ struct MessageDeliveryStrategy { /// Maximal messages size in the single delivery transaction. max_messages_size_in_single_batch: u32, /// Relayer operating mode. 
- relayer_mode: RelayerMode, - /// Latest confirmed nonces at the source client + the header id where we have first met this nonce. + relay_strategy: Strategy, + /// Latest confirmed nonces at the source client + the header id where we have first met this + /// nonce. latest_confirmed_nonces_at_source: VecDeque<(SourceHeaderIdOf

, MessageNonce)>, /// Target nonces from the source client. target_nonces: Option>, @@ -262,37 +266,27 @@ type MessageDeliveryStrategyBase

= BasicStrategy<

::MessagesProof, >; -impl std::fmt::Debug for MessageDeliveryStrategy { +impl std::fmt::Debug + for MessageDeliveryStrategy +{ fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("MessageDeliveryStrategy") .field( "max_unrewarded_relayer_entries_at_target", &self.max_unrewarded_relayer_entries_at_target, ) - .field( - "max_unconfirmed_nonces_at_target", - &self.max_unconfirmed_nonces_at_target, - ) + .field("max_unconfirmed_nonces_at_target", &self.max_unconfirmed_nonces_at_target) .field("max_messages_in_single_batch", &self.max_messages_in_single_batch) - .field( - "max_messages_weight_in_single_batch", - &self.max_messages_weight_in_single_batch, - ) - .field( - "max_messages_size_in_single_batch", - &self.max_messages_size_in_single_batch, - ) - .field( - "latest_confirmed_nonces_at_source", - &self.latest_confirmed_nonces_at_source, - ) + .field("max_messages_weight_in_single_batch", &self.max_messages_weight_in_single_batch) + .field("max_messages_size_in_single_batch", &self.max_messages_size_in_single_batch) + .field("latest_confirmed_nonces_at_source", &self.latest_confirmed_nonces_at_source) .field("target_nonces", &self.target_nonces) .field("strategy", &self.strategy) .finish() } } -impl MessageDeliveryStrategy { +impl MessageDeliveryStrategy { /// Returns total weight of all undelivered messages. fn total_queued_dispatch_weight(&self) -> Weight { self.strategy @@ -304,8 +298,9 @@ impl MessageDeliveryStrategy { } #[async_trait] -impl RaceStrategy, TargetHeaderIdOf

, P::MessagesProof> - for MessageDeliveryStrategy +impl + RaceStrategy, TargetHeaderIdOf

, P::MessagesProof> + for MessageDeliveryStrategy where P: MessageLane, SC: MessageLaneSourceClient

, @@ -319,8 +314,12 @@ where self.strategy.is_empty() } - fn required_source_header_at_target(&self, current_best: &SourceHeaderIdOf

) -> Option> { - let header_required_for_messages_delivery = self.strategy.required_source_header_at_target(current_best); + fn required_source_header_at_target( + &self, + current_best: &SourceHeaderIdOf

, + ) -> Option> { + let header_required_for_messages_delivery = + self.strategy.required_source_header_at_target(current_best); let header_required_for_reward_confirmations_delivery = self.latest_confirmed_nonces_at_source.back().map(|(id, _)| id.clone()); match ( @@ -371,10 +370,7 @@ where self.target_nonces = Some(target_nonces); self.strategy.best_target_nonces_updated( - TargetClientNonces { - latest_nonce: nonces.latest_nonce, - nonces_data: (), - }, + TargetClientNonces { latest_nonce: nonces.latest_nonce, nonces_data: () }, race_state, ) } @@ -399,14 +395,12 @@ where } if let Some(ref mut target_nonces) = self.target_nonces { - target_nonces.latest_nonce = std::cmp::max(target_nonces.latest_nonce, nonces.latest_nonce); + target_nonces.latest_nonce = + std::cmp::max(target_nonces.latest_nonce, nonces.latest_nonce); } self.strategy.finalized_target_nonces_updated( - TargetClientNonces { - latest_nonce: nonces.latest_nonce, - nonces_data: (), - }, + TargetClientNonces { latest_nonce: nonces.latest_nonce, nonces_data: () }, race_state, ) } @@ -428,12 +422,15 @@ where // There's additional condition in the message delivery race: target would reject messages // if there are too much unconfirmed messages at the inbound lane. - // The receiving race is responsible to deliver confirmations back to the source chain. So if - // there's a lot of unconfirmed messages, let's wait until it'll be able to do its job. + // The receiving race is responsible to deliver confirmations back to the source chain. So + // if there's a lot of unconfirmed messages, let's wait until it'll be able to do its job. 
let latest_received_nonce_at_target = target_nonces.latest_nonce; - let confirmations_missing = latest_received_nonce_at_target.checked_sub(latest_confirmed_nonce_at_source); + let confirmations_missing = + latest_received_nonce_at_target.checked_sub(latest_confirmed_nonce_at_source); match confirmations_missing { - Some(confirmations_missing) if confirmations_missing >= self.max_unconfirmed_nonces_at_target => { + Some(confirmations_missing) + if confirmations_missing >= self.max_unconfirmed_nonces_at_target => + { log::debug!( target: "bridge", "Cannot deliver any more messages from {} to {}. Too many unconfirmed nonces \ @@ -445,50 +442,55 @@ where self.max_unconfirmed_nonces_at_target, ); - return None; - } + return None + }, _ => (), } - // Ok - we may have new nonces to deliver. But target may still reject new messages, because we haven't - // notified it that (some) messages have been confirmed. So we may want to include updated - // `source.latest_confirmed` in the proof. + // Ok - we may have new nonces to deliver. But target may still reject new messages, because + // we haven't notified it that (some) messages have been confirmed. So we may want to + // include updated `source.latest_confirmed` in the proof. // - // Important note: we're including outbound state lane proof whenever there are unconfirmed nonces - // on the target chain. Other strategy is to include it only if it's absolutely necessary. + // Important note: we're including outbound state lane proof whenever there are unconfirmed + // nonces on the target chain. Other strategy is to include it only if it's absolutely + // necessary. 
let latest_confirmed_nonce_at_target = target_nonces.nonces_data.confirmed_nonce; - let outbound_state_proof_required = latest_confirmed_nonce_at_target < latest_confirmed_nonce_at_source; + let outbound_state_proof_required = + latest_confirmed_nonce_at_target < latest_confirmed_nonce_at_source; // The target node would also reject messages if there are too many entries in the // "unrewarded relayers" set. If we are unable to prove new rewards to the target node, then // we should wait for confirmations race. let unrewarded_relayer_entries_limit_reached = - target_nonces.nonces_data.unrewarded_relayers.unrewarded_relayer_entries - >= self.max_unrewarded_relayer_entries_at_target; + target_nonces.nonces_data.unrewarded_relayers.unrewarded_relayer_entries >= + self.max_unrewarded_relayer_entries_at_target; if unrewarded_relayer_entries_limit_reached { // so there are already too many unrewarded relayer entries in the set // - // => check if we can prove enough rewards. If not, we should wait for more rewards to be paid + // => check if we can prove enough rewards. If not, we should wait for more rewards to + // be paid let number_of_rewards_being_proved = latest_confirmed_nonce_at_source.saturating_sub(latest_confirmed_nonce_at_target); - let enough_rewards_being_proved = number_of_rewards_being_proved - >= target_nonces.nonces_data.unrewarded_relayers.messages_in_oldest_entry; + let enough_rewards_being_proved = number_of_rewards_being_proved >= + target_nonces.nonces_data.unrewarded_relayers.messages_in_oldest_entry; if !enough_rewards_being_proved { - return None; + return None } } - // If we're here, then the confirmations race did its job && sending side now knows that messages - // have been delivered. Now let's select nonces that we want to deliver. + // If we're here, then the confirmations race did its job && sending side now knows that + // messages have been delivered. Now let's select nonces that we want to deliver. 
// // We may deliver at most: // - // max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target - latest_confirmed_nonce_at_target) + // max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target - + // latest_confirmed_nonce_at_target) // - // messages in the batch. But since we're including outbound state proof in the batch, then it - // may be increased to: + // messages in the batch. But since we're including outbound state proof in the batch, then + // it may be increased to: // - // max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target - latest_confirmed_nonce_at_source) + // max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target - + // latest_confirmed_nonce_at_source) let future_confirmed_nonce_at_target = if outbound_state_proof_required { latest_confirmed_nonce_at_source } else { @@ -501,24 +503,26 @@ where let max_nonces = std::cmp::min(max_nonces, self.max_messages_in_single_batch); let max_messages_weight_in_single_batch = self.max_messages_weight_in_single_batch; let max_messages_size_in_single_batch = self.max_messages_size_in_single_batch; - let relayer_mode = self.relayer_mode; let lane_source_client = self.lane_source_client.clone(); let lane_target_client = self.lane_target_client.clone(); - let maximal_source_queue_index = self.strategy.maximal_available_source_queue_index(race_state)?; + let maximal_source_queue_index = + self.strategy.maximal_available_source_queue_index(race_state)?; let previous_total_dispatch_weight = self.total_queued_dispatch_weight(); let source_queue = self.strategy.source_queue(); - let range_end = select_nonces_for_delivery_transaction( - relayer_mode, - max_nonces, + + let reference = RelayMessagesBatchReference { + max_messages_in_this_batch: max_nonces, max_messages_weight_in_single_batch, max_messages_size_in_single_batch, - lane_source_client.clone(), - lane_target_client.clone(), - source_queue, - 0..maximal_source_queue_index + 1, - ) - .await?; + 
lane_source_client: lane_source_client.clone(), + lane_target_client: lane_target_client.clone(), + nonces_queue: source_queue.clone(), + nonces_queue_range: 0..maximal_source_queue_index + 1, + }; + + let mut strategy = EnforcementStrategy::new(self.relay_strategy.clone()); + let range_end = strategy.decide(reference).await?; let range_begin = source_queue[0].1.begin(); let selected_nonces = range_begin..=range_end; @@ -529,207 +533,11 @@ where Some(( selected_nonces, - MessageProofParameters { - outbound_state_proof_required, - dispatch_weight, - }, + MessageProofParameters { outbound_state_proof_required, dispatch_weight }, )) } } -/// From given set of source nonces, that are ready to be delivered, select nonces -/// to fit into single delivery transaction. -/// -/// The function returns nonces that are NOT selected for current batch and will be -/// delivered later. -#[allow(clippy::too_many_arguments)] -async fn select_nonces_for_delivery_transaction( - relayer_mode: RelayerMode, - max_messages_in_this_batch: MessageNonce, - max_messages_weight_in_single_batch: Weight, - max_messages_size_in_single_batch: u32, - lane_source_client: impl MessageLaneSourceClient

, - lane_target_client: impl MessageLaneTargetClient

, - nonces_queue: &SourceRangesQueue< - P::SourceHeaderHash, - P::SourceHeaderNumber, - MessageDetailsMap, - >, - nonces_queue_range: Range, -) -> Option { - let mut hard_selected_count = 0; - let mut soft_selected_count = 0; - - let mut selected_weight: Weight = 0; - let mut selected_unpaid_weight: Weight = 0; - let mut selected_size: u32 = 0; - let mut selected_count: MessageNonce = 0; - - let mut total_reward = P::SourceChainBalance::zero(); - let mut total_confirmations_cost = P::SourceChainBalance::zero(); - let mut total_cost = P::SourceChainBalance::zero(); - - // technically, multiple confirmations will be delivered in a single transaction, - // meaning less loses for relayer. But here we don't know the final relayer yet, so - // we're adding a separate transaction for every message. Normally, this cost is covered - // by the message sender. Probably reconsider this? - let confirmation_transaction_cost = if relayer_mode != RelayerMode::Altruistic { - lane_source_client.estimate_confirmation_transaction().await - } else { - Zero::zero() - }; - - let all_ready_nonces = nonces_queue - .range(nonces_queue_range.clone()) - .flat_map(|(_, ready_nonces)| ready_nonces.iter()) - .enumerate(); - for (index, (nonce, details)) in all_ready_nonces { - // Since we (hopefully) have some reserves in `max_messages_weight_in_single_batch` - // and `max_messages_size_in_single_batch`, we may still try to submit transaction - // with single message if message overflows these limits. The worst case would be if - // transaction will be rejected by the target runtime, but at least we have tried. 
- - // limit messages in the batch by weight - let new_selected_weight = match selected_weight.checked_add(details.dispatch_weight) { - Some(new_selected_weight) if new_selected_weight <= max_messages_weight_in_single_batch => { - new_selected_weight - } - new_selected_weight if selected_count == 0 => { - log::warn!( - target: "bridge", - "Going to submit message delivery transaction with declared dispatch \ - weight {:?} that overflows maximal configured weight {}", - new_selected_weight, - max_messages_weight_in_single_batch, - ); - new_selected_weight.unwrap_or(Weight::MAX) - } - _ => break, - }; - - // limit messages in the batch by size - let new_selected_size = match selected_size.checked_add(details.size) { - Some(new_selected_size) if new_selected_size <= max_messages_size_in_single_batch => new_selected_size, - new_selected_size if selected_count == 0 => { - log::warn!( - target: "bridge", - "Going to submit message delivery transaction with message \ - size {:?} that overflows maximal configured size {}", - new_selected_size, - max_messages_size_in_single_batch, - ); - new_selected_size.unwrap_or(u32::MAX) - } - _ => break, - }; - - // limit number of messages in the batch - let new_selected_count = selected_count + 1; - if new_selected_count > max_messages_in_this_batch { - break; - } - - // If dispatch fee has been paid at the source chain, it means that it is **relayer** who's - // paying for dispatch at the target chain AND reward must cover this dispatch fee. - // - // If dispatch fee is paid at the target chain, it means that it'll be withdrawn from the - // dispatch origin account AND reward is not covering this fee. - // - // So in the latter case we're not adding the dispatch weight to the delivery transaction weight. 
- let new_selected_unpaid_weight = match details.dispatch_fee_payment { - DispatchFeePayment::AtSourceChain => selected_unpaid_weight.saturating_add(details.dispatch_weight), - DispatchFeePayment::AtTargetChain => selected_unpaid_weight, - }; - - // now the message has passed all 'strong' checks, and we CAN deliver it. But do we WANT - // to deliver it? It depends on the relayer strategy. - match relayer_mode { - RelayerMode::Altruistic => { - soft_selected_count = index + 1; - } - RelayerMode::NoLosses => { - let delivery_transaction_cost = lane_target_client - .estimate_delivery_transaction_in_source_tokens( - 0..=(new_selected_count as MessageNonce - 1), - new_selected_unpaid_weight, - new_selected_size as u32, - ) - .await; - - // if it is the first message that makes reward less than cost, let's log it - // if this message makes batch profitable again, let's log it - let is_total_reward_less_than_cost = total_reward < total_cost; - let prev_total_cost = total_cost; - let prev_total_reward = total_reward; - total_confirmations_cost = total_confirmations_cost.saturating_add(&confirmation_transaction_cost); - total_reward = total_reward.saturating_add(&details.reward); - total_cost = total_confirmations_cost.saturating_add(&delivery_transaction_cost); - if !is_total_reward_less_than_cost && total_reward < total_cost { - log::debug!( - target: "bridge", - "Message with nonce {} (reward = {:?}) changes total cost {:?}->{:?} and makes it larger than \ - total reward {:?}->{:?}", - nonce, - details.reward, - prev_total_cost, - total_cost, - prev_total_reward, - total_reward, - ); - } else if is_total_reward_less_than_cost && total_reward >= total_cost { - log::debug!( - target: "bridge", - "Message with nonce {} (reward = {:?}) changes total cost {:?}->{:?} and makes it less than or \ - equal to the total reward {:?}->{:?} (again)", - nonce, - details.reward, - prev_total_cost, - total_cost, - prev_total_reward, - total_reward, - ); - } - - // NoLosses relayer never 
want to lose his funds - if total_reward >= total_cost { - soft_selected_count = index + 1; - } - } - } - - hard_selected_count = index + 1; - selected_weight = new_selected_weight; - selected_unpaid_weight = new_selected_unpaid_weight; - selected_size = new_selected_size; - selected_count = new_selected_count; - } - - let hard_selected_begin_nonce = nonces_queue[nonces_queue_range.start].1.begin(); - if hard_selected_count != soft_selected_count { - let hard_selected_end_nonce = hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1; - let soft_selected_begin_nonce = hard_selected_begin_nonce; - let soft_selected_end_nonce = soft_selected_begin_nonce + soft_selected_count as MessageNonce - 1; - log::warn!( - target: "bridge", - "Relayer may deliver nonces [{:?}; {:?}], but because of its strategy ({:?}) it has selected \ - nonces [{:?}; {:?}].", - hard_selected_begin_nonce, - hard_selected_end_nonce, - relayer_mode, - soft_selected_begin_nonce, - soft_selected_end_nonce, - ); - - hard_selected_count = soft_selected_count; - } - - if hard_selected_count != 0 { - Some(hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1) - } else { - None - } -} - impl NoncesRange for MessageDetailsMap { fn begin(&self) -> MessageNonce { self.keys().next().cloned().unwrap_or_default() @@ -751,26 +559,32 @@ impl NoncesRange for MessageDetailsMap; - type TestStrategy = MessageDeliveryStrategy; + type TestStrategy = + MessageDeliveryStrategy; fn source_nonces( new_nonces: RangeInclusive, @@ -809,7 +623,6 @@ mod tests { }; let mut race_strategy = TestStrategy { - relayer_mode: RelayerMode::Altruistic, max_unrewarded_relayer_entries_at_target: 4, max_unconfirmed_nonces_at_target: 4, max_messages_in_single_batch: 4, @@ -830,16 +643,15 @@ mod tests { }, }), strategy: BasicStrategy::new(), + relay_strategy: MixStrategy::new(RelayerMode::Altruistic), }; - race_strategy - .strategy - .source_nonces_updated(header_id(1), source_nonces(20..=23, 19, DEFAULT_REWARD, 
AtSourceChain)); + race_strategy.strategy.source_nonces_updated( + header_id(1), + source_nonces(20..=23, 19, DEFAULT_REWARD, DispatchFeePayment::AtSourceChain), + ); - let target_nonces = TargetClientNonces { - latest_nonce: 19, - nonces_data: (), - }; + let target_nonces = TargetClientNonces { latest_nonce: 19, nonces_data: () }; race_strategy .strategy .best_target_nonces_updated(target_nonces.clone(), &mut race_state); @@ -859,7 +671,9 @@ mod tests { #[test] fn weights_map_works_as_nonces_range() { - fn build_map(range: RangeInclusive) -> MessageDetailsMap { + fn build_map( + range: RangeInclusive, + ) -> MessageDetailsMap { range .map(|idx| { ( @@ -868,7 +682,7 @@ mod tests { dispatch_weight: idx, size: idx as _, reward: idx as _, - dispatch_fee_payment: AtSourceChain, + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, }, ) }) @@ -906,7 +720,8 @@ mod tests { // we need to wait until confirmations will be delivered by receiving race strategy.latest_confirmed_nonces_at_source = vec![( header_id(1), - strategy.target_nonces.as_ref().unwrap().latest_nonce - strategy.max_unconfirmed_nonces_at_target, + strategy.target_nonces.as_ref().unwrap().latest_nonce - + strategy.max_unconfirmed_nonces_at_target, )] .into_iter() .collect(); @@ -914,13 +729,16 @@ mod tests { } #[async_std::test] - async fn message_delivery_strategy_includes_outbound_state_proof_when_new_nonces_are_available() { + async fn message_delivery_strategy_includes_outbound_state_proof_when_new_nonces_are_available() + { let (state, mut strategy) = prepare_strategy(); // if there are new confirmed nonces on source, we want to relay this information // to target to prune rewards queue - let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; + let prev_confirmed_nonce_at_source = + strategy.latest_confirmed_nonces_at_source.back().unwrap().1; + 
strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = + prev_confirmed_nonce_at_source - 1; assert_eq!( strategy.select_nonces_to_deliver(state).await, Some(((20..=23), proof_parameters(true, 4))) @@ -934,8 +752,10 @@ mod tests { // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, // we need to wait until rewards will be paid { - let mut unrewarded_relayers = &mut strategy.target_nonces.as_mut().unwrap().nonces_data.unrewarded_relayers; - unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target; + let mut unrewarded_relayers = + &mut strategy.target_nonces.as_mut().unwrap().nonces_data.unrewarded_relayers; + unrewarded_relayers.unrewarded_relayer_entries = + strategy.max_unrewarded_relayer_entries_at_target; unrewarded_relayers.messages_in_oldest_entry = 4; } assert_eq!(strategy.select_nonces_to_deliver(state).await, None); @@ -948,12 +768,14 @@ mod tests { // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, // we need to prove at least `messages_in_oldest_entry` rewards - let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; + let prev_confirmed_nonce_at_source = + strategy.latest_confirmed_nonces_at_source.back().unwrap().1; { let mut nonces_data = &mut strategy.target_nonces.as_mut().unwrap().nonces_data; nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; let mut unrewarded_relayers = &mut nonces_data.unrewarded_relayers; - unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target; + unrewarded_relayers.unrewarded_relayer_entries = + strategy.max_unrewarded_relayer_entries_at_target; unrewarded_relayers.messages_in_oldest_entry = 4; } assert_eq!(strategy.select_nonces_to_deliver(state).await, None); @@ -965,12 +787,14 @@ mod tests { // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, // we need to prove 
at least `messages_in_oldest_entry` rewards - let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; + let prev_confirmed_nonce_at_source = + strategy.latest_confirmed_nonces_at_source.back().unwrap().1; { let mut nonces_data = &mut strategy.target_nonces.as_mut().unwrap().nonces_data; nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 3; let mut unrewarded_relayers = &mut nonces_data.unrewarded_relayers; - unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target; + unrewarded_relayers.unrewarded_relayer_entries = + strategy.max_unrewarded_relayer_entries_at_target; unrewarded_relayers.messages_in_oldest_entry = 3; } assert_eq!( @@ -992,15 +816,13 @@ mod tests { } #[async_std::test] - async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_weight() { + async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_weight( + ) { let (state, mut strategy) = prepare_strategy(); - // first message doesn't fit in the batch, because it has weight (10) that overflows max weight (4) - strategy.strategy.source_queue_mut()[0] - .1 - .get_mut(&20) - .unwrap() - .dispatch_weight = 10; + // first message doesn't fit in the batch, because it has weight (10) that overflows max + // weight (4) + strategy.strategy.source_queue_mut()[0].1.get_mut(&20).unwrap().dispatch_weight = 10; assert_eq!( strategy.select_nonces_to_deliver(state).await, Some(((20..=20), proof_parameters(false, 10))) @@ -1020,10 +842,12 @@ mod tests { } #[async_std::test] - async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_size() { + async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_size( + ) { let (state, mut strategy) = prepare_strategy(); - // first message doesn't fit in the batch, because it has weight (10) that overflows max weight (4) + // first 
message doesn't fit in the batch, because it has weight (10) that overflows max + // weight (4) strategy.strategy.source_queue_mut()[0].1.get_mut(&20).unwrap().size = 10; assert_eq!( strategy.select_nonces_to_deliver(state).await, @@ -1035,7 +859,8 @@ mod tests { async fn message_delivery_strategy_limits_batch_by_messages_count_when_there_is_upper_limit() { let (state, mut strategy) = prepare_strategy(); - // not all queued messages may fit in the batch, because batch has max number of messages limit + // not all queued messages may fit in the batch, because batch has max number of messages + // limit strategy.max_messages_in_single_batch = 3; assert_eq!( strategy.select_nonces_to_deliver(state).await, @@ -1044,16 +869,18 @@ mod tests { } #[async_std::test] - async fn message_delivery_strategy_limits_batch_by_messages_count_when_there_are_unconfirmed_nonces() { + async fn message_delivery_strategy_limits_batch_by_messages_count_when_there_are_unconfirmed_nonces( + ) { let (state, mut strategy) = prepare_strategy(); // 1 delivery confirmation from target to source is still missing, so we may only // relay 3 new messages - let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - strategy.latest_confirmed_nonces_at_source = vec![(header_id(1), prev_confirmed_nonce_at_source - 1)] - .into_iter() - .collect(); - strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; + let prev_confirmed_nonce_at_source = + strategy.latest_confirmed_nonces_at_source.back().unwrap().1; + strategy.latest_confirmed_nonces_at_source = + vec![(header_id(1), prev_confirmed_nonce_at_source - 1)].into_iter().collect(); + strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = + prev_confirmed_nonce_at_source - 1; assert_eq!( strategy.select_nonces_to_deliver(state).await, Some(((20..=22), proof_parameters(false, 3))) @@ -1068,30 +895,35 @@ mod tests { // // => so we can't deliver more 
than 3 messages let (mut state, mut strategy) = prepare_strategy(); - let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; + let prev_confirmed_nonce_at_source = + strategy.latest_confirmed_nonces_at_source.back().unwrap().1; strategy.latest_confirmed_nonces_at_source = vec![ (header_id(1), prev_confirmed_nonce_at_source - 1), (header_id(2), prev_confirmed_nonce_at_source), ] .into_iter() .collect(); - strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; + strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = + prev_confirmed_nonce_at_source - 1; state.best_finalized_source_header_id_at_best_target = Some(header_id(1)); assert_eq!( strategy.select_nonces_to_deliver(state).await, Some(((20..=22), proof_parameters(false, 3))) ); - // the same situation, but the header 2 is known to the target node, so we may deliver reward confirmation + // the same situation, but the header 2 is known to the target node, so we may deliver + // reward confirmation let (mut state, mut strategy) = prepare_strategy(); - let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; + let prev_confirmed_nonce_at_source = + strategy.latest_confirmed_nonces_at_source.back().unwrap().1; strategy.latest_confirmed_nonces_at_source = vec![ (header_id(1), prev_confirmed_nonce_at_source - 1), (header_id(2), prev_confirmed_nonce_at_source), ] .into_iter() .collect(); - strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; + strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = + prev_confirmed_nonce_at_source - 1; state.best_finalized_source_header_id_at_source = Some(header_id(2)); state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); assert_eq!( @@ -1105,8 +937,9 @@ mod tests { // let's prepare situation when: // - all messages [20; 23] have been 
generated at source block#1; let (mut state, mut strategy) = prepare_strategy(); - // - messages [20; 21] have been delivered, but messages [11; 20] can't be delivered because of unrewarded - // relayers vector capacity; + // + // - messages [20; 21] have been delivered, but messages [11; 20] can't be delivered because + // of unrewarded relayers vector capacity; strategy.max_unconfirmed_nonces_at_target = 2; assert_eq!( strategy.select_nonces_to_deliver(state.clone()).await, @@ -1127,25 +960,21 @@ mod tests { &mut state, ); assert_eq!(strategy.select_nonces_to_deliver(state).await, None); + // // - messages [1; 10] receiving confirmation has been delivered at source block#2; strategy.source_nonces_updated( header_id(2), - SourceClientNonces { - new_nonces: MessageDetailsMap::new(), - confirmed_nonce: Some(21), - }, + SourceClientNonces { new_nonces: MessageDetailsMap::new(), confirmed_nonce: Some(21) }, ); + // // - so now we'll need to relay source block#11 to be able to accept messages [11; 20]. 
- assert_eq!( - strategy.required_source_header_at_target(&header_id(1)), - Some(header_id(2)) - ); + assert_eq!(strategy.required_source_header_at_target(&header_id(1)), Some(header_id(2))); } #[async_std::test] - async fn no_losses_relayer_is_delivering_messages_if_cost_is_equal_to_reward() { + async fn rational_relayer_is_delivering_messages_if_cost_is_equal_to_reward() { let (state, mut strategy) = prepare_strategy(); - strategy.relayer_mode = RelayerMode::NoLosses; + strategy.relay_strategy = MixStrategy::new(RelayerMode::Rational); // so now we have: // - 20..=23 with reward = cost @@ -1157,17 +986,17 @@ mod tests { } #[async_std::test] - async fn no_losses_relayer_is_not_delivering_messages_if_cost_is_larger_than_reward() { + async fn rational_relayer_is_not_delivering_messages_if_cost_is_larger_than_reward() { let (mut state, mut strategy) = prepare_strategy(); let nonces = source_nonces( 24..=25, 19, DEFAULT_REWARD - BASE_MESSAGE_DELIVERY_TRANSACTION_COST, - AtSourceChain, + DispatchFeePayment::AtSourceChain, ); strategy.strategy.source_nonces_updated(header_id(2), nonces); state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); - strategy.relayer_mode = RelayerMode::NoLosses; + strategy.relay_strategy = MixStrategy::new(RelayerMode::Rational); // so now we have: // - 20..=23 with reward = cost @@ -1180,7 +1009,7 @@ mod tests { } #[async_std::test] - async fn no_losses_relayer_is_delivering_unpaid_messages() { + async fn rational_relayer_is_delivering_unpaid_messages() { async fn test_with_dispatch_fee_payment( dispatch_fee_payment: DispatchFeePayment, ) -> Option<(RangeInclusive, MessageProofParameters)> { @@ -1198,23 +1027,23 @@ mod tests { strategy.max_messages_in_single_batch = 100; strategy.max_messages_weight_in_single_batch = 100; strategy.max_messages_size_in_single_batch = 100; - strategy.relayer_mode = RelayerMode::NoLosses; + strategy.relay_strategy = MixStrategy::new(RelayerMode::Rational); // so now we have: // - 20..=23 
with reward = cost - // - 24..=24 with reward less than cost, but we're deducting `DEFAULT_DISPATCH_WEIGHT` from the - // cost, so it should be fine; + // - 24..=24 with reward less than cost, but we're deducting `DEFAULT_DISPATCH_WEIGHT` + // from the cost, so it should be fine; // => when MSG#24 fee is paid at the target chain, strategy shall select all 20..=24 // => when MSG#25 fee is paid at the source chain, strategy shall only select 20..=23 strategy.select_nonces_to_deliver(state).await } assert_eq!( - test_with_dispatch_fee_payment(AtTargetChain).await, + test_with_dispatch_fee_payment(DispatchFeePayment::AtTargetChain).await, Some(((20..=24), proof_parameters(false, 5))) ); assert_eq!( - test_with_dispatch_fee_payment(AtSourceChain).await, + test_with_dispatch_fee_payment(DispatchFeePayment::AtSourceChain).await, Some(((20..=23), proof_parameters(false, 4))) ); } @@ -1224,13 +1053,13 @@ mod tests { // Real scenario that has happened on test deployments: // 1) relayer witnessed M1 at block 1 => it has separate entry in the `source_queue` // 2) relayer witnessed M2 at block 2 => it has separate entry in the `source_queue` - // 3) if block 2 is known to the target node, then both M1 and M2 are selected for single delivery, - // even though weight(M1+M2) > larger than largest allowed weight + // 3) if block 2 is known to the target node, then both M1 and M2 are selected for single + // delivery, even though weight(M1+M2) > larger than largest allowed weight // - // This was happening because selector (`select_nonces_for_delivery_transaction`) has been called - // for every `source_queue` entry separately without preserving any context. + // This was happening because selector (`select_nonces_for_delivery_transaction`) has been + // called for every `source_queue` entry separately without preserving any context. 
let (mut state, mut strategy) = prepare_strategy(); - let nonces = source_nonces(24..=25, 19, DEFAULT_REWARD, AtSourceChain); + let nonces = source_nonces(24..=25, 19, DEFAULT_REWARD, DispatchFeePayment::AtSourceChain); strategy.strategy.source_nonces_updated(header_id(2), nonces); strategy.max_unrewarded_relayer_entries_at_target = 100; strategy.max_unconfirmed_nonces_at_target = 100; diff --git a/polkadot/bridges/relays/messages/src/message_race_loop.rs b/polkadot/bridges/relays/messages/src/message_race_loop.rs index 3b427a2d0e27f28102b27197e35310f26731e284..a7254f70ee4a472757bf0a77eef4cf82fb8924c3 100644 --- a/polkadot/bridges/relays/messages/src/message_race_loop.rs +++ b/polkadot/bridges/relays/messages/src/message_race_loop.rs @@ -54,10 +54,12 @@ pub trait MessageRace { } /// State of race source client. -type SourceClientState

= ClientState<

::SourceHeaderId,

::TargetHeaderId>; +type SourceClientState

= + ClientState<

::SourceHeaderId,

::TargetHeaderId>; /// State of race target client. -type TargetClientState

= ClientState<

::TargetHeaderId,

::SourceHeaderId>; +type TargetClientState

= + ClientState<

::TargetHeaderId,

::SourceHeaderId>; /// Inclusive nonces range. pub trait NoncesRange: Debug + Sized { @@ -76,7 +78,7 @@ pub struct SourceClientNonces { /// New nonces range known to the client. `New` here means all nonces generated after /// `prev_latest_nonce` passed to the `SourceClient::nonces` method. pub new_nonces: NoncesRange, - /// Latest nonce that is confirmed to the bridged client. This nonce only makes + /// The latest nonce that is confirmed to the bridged client. This nonce only makes /// sense in some races. In other races it is `None`. pub confirmed_nonce: Option, } @@ -84,7 +86,7 @@ pub struct SourceClientNonces { /// Nonces on the race target client. #[derive(Debug, Clone)] pub struct TargetClientNonces { - /// Latest nonce that is known to the target client. + /// The latest nonce that is known to the target client. pub latest_nonce: MessageNonce, /// Additional data from target node that may be used by the race. pub nonces_data: TargetNoncesData, @@ -93,7 +95,7 @@ pub struct TargetClientNonces { /// One of message lane clients, which is source client for the race. #[async_trait] pub trait SourceClient { - /// Type of error this clients returns. + /// Type of error these clients returns. type Error: std::fmt::Debug + MaybeConnectionError; /// Type of nonces range returned by the source client. type NoncesRange: NoncesRange; @@ -118,7 +120,7 @@ pub trait SourceClient { /// One of message lane clients, which is target client for the race. #[async_trait] pub trait TargetClient { - /// Type of error this clients returns. + /// Type of error these clients returns. type Error: std::fmt::Debug + MaybeConnectionError; /// Type of the additional data from the target client, used by the race. type TargetNoncesData: std::fmt::Debug; @@ -155,19 +157,26 @@ pub trait RaceStrategy: Debug { /// Should return true if nothing has to be synced. fn is_empty(&self) -> bool; /// Return id of source header that is required to be on target to continue synchronization. 
- fn required_source_header_at_target(&self, current_best: &SourceHeaderId) -> Option; - /// Return best nonce at source node. + fn required_source_header_at_target( + &self, + current_best: &SourceHeaderId, + ) -> Option; + /// Return the best nonce at source node. /// /// `Some` is returned only if we are sure that the value is greater or equal /// than the result of `best_at_target`. fn best_at_source(&self) -> Option; - /// Return best nonce at target node. + /// Return the best nonce at target node. /// /// May return `None` if value is yet unknown. fn best_at_target(&self) -> Option; /// Called when nonces are updated at source node of the race. - fn source_nonces_updated(&mut self, at_block: SourceHeaderId, nonces: SourceClientNonces); + fn source_nonces_updated( + &mut self, + at_block: SourceHeaderId, + nonces: SourceClientNonces, + ); /// Called when best nonces are updated at target node of the race. fn best_target_nonces_updated( &mut self, @@ -197,7 +206,7 @@ pub struct RaceState { /// Best finalized source header id at the best block on the target /// client (at the `best_finalized_source_header_id_at_best_target`). pub best_finalized_source_header_id_at_best_target: Option, - /// Best header id at the target client. + /// The best header id at the target client. pub best_target_header_id: Option, /// Best finalized header id at the target client. pub best_finalized_target_header_id: Option, @@ -430,8 +439,10 @@ pub async fn run, TC: TargetClient

>( strategy, ); - return Err(FailedClient::Both); - } else if race_state.nonces_to_submit.is_none() && race_state.nonces_submitted.is_none() && strategy.is_empty() + return Err(FailedClient::Both) + } else if race_state.nonces_to_submit.is_none() && + race_state.nonces_submitted.is_none() && + strategy.is_empty() { stall_countdown = Instant::now(); } @@ -439,7 +450,8 @@ pub async fn run, TC: TargetClient

>( if source_client_is_online { source_client_is_online = false; - let nonces_to_deliver = select_nonces_to_deliver(race_state.clone(), &mut strategy).await; + let nonces_to_deliver = + select_nonces_to_deliver(race_state.clone(), &mut strategy).await; let best_at_source = strategy.best_at_source(); if let Some((at_block, nonces_range, proof_parameters)) = nonces_to_deliver { @@ -451,9 +463,7 @@ pub async fn run, TC: TargetClient

>( at_block, ); source_generate_proof.set( - race_source - .generate_proof(at_block, nonces_range, proof_parameters) - .fuse(), + race_source.generate_proof(at_block, nonces_range, proof_parameters).fuse(), ); } else if source_nonces_required && best_at_source.is_some() { log::debug!(target: "bridge", "Asking {} about message nonces", P::source_name()); @@ -516,7 +526,9 @@ pub async fn run, TC: TargetClient

>( } } -impl Default for RaceState { +impl Default + for RaceState +{ fn default() -> Self { RaceState { best_finalized_source_header_id_at_source: None, @@ -539,7 +551,7 @@ where let need_update = now_time.saturating_duration_since(prev_time) > Duration::from_secs(10); if !need_update { - return prev_time; + return prev_time } let now_best_nonce_at_source = strategy.best_at_source(); @@ -569,11 +581,7 @@ where .select_nonces_to_deliver(race_state) .await .map(|(nonces_range, proof_parameters)| { - ( - best_finalized_source_header_id_at_best_target, - nonces_range, - proof_parameters, - ) + (best_finalized_source_header_id_at_best_target, nonces_range, proof_parameters) }) } @@ -592,8 +600,14 @@ mod tests { // target node only knows about source' BEST_AT_TARGET block // source node has BEST_AT_SOURCE > BEST_AT_TARGET block let mut race_state = RaceState::<_, _, ()> { - best_finalized_source_header_id_at_source: Some(HeaderId(BEST_AT_SOURCE, BEST_AT_SOURCE)), - best_finalized_source_header_id_at_best_target: Some(HeaderId(BEST_AT_TARGET, BEST_AT_TARGET)), + best_finalized_source_header_id_at_source: Some(HeaderId( + BEST_AT_SOURCE, + BEST_AT_SOURCE, + )), + best_finalized_source_header_id_at_best_target: Some(HeaderId( + BEST_AT_TARGET, + BEST_AT_TARGET, + )), best_target_header_id: Some(HeaderId(0, 0)), best_finalized_target_header_id: Some(HeaderId(0, 0)), nonces_to_submit: None, @@ -604,16 +618,10 @@ mod tests { let mut strategy = BasicStrategy::new(); strategy.source_nonces_updated( HeaderId(GENERATED_AT, GENERATED_AT), - SourceClientNonces { - new_nonces: 0..=10, - confirmed_nonce: None, - }, + SourceClientNonces { new_nonces: 0..=10, confirmed_nonce: None }, ); strategy.best_target_nonces_updated( - TargetClientNonces { - latest_nonce: 5u64, - nonces_data: (), - }, + TargetClientNonces { latest_nonce: 5u64, nonces_data: () }, &mut race_state, ); diff --git a/polkadot/bridges/relays/messages/src/message_race_receiving.rs 
b/polkadot/bridges/relays/messages/src/message_race_receiving.rs index 4381b63591f718b8fe8301443bffff153a23232e..5aa36cbd9c6dcf76fe86c1c70479ab5deb55deb6 100644 --- a/polkadot/bridges/relays/messages/src/message_race_receiving.rs +++ b/polkadot/bridges/relays/messages/src/message_race_receiving.rs @@ -11,18 +11,21 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -//! Message receiving race delivers proof-of-messages-delivery from lane.target to lane.source. - -use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}; -use crate::message_lane_loop::{ - SourceClient as MessageLaneSourceClient, SourceClientState, TargetClient as MessageLaneTargetClient, - TargetClientState, -}; -use crate::message_race_loop::{ - MessageRace, NoncesRange, SourceClient, SourceClientNonces, TargetClient, TargetClientNonces, +//! Message receiving race delivers proof-of-messages-delivery from "lane.target" to "lane.source". + +use crate::{ + message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, + message_lane_loop::{ + SourceClient as MessageLaneSourceClient, SourceClientState, + TargetClient as MessageLaneTargetClient, TargetClientState, + }, + message_race_loop::{ + MessageRace, NoncesRange, SourceClient, SourceClientNonces, TargetClient, + TargetClientNonces, + }, + message_race_strategy::BasicStrategy, + metrics::MessageLaneLoopMetrics, }; -use crate::message_race_strategy::BasicStrategy; -use crate::metrics::MessageLaneLoopMetrics; use async_trait::async_trait; use bp_messages::MessageNonce; @@ -129,11 +132,7 @@ where nonces: RangeInclusive, _proof_parameters: Self::ProofParameters, ) -> Result< - ( - TargetHeaderIdOf

, - RangeInclusive, - P::MessagesReceivingProof, - ), + (TargetHeaderIdOf

, RangeInclusive, P::MessagesReceivingProof), Self::Error, > { self.client @@ -168,19 +167,14 @@ where at_block: SourceHeaderIdOf

, update_metrics: bool, ) -> Result<(SourceHeaderIdOf

, TargetClientNonces<()>), Self::Error> { - let (at_block, latest_confirmed_nonce) = self.client.latest_confirmed_received_nonce(at_block).await?; + let (at_block, latest_confirmed_nonce) = + self.client.latest_confirmed_received_nonce(at_block).await?; if update_metrics { if let Some(metrics_msg) = self.metrics_msg.as_ref() { metrics_msg.update_source_latest_confirmed_nonce::

(latest_confirmed_nonce); } } - Ok(( - at_block, - TargetClientNonces { - latest_nonce: latest_confirmed_nonce, - nonces_data: (), - }, - )) + Ok((at_block, TargetClientNonces { latest_nonce: latest_confirmed_nonce, nonces_data: () })) } async fn submit_proof( @@ -189,9 +183,7 @@ where nonces: RangeInclusive, proof: P::MessagesReceivingProof, ) -> Result, Self::Error> { - self.client - .submit_messages_receiving_proof(generated_at_block, proof) - .await?; + self.client.submit_messages_receiving_proof(generated_at_block, proof).await?; Ok(nonces) } } diff --git a/polkadot/bridges/relays/messages/src/message_race_strategy.rs b/polkadot/bridges/relays/messages/src/message_race_strategy.rs index ff5c1eda012d69801b7d06e41f2937300edea396..4ecf451deb07e0b5d46f171c160267a529480e47 100644 --- a/polkadot/bridges/relays/messages/src/message_race_strategy.rs +++ b/polkadot/bridges/relays/messages/src/message_race_strategy.rs @@ -17,7 +17,9 @@ //! 2) new nonces may be proved to target node (i.e. they have appeared at the //! block, which is known to the target node). -use crate::message_race_loop::{NoncesRange, RaceState, RaceStrategy, SourceClientNonces, TargetClientNonces}; +use crate::message_race_loop::{ + NoncesRange, RaceState, RaceStrategy, SourceClientNonces, TargetClientNonces, +}; use async_trait::async_trait; use bp_messages::MessageNonce; @@ -40,15 +42,29 @@ pub struct BasicStrategy< > { /// All queued nonces. source_queue: SourceRangesQueue, - /// Best nonce known to target node (at its best block). `None` if it has not been received yet. + /// The best nonce known to target node (at its best block). `None` if it has not been received + /// yet. best_target_nonce: Option, /// Unused generic types dump. 
_phantom: PhantomData<(TargetHeaderNumber, TargetHeaderHash, Proof)>, } -impl - BasicStrategy -where +impl< + SourceHeaderNumber, + SourceHeaderHash, + TargetHeaderNumber, + TargetHeaderHash, + SourceNoncesRange, + Proof, + > + BasicStrategy< + SourceHeaderNumber, + SourceHeaderHash, + TargetHeaderNumber, + TargetHeaderHash, + SourceNoncesRange, + Proof, + > where SourceHeaderHash: Clone, SourceHeaderNumber: Clone + Ord, SourceNoncesRange: NoncesRange, @@ -79,9 +95,9 @@ where /// Returns index of the latest source queue entry, that may be delivered to the target node. /// - /// Returns `None` if no entries may be delivered. All entries before and including the `Some(_)` - /// index are guaranteed to be witnessed at source blocks that are known to be finalized at the - /// target node. + /// Returns `None` if no entries may be delivered. All entries before and including the + /// `Some(_)` index are guaranteed to be witnessed at source blocks that are known to be + /// finalized at the target node. 
pub fn maximal_available_source_queue_index( &self, race_state: RaceState< @@ -95,12 +111,12 @@ where // if we have already selected nonces that we want to submit, do nothing if race_state.nonces_to_submit.is_some() { - return None; + return None } // if we already submitted some nonces, do nothing if race_state.nonces_submitted.is_some() { - return None; + return None } // 1) we want to deliver all nonces, starting from `target_nonce + 1` @@ -124,17 +140,34 @@ where while let Some((queued_at, queued_range)) = self.source_queue.pop_front() { if let Some(range_to_requeue) = queued_range.greater_than(nonce) { self.source_queue.push_front((queued_at, range_to_requeue)); - break; + break } } } } #[async_trait] -impl - RaceStrategy, HeaderId, Proof> - for BasicStrategy -where +impl< + SourceHeaderNumber, + SourceHeaderHash, + TargetHeaderNumber, + TargetHeaderHash, + SourceNoncesRange, + Proof, + > + RaceStrategy< + HeaderId, + HeaderId, + Proof, + > + for BasicStrategy< + SourceHeaderNumber, + SourceHeaderHash, + TargetHeaderNumber, + TargetHeaderHash, + SourceNoncesRange, + Proof, + > where SourceHeaderHash: Clone + Debug + Send, SourceHeaderNumber: Clone + Ord + Debug + Send, SourceNoncesRange: NoncesRange + Debug + Send, @@ -162,7 +195,8 @@ where fn best_at_source(&self) -> Option { let best_in_queue = self.source_queue.back().map(|(_, range)| range.end()); match (best_in_queue, self.best_target_nonce) { - (Some(best_in_queue), Some(best_target_nonce)) if best_in_queue > best_target_nonce => Some(best_in_queue), + (Some(best_in_queue), Some(best_target_nonce)) if best_in_queue > best_target_nonce => + Some(best_in_queue), (_, Some(best_target_nonce)) => Some(best_target_nonce), (_, None) => None, } @@ -205,18 +239,17 @@ where if let Some(best_target_nonce) = self.best_target_nonce { if nonce < best_target_nonce { - return; + return } } while let Some(true) = self.source_queue.front().map(|(_, range)| range.begin() <= nonce) { - let maybe_subrange = self - 
.source_queue - .pop_front() - .and_then(|(at_block, range)| range.greater_than(nonce).map(|subrange| (at_block, subrange))); + let maybe_subrange = self.source_queue.pop_front().and_then(|(at_block, range)| { + range.greater_than(nonce).map(|subrange| (at_block, subrange)) + }); if let Some((at_block, subrange)) = maybe_subrange { self.source_queue.push_front((at_block, subrange)); - break; + break } } @@ -238,10 +271,8 @@ where race_state.nonces_submitted = None; } - self.best_target_nonce = Some(std::cmp::max( - self.best_target_nonce.unwrap_or(nonces.latest_nonce), - nonce, - )); + self.best_target_nonce = + Some(std::cmp::max(self.best_target_nonce.unwrap_or(nonces.latest_nonce), nonce)); } fn finalized_target_nonces_updated( @@ -278,9 +309,12 @@ where #[cfg(test)] mod tests { use super::*; - use crate::message_lane::MessageLane; - use crate::message_lane_loop::tests::{ - header_id, TestMessageLane, TestMessagesProof, TestSourceHeaderHash, TestSourceHeaderNumber, + use crate::{ + message_lane::MessageLane, + message_lane_loop::tests::{ + header_id, TestMessageLane, TestMessagesProof, TestSourceHeaderHash, + TestSourceHeaderNumber, + }, }; type SourceNoncesRange = RangeInclusive; @@ -295,17 +329,11 @@ mod tests { >; fn source_nonces(new_nonces: SourceNoncesRange) -> SourceClientNonces { - SourceClientNonces { - new_nonces, - confirmed_nonce: None, - } + SourceClientNonces { new_nonces, confirmed_nonce: None } } fn target_nonces(latest_nonce: MessageNonce) -> TargetClientNonces<()> { - TargetClientNonces { - latest_nonce, - nonces_data: (), - } + TargetClientNonces { latest_nonce, nonces_data: () } } #[test] @@ -420,18 +448,12 @@ mod tests { strategy.source_nonces_updated(header_id(5), source_nonces(7..=8)); state.best_finalized_source_header_id_at_best_target = Some(header_id(4)); - assert_eq!( - strategy.select_nonces_to_deliver(state.clone()).await, - Some((1..=6, ())) - ); + assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, Some((1..=6, 
()))); strategy.best_target_nonces_updated(target_nonces(6), &mut state); assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None); state.best_finalized_source_header_id_at_best_target = Some(header_id(5)); - assert_eq!( - strategy.select_nonces_to_deliver(state.clone()).await, - Some((7..=8, ())) - ); + assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, Some((7..=8, ()))); strategy.best_target_nonces_updated(target_nonces(8), &mut state); assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None); } @@ -471,16 +493,17 @@ mod tests { strategy.source_nonces_updated(header_id(3), source_nonces(7..=9)); fn source_queue_nonces( - source_queue: &SourceRangesQueue, + source_queue: &SourceRangesQueue< + TestSourceHeaderHash, + TestSourceHeaderNumber, + SourceNoncesRange, + >, ) -> Vec { source_queue.iter().flat_map(|(_, range)| range.clone()).collect() } strategy.remove_le_nonces_from_source_queue(1); - assert_eq!( - source_queue_nonces(&strategy.source_queue), - vec![2, 3, 4, 5, 6, 7, 8, 9], - ); + assert_eq!(source_queue_nonces(&strategy.source_queue), vec![2, 3, 4, 5, 6, 7, 8, 9],); strategy.remove_le_nonces_from_source_queue(5); assert_eq!(source_queue_nonces(&strategy.source_queue), vec![6, 7, 8, 9]); diff --git a/polkadot/bridges/relays/messages/src/metrics.rs b/polkadot/bridges/relays/messages/src/metrics.rs index 51a4118be8582a5a692e6563b14d903e99b678dd..eac2f703692a19297cb5fd8d012e8a28fee724ce 100644 --- a/polkadot/bridges/relays/messages/src/metrics.rs +++ b/polkadot/bridges/relays/messages/src/metrics.rs @@ -16,11 +16,15 @@ //! Metrics for message lane relay loop. 
-use crate::message_lane::MessageLane; -use crate::message_lane_loop::{SourceClientState, TargetClientState}; +use crate::{ + message_lane::MessageLane, + message_lane_loop::{SourceClientState, TargetClientState}, +}; use bp_messages::MessageNonce; -use relay_utils::metrics::{metric_name, register, GaugeVec, Opts, PrometheusError, Registry, U64}; +use relay_utils::metrics::{ + metric_name, register, GaugeVec, Metric, Opts, PrometheusError, Registry, U64, +}; /// Message lane relay metrics. /// @@ -36,30 +40,22 @@ pub struct MessageLaneLoopMetrics { impl MessageLaneLoopMetrics { /// Create and register messages loop metrics. - pub fn new(registry: &Registry, prefix: Option<&str>) -> Result { + pub fn new(prefix: Option<&str>) -> Result { Ok(MessageLaneLoopMetrics { - best_block_numbers: register( - GaugeVec::new( - Opts::new( - metric_name(prefix, "best_block_numbers"), - "Best finalized block numbers", - ), - &["type"], - )?, - registry, + best_block_numbers: GaugeVec::new( + Opts::new( + metric_name(prefix, "best_block_numbers"), + "Best finalized block numbers", + ), + &["type"], )?, - lane_state_nonces: register( - GaugeVec::new( - Opts::new(metric_name(prefix, "lane_state_nonces"), "Nonces of the lane state"), - &["type"], - )?, - registry, + lane_state_nonces: GaugeVec::new( + Opts::new(metric_name(prefix, "lane_state_nonces"), "Nonces of the lane state"), + &["type"], )?, }) } -} -impl MessageLaneLoopMetrics { /// Update source client state metrics. pub fn update_source_state(&self, source_client_state: SourceClientState

) { self.best_block_numbers @@ -81,30 +77,50 @@ impl MessageLaneLoopMetrics { } /// Update latest generated nonce at source. - pub fn update_source_latest_generated_nonce(&self, source_latest_generated_nonce: MessageNonce) { + pub fn update_source_latest_generated_nonce( + &self, + source_latest_generated_nonce: MessageNonce, + ) { self.lane_state_nonces .with_label_values(&["source_latest_generated"]) .set(source_latest_generated_nonce); } - /// Update latest confirmed nonce at source. - pub fn update_source_latest_confirmed_nonce(&self, source_latest_confirmed_nonce: MessageNonce) { + /// Update the latest confirmed nonce at source. + pub fn update_source_latest_confirmed_nonce( + &self, + source_latest_confirmed_nonce: MessageNonce, + ) { self.lane_state_nonces .with_label_values(&["source_latest_confirmed"]) .set(source_latest_confirmed_nonce); } - /// Update latest received nonce at target. - pub fn update_target_latest_received_nonce(&self, target_latest_generated_nonce: MessageNonce) { + /// Update the latest received nonce at target. + pub fn update_target_latest_received_nonce( + &self, + target_latest_generated_nonce: MessageNonce, + ) { self.lane_state_nonces .with_label_values(&["target_latest_received"]) .set(target_latest_generated_nonce); } - /// Update latest confirmed nonce at target. - pub fn update_target_latest_confirmed_nonce(&self, target_latest_confirmed_nonce: MessageNonce) { + /// Update the latest confirmed nonce at target. 
+ pub fn update_target_latest_confirmed_nonce( + &self, + target_latest_confirmed_nonce: MessageNonce, + ) { self.lane_state_nonces .with_label_values(&["target_latest_confirmed"]) .set(target_latest_confirmed_nonce); } } + +impl Metric for MessageLaneLoopMetrics { + fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { + register(self.best_block_numbers.clone(), registry)?; + register(self.lane_state_nonces.clone(), registry)?; + Ok(()) + } +} diff --git a/polkadot/bridges/relays/headers/src/lib.rs b/polkadot/bridges/relays/messages/src/relay_strategy/altruistic_strategy.rs similarity index 53% rename from polkadot/bridges/relays/headers/src/lib.rs rename to polkadot/bridges/relays/messages/src/relay_strategy/altruistic_strategy.rs index 8946355921f0d5255b4264e722eec211828abffd..d6fec7f1297b3ec6e6b4b7dccc7aada76d11f307 100644 --- a/polkadot/bridges/relays/headers/src/lib.rs +++ b/polkadot/bridges/relays/messages/src/relay_strategy/altruistic_strategy.rs @@ -14,20 +14,32 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -//! Relaying source chain headers to target chain. This module provides entrypoint -//! that starts reading new headers from source chain and submit these headers as -//! module/contract transactions to the target chain. Pallet/contract on the target -//! chain is a light-client of the source chain. All other trustless bridge -//! applications are built using this light-client, so running headers-relay is -//! essential for running all other bridge applications. - -// required for futures::select! -#![recursion_limit = "1024"] -#![warn(missing_docs)] - -pub mod headers; -pub mod sync; -pub mod sync_loop; -pub mod sync_loop_metrics; -pub mod sync_loop_tests; -pub mod sync_types; +//! 
Altruistic relay strategy + +use async_trait::async_trait; + +use crate::{ + message_lane::MessageLane, + message_lane_loop::{ + SourceClient as MessageLaneSourceClient, TargetClient as MessageLaneTargetClient, + }, + relay_strategy::{RelayReference, RelayStrategy}, +}; + +/// The relayer doesn't care about rewards. +#[derive(Clone)] +pub struct AltruisticStrategy; + +#[async_trait] +impl RelayStrategy for AltruisticStrategy { + async fn decide< + P: MessageLane, + SourceClient: MessageLaneSourceClient

, + TargetClient: MessageLaneTargetClient

, + >( + &mut self, + _reference: &mut RelayReference, + ) -> bool { + true + } +} diff --git a/polkadot/bridges/relays/messages/src/relay_strategy/enforcement_strategy.rs b/polkadot/bridges/relays/messages/src/relay_strategy/enforcement_strategy.rs new file mode 100644 index 0000000000000000000000000000000000000000..1e9ef5bdbf818e698180e77cc9a84f86da9ee2d6 --- /dev/null +++ b/polkadot/bridges/relays/messages/src/relay_strategy/enforcement_strategy.rs @@ -0,0 +1,219 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! enforcement strategy + +use num_traits::Zero; + +use bp_messages::{MessageNonce, Weight}; +use bp_runtime::messages::DispatchFeePayment; + +use crate::{ + message_lane::MessageLane, + message_lane_loop::{ + MessageDetails, SourceClient as MessageLaneSourceClient, + TargetClient as MessageLaneTargetClient, + }, + message_race_loop::NoncesRange, + relay_strategy::{RelayMessagesBatchReference, RelayReference, RelayStrategy}, +}; + +/// Do hard check and run soft check strategy +#[derive(Clone)] +pub struct EnforcementStrategy { + strategy: Strategy, +} + +impl EnforcementStrategy { + pub fn new(strategy: Strategy) -> Self { + Self { strategy } + } +} + +impl EnforcementStrategy { + pub async fn decide< + P: MessageLane, + SourceClient: MessageLaneSourceClient

, + TargetClient: MessageLaneTargetClient

, + >( + &mut self, + reference: RelayMessagesBatchReference, + ) -> Option { + let mut hard_selected_count = 0; + let mut soft_selected_count = 0; + + let mut selected_weight: Weight = 0; + let mut selected_count: MessageNonce = 0; + + let hard_selected_begin_nonce = + reference.nonces_queue[reference.nonces_queue_range.start].1.begin(); + + // relay reference + let mut relay_reference = RelayReference { + lane_source_client: reference.lane_source_client.clone(), + lane_target_client: reference.lane_target_client.clone(), + + selected_reward: P::SourceChainBalance::zero(), + selected_cost: P::SourceChainBalance::zero(), + selected_size: 0, + + total_reward: P::SourceChainBalance::zero(), + total_confirmations_cost: P::SourceChainBalance::zero(), + total_cost: P::SourceChainBalance::zero(), + + hard_selected_begin_nonce, + selected_prepaid_nonces: 0, + selected_unpaid_weight: 0, + + index: 0, + nonce: 0, + details: MessageDetails { + dispatch_weight: 0, + size: 0, + reward: P::SourceChainBalance::zero(), + dispatch_fee_payment: DispatchFeePayment::AtSourceChain, + }, + }; + + let all_ready_nonces = reference + .nonces_queue + .range(reference.nonces_queue_range.clone()) + .flat_map(|(_, ready_nonces)| ready_nonces.iter()) + .enumerate(); + for (index, (nonce, details)) in all_ready_nonces { + relay_reference.index = index; + relay_reference.nonce = *nonce; + relay_reference.details = *details; + + // Since we (hopefully) have some reserves in `max_messages_weight_in_single_batch` + // and `max_messages_size_in_single_batch`, we may still try to submit transaction + // with single message if message overflows these limits. The worst case would be if + // transaction will be rejected by the target runtime, but at least we have tried. 
+ + // limit messages in the batch by weight + let new_selected_weight = match selected_weight.checked_add(details.dispatch_weight) { + Some(new_selected_weight) + if new_selected_weight <= reference.max_messages_weight_in_single_batch => + new_selected_weight, + new_selected_weight if selected_count == 0 => { + log::warn!( + target: "bridge", + "Going to submit message delivery transaction with declared dispatch \ + weight {:?} that overflows maximal configured weight {}", + new_selected_weight, + reference.max_messages_weight_in_single_batch, + ); + new_selected_weight.unwrap_or(Weight::MAX) + }, + _ => break, + }; + + // limit messages in the batch by size + let new_selected_size = match relay_reference.selected_size.checked_add(details.size) { + Some(new_selected_size) + if new_selected_size <= reference.max_messages_size_in_single_batch => + new_selected_size, + new_selected_size if selected_count == 0 => { + log::warn!( + target: "bridge", + "Going to submit message delivery transaction with message \ + size {:?} that overflows maximal configured size {}", + new_selected_size, + reference.max_messages_size_in_single_batch, + ); + new_selected_size.unwrap_or(u32::MAX) + }, + _ => break, + }; + + // limit number of messages in the batch + let new_selected_count = selected_count + 1; + if new_selected_count > reference.max_messages_in_this_batch { + break + } + relay_reference.selected_size = new_selected_size; + + // If dispatch fee has been paid at the source chain, it means that it is **relayer** + // who's paying for dispatch at the target chain AND reward must cover this dispatch + // fee. + // + // If dispatch fee is paid at the target chain, it means that it'll be withdrawn from + // the dispatch origin account AND reward is not covering this fee. + // + // So in the latter case we're not adding the dispatch weight to the delivery + // transaction weight. 
+ let mut new_selected_prepaid_nonces = relay_reference.selected_prepaid_nonces; + let new_selected_unpaid_weight = match details.dispatch_fee_payment { + DispatchFeePayment::AtSourceChain => { + new_selected_prepaid_nonces += 1; + relay_reference.selected_unpaid_weight.saturating_add(details.dispatch_weight) + }, + DispatchFeePayment::AtTargetChain => relay_reference.selected_unpaid_weight, + }; + relay_reference.selected_prepaid_nonces = new_selected_prepaid_nonces; + relay_reference.selected_unpaid_weight = new_selected_unpaid_weight; + + // now the message has passed all 'strong' checks, and we CAN deliver it. But do we WANT + // to deliver it? It depends on the relayer strategy. + if self.strategy.decide(&mut relay_reference).await { + soft_selected_count = index + 1; + } + + hard_selected_count = index + 1; + selected_weight = new_selected_weight; + selected_count = new_selected_count; + } + + if hard_selected_count != soft_selected_count { + let hard_selected_end_nonce = + hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1; + let soft_selected_begin_nonce = hard_selected_begin_nonce; + let soft_selected_end_nonce = + soft_selected_begin_nonce + soft_selected_count as MessageNonce - 1; + log::warn!( + target: "bridge", + "Relayer may deliver nonces [{:?}; {:?}], but because of its strategy it has selected \ + nonces [{:?}; {:?}].", + hard_selected_begin_nonce, + hard_selected_end_nonce, + soft_selected_begin_nonce, + soft_selected_end_nonce, + ); + + hard_selected_count = soft_selected_count; + } + + if hard_selected_count != 0 { + if relay_reference.selected_reward != P::SourceChainBalance::zero() && + relay_reference.selected_cost != P::SourceChainBalance::zero() + { + log::trace!( + target: "bridge", + "Expected reward from delivering nonces [{:?}; {:?}] is: {:?} - {:?} = {:?}", + hard_selected_begin_nonce, + hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1, + &relay_reference.selected_reward, + 
&relay_reference.selected_cost, + relay_reference.selected_reward - relay_reference.selected_cost, + ); + } + + Some(hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1) + } else { + None + } + } +} diff --git a/polkadot/bridges/relays/messages/src/relay_strategy/mix_strategy.rs b/polkadot/bridges/relays/messages/src/relay_strategy/mix_strategy.rs new file mode 100644 index 0000000000000000000000000000000000000000..4ac7fe1d0ed0698a033eebe07c186e663a2d8261 --- /dev/null +++ b/polkadot/bridges/relays/messages/src/relay_strategy/mix_strategy.rs @@ -0,0 +1,58 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Adapter for using `enum RelayerMode` in a context which requires `RelayStrategy`. + +use async_trait::async_trait; + +use crate::{ + message_lane::MessageLane, + message_lane_loop::{ + RelayerMode, SourceClient as MessageLaneSourceClient, + TargetClient as MessageLaneTargetClient, + }, + relay_strategy::{AltruisticStrategy, RationalStrategy, RelayReference, RelayStrategy}, +}; + +/// `RelayerMode` adapter. 
+#[derive(Clone)] +pub struct MixStrategy { + relayer_mode: RelayerMode, +} + +impl MixStrategy { + /// Create mix strategy instance + pub fn new(relayer_mode: RelayerMode) -> Self { + Self { relayer_mode } + } +} + +#[async_trait] +impl RelayStrategy for MixStrategy { + async fn decide< + P: MessageLane, + SourceClient: MessageLaneSourceClient

, + TargetClient: MessageLaneTargetClient

, + >( + &mut self, + reference: &mut RelayReference, + ) -> bool { + match self.relayer_mode { + RelayerMode::Altruistic => AltruisticStrategy.decide(reference).await, + RelayerMode::Rational => RationalStrategy.decide(reference).await, + } + } +} diff --git a/polkadot/bridges/relays/messages/src/relay_strategy/mod.rs b/polkadot/bridges/relays/messages/src/relay_strategy/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..d902bd93e5cf98c65b6bda47126385c5555dfb2b --- /dev/null +++ b/polkadot/bridges/relays/messages/src/relay_strategy/mod.rs @@ -0,0 +1,123 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! 
Relayer strategy + +use std::ops::Range; + +use async_trait::async_trait; + +use bp_messages::{MessageNonce, Weight}; + +use crate::{ + message_lane::MessageLane, + message_lane_loop::{ + MessageDetails, MessageDetailsMap, SourceClient as MessageLaneSourceClient, + TargetClient as MessageLaneTargetClient, + }, + message_race_strategy::SourceRangesQueue, +}; + +pub(crate) use self::enforcement_strategy::*; +pub use self::{altruistic_strategy::*, mix_strategy::*, rational_strategy::*}; + +mod altruistic_strategy; +mod enforcement_strategy; +mod mix_strategy; +mod rational_strategy; + +/// Relayer strategy trait +#[async_trait] +pub trait RelayStrategy: 'static + Clone + Send + Sync { + /// The relayer decide how to process nonce by reference. + /// From given set of source nonces, that are ready to be delivered, select nonces + /// to fit into single delivery transaction. + /// + /// The function returns last nonce that must be delivered to the target chain. + async fn decide< + P: MessageLane, + SourceClient: MessageLaneSourceClient

, + TargetClient: MessageLaneTargetClient

, + >( + &mut self, + reference: &mut RelayReference, + ) -> bool; +} + +/// Reference data for participating in relay +pub struct RelayReference< + P: MessageLane, + SourceClient: MessageLaneSourceClient

, + TargetClient: MessageLaneTargetClient

, +> { + /// The client that is connected to the message lane source node. + pub lane_source_client: SourceClient, + /// The client that is connected to the message lane target node. + pub lane_target_client: TargetClient, + /// Current block reward summary + pub selected_reward: P::SourceChainBalance, + /// Current block cost summary + pub selected_cost: P::SourceChainBalance, + /// Messages size summary + pub selected_size: u32, + + /// Current block reward summary + pub total_reward: P::SourceChainBalance, + /// All confirmations cost + pub total_confirmations_cost: P::SourceChainBalance, + /// Current block cost summary + pub total_cost: P::SourceChainBalance, + + /// Hard check begin nonce + pub hard_selected_begin_nonce: MessageNonce, + /// Count prepaid nonces + pub selected_prepaid_nonces: MessageNonce, + /// Unpaid nonces weight summary + pub selected_unpaid_weight: Weight, + + /// Index by all ready nonces + pub index: usize, + /// Current nonce + pub nonce: MessageNonce, + /// Current nonce details + pub details: MessageDetails, +} + +/// Relay reference data +pub struct RelayMessagesBatchReference< + P: MessageLane, + SourceClient: MessageLaneSourceClient

, + TargetClient: MessageLaneTargetClient

, +> { + /// Maximal number of relayed messages in single delivery transaction. + pub max_messages_in_this_batch: MessageNonce, + /// Maximal cumulative dispatch weight of relayed messages in single delivery transaction. + pub max_messages_weight_in_single_batch: Weight, + /// Maximal cumulative size of relayed messages in single delivery transaction. + pub max_messages_size_in_single_batch: u32, + /// The client that is connected to the message lane source node. + pub lane_source_client: SourceClient, + /// The client that is connected to the message lane target node. + pub lane_target_client: TargetClient, + /// Source queue. + pub nonces_queue: SourceRangesQueue< + P::SourceHeaderHash, + P::SourceHeaderNumber, + MessageDetailsMap, + >, + /// Source queue range + pub nonces_queue_range: Range, +} diff --git a/polkadot/bridges/relays/messages/src/relay_strategy/rational_strategy.rs b/polkadot/bridges/relays/messages/src/relay_strategy/rational_strategy.rs new file mode 100644 index 0000000000000000000000000000000000000000..fd0a1ffafc8b91b860c99bf8fc44f93ec39d6cea --- /dev/null +++ b/polkadot/bridges/relays/messages/src/relay_strategy/rational_strategy.rs @@ -0,0 +1,122 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! 
Rational relay strategy + +use async_trait::async_trait; +use num_traits::SaturatingAdd; + +use bp_messages::MessageNonce; + +use crate::{ + message_lane::MessageLane, + message_lane_loop::{ + SourceClient as MessageLaneSourceClient, TargetClient as MessageLaneTargetClient, + }, + relay_strategy::{RelayReference, RelayStrategy}, +}; + +/// The relayer will deliver all messages and confirmations as long as he's not losing any +/// funds. +#[derive(Clone)] +pub struct RationalStrategy; + +#[async_trait] +impl RelayStrategy for RationalStrategy { + async fn decide< + P: MessageLane, + SourceClient: MessageLaneSourceClient

, + TargetClient: MessageLaneTargetClient

, + >( + &mut self, + reference: &mut RelayReference, + ) -> bool { + // technically, multiple confirmations will be delivered in a single transaction, + // meaning less loses for relayer. But here we don't know the final relayer yet, so + // we're adding a separate transaction for every message. Normally, this cost is covered + // by the message sender. Probably reconsider this? + let confirmation_transaction_cost = + reference.lane_source_client.estimate_confirmation_transaction().await; + + let delivery_transaction_cost = match reference + .lane_target_client + .estimate_delivery_transaction_in_source_tokens( + reference.hard_selected_begin_nonce..= + (reference.hard_selected_begin_nonce + reference.index as MessageNonce), + reference.selected_prepaid_nonces, + reference.selected_unpaid_weight, + reference.selected_size as u32, + ) + .await + { + Ok(v) => v, + Err(err) => { + log::debug!( + target: "bridge", + "Failed to estimate delivery transaction cost: {:?}. No nonces selected for delivery", + err, + ); + return false + }, + }; + + // if it is the first message that makes reward less than cost, let's log it + // if this message makes batch profitable again, let's log it + let is_total_reward_less_than_cost = reference.total_reward < reference.total_cost; + let prev_total_cost = reference.total_cost; + let prev_total_reward = reference.total_reward; + reference.total_confirmations_cost = reference + .total_confirmations_cost + .saturating_add(&confirmation_transaction_cost); + reference.total_reward = reference.total_reward.saturating_add(&reference.details.reward); + reference.total_cost = + reference.total_confirmations_cost.saturating_add(&delivery_transaction_cost); + if !is_total_reward_less_than_cost && reference.total_reward < reference.total_cost { + log::debug!( + target: "bridge", + "Message with nonce {} (reward = {:?}) changes total cost {:?}->{:?} and makes it larger than \ + total reward {:?}->{:?}", + reference.nonce, + 
reference.details.reward, + prev_total_cost, + reference.total_cost, + prev_total_reward, + reference.total_reward, + ); + } else if is_total_reward_less_than_cost && reference.total_reward >= reference.total_cost { + log::debug!( + target: "bridge", + "Message with nonce {} (reward = {:?}) changes total cost {:?}->{:?} and makes it less than or \ + equal to the total reward {:?}->{:?} (again)", + reference.nonce, + reference.details.reward, + prev_total_cost, + reference.total_cost, + prev_total_reward, + reference.total_reward, + ); + } + + // Rational relayer never want to lose his funds + if reference.total_reward >= reference.total_cost { + reference.selected_reward = reference.total_reward; + reference.selected_cost = reference.total_cost; + return true + } + + false + } +} diff --git a/polkadot/bridges/relays/utils/Cargo.toml b/polkadot/bridges/relays/utils/Cargo.toml index ff80cab5338131ecc0c1becb3edb9c1d327f10ae..a08c3b3d688df07361b0c09872a38858de911c70 100644 --- a/polkadot/bridges/relays/utils/Cargo.toml +++ b/polkadot/bridges/relays/utils/Cargo.toml @@ -7,6 +7,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" [dependencies] ansi_term = "0.12" +anyhow = "1.0" async-std = "1.6.5" async-trait = "0.1.40" backoff = "0.2" @@ -19,6 +20,11 @@ num-traits = "0.2" serde_json = "1.0" sysinfo = "0.15" time = "0.2" +thiserror = "1.0.26" + +# Bridge dependencies + +bp-runtime = { path = "../../primitives/runtime" } # Substrate dependencies diff --git a/polkadot/bridges/relays/utils/src/error.rs b/polkadot/bridges/relays/utils/src/error.rs new file mode 100644 index 0000000000000000000000000000000000000000..26f1d0cacefd8eef5687e0102588f999859012a5 --- /dev/null +++ b/polkadot/bridges/relays/utils/src/error.rs @@ -0,0 +1,46 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use std::net::AddrParseError; +use thiserror::Error; + +/// Result type used by relay utilities. +pub type Result = std::result::Result; + +/// Relay utilities errors. +#[derive(Error, Debug)] +pub enum Error { + /// Failed to request a float value from HTTP service. + #[error("Failed to fetch token price from remote server: {0}")] + FetchTokenPrice(#[source] anyhow::Error), + /// Failed to parse the response from HTTP service. + #[error("Failed to parse HTTP service response: {0:?}. Response: {1:?}")] + ParseHttp(serde_json::Error, String), + /// Failed to select response value from the Json response. + #[error("Failed to select value from response: {0:?}. Response: {1:?}")] + SelectResponseValue(jsonpath_lib::JsonPathError, String), + /// Failed to parse float value from the selected value. + #[error( + "Failed to parse float value {0:?} from response. It is assumed to be positive and normal" + )] + ParseFloat(f64), + /// Couldn't found value in the JSON response. + #[error("Missing required value from response: {0:?}")] + MissingResponseValue(String), + /// Invalid host address was used for exposing Prometheus metrics. + #[error("Invalid host {0} is used to expose Prometheus metrics: {1}")] + ExposingMetricsInvalidHost(String, AddrParseError), + /// Prometheus error. 
+ #[error("{0}")] + Prometheus(#[from] substrate_prometheus_endpoint::prometheus::Error), +} diff --git a/polkadot/bridges/relays/utils/src/initialize.rs b/polkadot/bridges/relays/utils/src/initialize.rs index b87937923bd4e0b70be329cb09190076fbc4a2e2..8c13a4d61cb3a5bc4062cf2ed1373bdb580fada1 100644 --- a/polkadot/bridges/relays/utils/src/initialize.rs +++ b/polkadot/bridges/relays/utils/src/initialize.rs @@ -62,14 +62,7 @@ pub fn initialize_logger(with_timestamp: bool) { let log_level = color_level(record.level()); let log_target = color_target(record.target()); - writeln!( - buf, - "{}{} {} {}", - loop_name_prefix(), - log_level, - log_target, - record.args(), - ) + writeln!(buf, "{}{} {} {}", loop_name_prefix(), log_level, log_target, record.args(),) }); } @@ -81,12 +74,14 @@ pub(crate) fn initialize_loop(loop_name: String) { LOOP_NAME.with(|g_loop_name| *g_loop_name.borrow_mut() = loop_name); } -/// Returns loop name prefix to use in logs. The prefix is initialized with the `initialize_loop` call. +/// Returns loop name prefix to use in logs. The prefix is initialized with the `initialize_loop` +/// call. fn loop_name_prefix() -> String { // try_with to avoid panic outside of async-std task context LOOP_NAME .try_with(|loop_name| { - // using borrow is ok here, because loop is only initialized once (=> borrow_mut will only be called once) + // using borrow is ok here, because loop is only initialized once (=> borrow_mut will + // only be called once) let loop_name = loop_name.borrow(); if loop_name.is_empty() { String::new() diff --git a/polkadot/bridges/relays/utils/src/lib.rs b/polkadot/bridges/relays/utils/src/lib.rs index 446e00cd23e672e95d6cdb978c454fb1b1f2f85b..a335be791242cf22ac55f2c9e7273345eacfe044 100644 --- a/polkadot/bridges/relays/utils/src/lib.rs +++ b/polkadot/bridges/relays/utils/src/lib.rs @@ -16,11 +16,14 @@ //! Utilities used by different relays. 
+pub use bp_runtime::HeaderId; +pub use error::Error; pub use relay_loop::{relay_loop, relay_metrics}; use backoff::{backoff::Backoff, ExponentialBackoff}; use futures::future::FutureExt; use std::time::Duration; +use thiserror::Error; /// Max delay after connection-unrelated error happened before we'll try the /// same request again. @@ -29,6 +32,7 @@ pub const MAX_BACKOFF_INTERVAL: Duration = Duration::from_secs(60); /// reconnection again. pub const CONNECTION_ERROR_DELAY: Duration = Duration::from_secs(10); +pub mod error; pub mod initialize; pub mod metrics; pub mod relay_loop; @@ -100,10 +104,6 @@ macro_rules! bail_on_arg_error { }; } -/// Ethereum header Id. -#[derive(Debug, Default, Clone, Copy, Eq, Hash, PartialEq)] -pub struct HeaderId(pub Number, pub Hash); - /// Error type that can signal connection errors. pub trait MaybeConnectionError { /// Returns true if error (maybe) represents connection error. @@ -111,11 +111,13 @@ pub trait MaybeConnectionError { } /// Stringified error that may be either connection-related or not. -#[derive(Debug)] +#[derive(Error, Debug)] pub enum StringifiedMaybeConnectionError { /// The error is connection-related error. + #[error("{0}")] Connection(String), /// The error is connection-unrelated error. + #[error("{0}")] NonConnection(String), } @@ -139,15 +141,6 @@ impl MaybeConnectionError for StringifiedMaybeConnectionError { } } -impl ToString for StringifiedMaybeConnectionError { - fn to_string(&self) -> String { - match *self { - StringifiedMaybeConnectionError::Connection(ref err) => err.clone(), - StringifiedMaybeConnectionError::NonConnection(ref err) => err.clone(), - } - } -} - /// Exponential backoff for connection-unrelated errors retries. pub fn retry_backoff() -> ExponentialBackoff { ExponentialBackoff { @@ -168,12 +161,12 @@ pub fn format_ids(mut ids: impl ExactSizeIterator { let id0 = ids.next().expect(NTH_PROOF); let id_last = ids.last().expect(NTH_PROOF); format!("{}:[{:?} ... 
{:?}]", len, id0, id_last) - } + }, } } @@ -220,7 +213,10 @@ impl ProcessFutureResult { /// Returns Ok(true) if future has succeeded. /// Returns Ok(false) if future has failed with non-connection error. /// Returns Err if future is `ConnectionFailed`. - pub fn fail_if_connection_error(self, failed_client: FailedClient) -> Result { + pub fn fail_if_connection_error( + self, + failed_client: FailedClient, + ) -> Result { match self { ProcessFutureResult::Success => Ok(true), ProcessFutureResult::Failed => Ok(false), @@ -247,7 +243,7 @@ where on_success(result); retry_backoff.reset(); ProcessFutureResult::Success - } + }, Err(error) if error.is_connection_error() => { log::error!( target: "bridge", @@ -259,7 +255,7 @@ where retry_backoff.reset(); go_offline_future.set(go_offline(CONNECTION_ERROR_DELAY).fuse()); ProcessFutureResult::ConnectionFailed - } + }, Err(error) => { let retry_delay = retry_backoff.next_backoff().unwrap_or(CONNECTION_ERROR_DELAY); log::error!( @@ -272,6 +268,6 @@ where go_offline_future.set(go_offline(retry_delay).fuse()); ProcessFutureResult::Failed - } + }, } } diff --git a/polkadot/bridges/relays/utils/src/metrics.rs b/polkadot/bridges/relays/utils/src/metrics.rs index c0eaeae337ee10354286b70d59a2cca5c9916fcf..805fe70bfe8586d8052c446c5f143f0f885c748f 100644 --- a/polkadot/bridges/relays/utils/src/metrics.rs +++ b/polkadot/bridges/relays/utils/src/metrics.rs @@ -21,12 +21,16 @@ pub use substrate_prometheus_endpoint::{ register, Counter, CounterVec, Gauge, GaugeVec, Opts, PrometheusError, Registry, F64, U64, }; +use async_std::sync::{Arc, RwLock}; use async_trait::async_trait; use std::{fmt::Debug, time::Duration}; mod float_json_value; mod global; +/// Shared reference to `f64` value that is updated by the metric. +pub type F64SharedRef = Arc>>; + /// Unparsed address that needs to be used to expose Prometheus metrics. 
#[derive(Debug, Clone)] pub struct MetricsAddress { @@ -42,28 +46,38 @@ pub struct MetricsParams { /// Interface and TCP port to be used when exposing Prometheus metrics. pub address: Option, /// Metrics registry. May be `Some(_)` if several components share the same endpoint. - pub registry: Option, - /// Prefix that must be used in metric names. - pub metrics_prefix: Option, + pub registry: Registry, } -/// Metrics API. -pub trait Metrics: Clone + Send + Sync + 'static {} - -impl Metrics for T {} +/// Metric API. +pub trait Metric: Clone + Send + Sync + 'static { + fn register(&self, registry: &Registry) -> Result<(), PrometheusError>; +} -/// Standalone metrics API. +/// Standalone metric API. /// /// Metrics of this kind know how to update themselves, so we may just spawn and forget the /// asynchronous self-update task. #[async_trait] -pub trait StandaloneMetrics: Metrics { +pub trait StandaloneMetric: Metric { /// Update metric values. async fn update(&self); /// Metrics update interval. fn update_interval(&self) -> Duration; + /// Register and spawn metric. Metric is only spawned if it is registered for the first time. + fn register_and_spawn(self, registry: &Registry) -> Result<(), PrometheusError> { + match self.register(registry) { + Ok(()) => { + self.spawn(); + Ok(()) + }, + Err(PrometheusError::AlreadyReg) => Ok(()), + Err(e) => Err(e), + } + } + /// Spawn the self update task that will keep update metric value at given intervals. fn spawn(self) { async_std::task::spawn(async move { @@ -78,21 +92,14 @@ pub trait StandaloneMetrics: Metrics { impl Default for MetricsAddress { fn default() -> Self { - MetricsAddress { - host: "127.0.0.1".into(), - port: 9616, - } + MetricsAddress { host: "127.0.0.1".into(), port: 9616 } } } impl MetricsParams { /// Creates metrics params so that metrics are not exposed. 
pub fn disabled() -> Self { - MetricsParams { - address: None, - registry: None, - metrics_prefix: None, - } + MetricsParams { address: None, registry: Registry::new() } } /// Do not expose metrics. @@ -100,21 +107,11 @@ impl MetricsParams { self.address = None; self } - - /// Set prefix to use in metric names. - pub fn metrics_prefix(mut self, prefix: String) -> Self { - self.metrics_prefix = Some(prefix); - self - } } impl From> for MetricsParams { fn from(address: Option) -> Self { - MetricsParams { - address, - registry: None, - metrics_prefix: None, - } + MetricsParams { address, registry: Registry::new() } } } @@ -130,7 +127,10 @@ pub fn metric_name(prefix: Option<&str>, name: &str) -> String { /// Set value of gauge metric. /// /// If value is `Ok(None)` or `Err(_)`, metric would have default value. -pub fn set_gauge_value, E: Debug>(gauge: &Gauge, value: Result, E>) { +pub fn set_gauge_value, E: Debug>( + gauge: &Gauge, + value: Result, E>, +) { gauge.set(match value { Ok(Some(value)) => { log::trace!( @@ -140,7 +140,7 @@ pub fn set_gauge_value, E: Debug>(gauge: &G value, ); value - } + }, Ok(None) => { log::warn!( target: "bridge-metrics", @@ -148,7 +148,7 @@ pub fn set_gauge_value, E: Debug>(gauge: &G gauge.desc().first().map(|d| &d.fq_name), ); Default::default() - } + }, Err(error) => { log::warn!( target: "bridge-metrics", @@ -157,6 +157,6 @@ pub fn set_gauge_value, E: Debug>(gauge: &G error, ); Default::default() - } + }, }) } diff --git a/polkadot/bridges/relays/utils/src/metrics/float_json_value.rs b/polkadot/bridges/relays/utils/src/metrics/float_json_value.rs index d61f9cac7c2247f23fb397d6e7cc8523161a25d0..7535cbef9863f6bfc6372f5d7d23b53d5785408c 100644 --- a/polkadot/bridges/relays/utils/src/metrics/float_json_value.rs +++ b/polkadot/bridges/relays/utils/src/metrics/float_json_value.rs @@ -14,8 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . 
-use crate::metrics::{metric_name, register, Gauge, PrometheusError, Registry, StandaloneMetrics, F64}; - +use crate::{ + error::{self, Error}, + metrics::{ + metric_name, register, F64SharedRef, Gauge, Metric, PrometheusError, Registry, + StandaloneMetric, F64, + }, +}; + +use async_std::sync::{Arc, RwLock}; use async_trait::async_trait; use std::time::Duration; @@ -23,86 +30,90 @@ use std::time::Duration; const UPDATE_INTERVAL: Duration = Duration::from_secs(60); /// Metric that represents float value received from HTTP service as float gauge. +/// +/// The float value returned by the service is assumed to be normal (`f64::is_normal` +/// should return `true`) and strictly positive. #[derive(Debug, Clone)] pub struct FloatJsonValueMetric { url: String, json_path: String, metric: Gauge, + shared_value_ref: F64SharedRef, } impl FloatJsonValueMetric { /// Create new metric instance with given name and help. pub fn new( - registry: &Registry, - prefix: Option<&str>, url: String, json_path: String, name: String, help: String, ) -> Result { + let shared_value_ref = Arc::new(RwLock::new(None)); Ok(FloatJsonValueMetric { url, json_path, - metric: register(Gauge::new(metric_name(prefix, &name), help)?, registry)?, + metric: Gauge::new(metric_name(None, &name), help)?, + shared_value_ref, }) } - /// Read value from HTTP service. - async fn read_value(&self) -> Result { + /// Get shared reference to metric value. + pub fn shared_value_ref(&self) -> F64SharedRef { + self.shared_value_ref.clone() + } + + /// Request value from HTTP service. + async fn request_value(&self) -> anyhow::Result { use isahc::{AsyncReadResponseExt, HttpClient, Request}; - fn map_isahc_err(err: impl std::fmt::Display) -> String { - format!("Failed to fetch token price from remote server: {}", err) - } - - let request = Request::get(&self.url) - .header("Accept", "application/json") - .body(()) - .map_err(map_isahc_err)?; - let raw_response = HttpClient::new() - .map_err(map_isahc_err)? 
- .send_async(request) - .await - .map_err(map_isahc_err)? - .text() - .await - .map_err(map_isahc_err)?; + let request = Request::get(&self.url).header("Accept", "application/json").body(())?; + let raw_response = HttpClient::new()?.send_async(request).await?.text().await?; + Ok(raw_response) + } + /// Read value from HTTP service. + async fn read_value(&self) -> error::Result { + let raw_response = self.request_value().await.map_err(Error::FetchTokenPrice)?; parse_service_response(&self.json_path, &raw_response) } } +impl Metric for FloatJsonValueMetric { + fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { + register(self.metric.clone(), registry).map(drop) + } +} + #[async_trait] -impl StandaloneMetrics for FloatJsonValueMetric { +impl StandaloneMetric for FloatJsonValueMetric { fn update_interval(&self) -> Duration { UPDATE_INTERVAL } async fn update(&self) { - crate::metrics::set_gauge_value(&self.metric, self.read_value().await.map(Some)); + let value = self.read_value().await; + let maybe_ok = value.as_ref().ok().copied(); + crate::metrics::set_gauge_value(&self.metric, value.map(Some)); + *self.shared_value_ref.write().await = maybe_ok; } } /// Parse HTTP service response. -fn parse_service_response(json_path: &str, response: &str) -> Result { - let json = serde_json::from_str(response).map_err(|err| { - format!( - "Failed to parse HTTP service response: {:?}. Response: {:?}", - err, response, - ) - })?; +fn parse_service_response(json_path: &str, response: &str) -> error::Result { + let json = + serde_json::from_str(response).map_err(|err| Error::ParseHttp(err, response.to_owned()))?; let mut selector = jsonpath_lib::selector(&json); - let maybe_selected_value = selector(json_path).map_err(|err| { - format!( - "Failed to select value from response: {:?}. 
Response: {:?}", - err, response, - ) - })?; + let maybe_selected_value = + selector(json_path).map_err(|err| Error::SelectResponseValue(err, response.to_owned()))?; let selected_value = maybe_selected_value .first() .and_then(|v| v.as_f64()) - .ok_or_else(|| format!("Missing required value from response: {:?}", response,))?; + .ok_or_else(|| Error::MissingResponseValue(response.to_owned()))?; + if !selected_value.is_normal() || selected_value < 0.0 { + return Err(Error::ParseFloat(selected_value)) + } Ok(selected_value) } @@ -118,4 +129,19 @@ mod tests { Ok(433.05), ); } + + #[test] + fn parse_service_response_rejects_negative_numbers() { + assert!(parse_service_response("$.kusama.usd", r#"{"kusama":{"usd":-433.05}}"#).is_err()); + } + + #[test] + fn parse_service_response_rejects_zero_numbers() { + assert!(parse_service_response("$.kusama.usd", r#"{"kusama":{"usd":0.0}}"#).is_err()); + } + + #[test] + fn parse_service_response_rejects_nan() { + assert!(parse_service_response("$.kusama.usd", r#"{"kusama":{"usd":NaN}}"#).is_err()); + } } diff --git a/polkadot/bridges/relays/utils/src/metrics/global.rs b/polkadot/bridges/relays/utils/src/metrics/global.rs index d212480510448339328494b7b470b12b6fec4fad..df90a2c4823471856a07d0afd69777e621510000 100644 --- a/polkadot/bridges/relays/utils/src/metrics/global.rs +++ b/polkadot/bridges/relays/utils/src/metrics/global.rs @@ -17,7 +17,8 @@ //! Global system-wide Prometheus metrics exposed by relays. use crate::metrics::{ - metric_name, register, Gauge, GaugeVec, Opts, PrometheusError, Registry, StandaloneMetrics, F64, U64, + metric_name, register, Gauge, GaugeVec, Metric, Opts, PrometheusError, Registry, + StandaloneMetric, F64, U64, }; use async_std::sync::{Arc, Mutex}; @@ -39,33 +40,36 @@ pub struct GlobalMetrics { impl GlobalMetrics { /// Create and register global metrics. 
- pub fn new(registry: &Registry, prefix: Option<&str>) -> Result { + pub fn new() -> Result { Ok(GlobalMetrics { system: Arc::new(Mutex::new(System::new_with_specifics(RefreshKind::everything()))), - system_average_load: register( - GaugeVec::new( - Opts::new(metric_name(prefix, "system_average_load"), "System load average"), - &["over"], - )?, - registry, + system_average_load: GaugeVec::new( + Opts::new(metric_name(None, "system_average_load"), "System load average"), + &["over"], )?, - process_cpu_usage_percentage: register( - Gauge::new(metric_name(prefix, "process_cpu_usage_percentage"), "Process CPU usage")?, - registry, + process_cpu_usage_percentage: Gauge::new( + metric_name(None, "process_cpu_usage_percentage"), + "Process CPU usage", )?, - process_memory_usage_bytes: register( - Gauge::new( - metric_name(prefix, "process_memory_usage_bytes"), - "Process memory (resident set size) usage", - )?, - registry, + process_memory_usage_bytes: Gauge::new( + metric_name(None, "process_memory_usage_bytes"), + "Process memory (resident set size) usage", )?, }) } } +impl Metric for GlobalMetrics { + fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { + register(self.system_average_load.clone(), registry)?; + register(self.process_cpu_usage_percentage.clone(), registry)?; + register(self.process_memory_usage_bytes.clone(), registry)?; + Ok(()) + } +} + #[async_trait] -impl StandaloneMetrics for GlobalMetrics { +impl StandaloneMetric for GlobalMetrics { async fn update(&self) { // update system-wide metrics let mut system = self.system.lock().await; @@ -92,16 +96,19 @@ impl StandaloneMetrics for GlobalMetrics { memory_usage, ); - self.process_cpu_usage_percentage - .set(if cpu_usage.is_finite() { cpu_usage } else { 0f64 }); + self.process_cpu_usage_percentage.set(if cpu_usage.is_finite() { + cpu_usage + } else { + 0f64 + }); self.process_memory_usage_bytes.set(memory_usage); - } + }, _ => { log::warn!( target: "bridge-metrics", "Failed to refresh 
process information. Metrics may show obsolete values", ); - } + }, } } diff --git a/polkadot/bridges/relays/utils/src/relay_loop.rs b/polkadot/bridges/relays/utils/src/relay_loop.rs index 938136658bd31bf5890b4daf47ea3c130fd3f5be..a992aaaf57ee505f970e86d6b571115702b43621 100644 --- a/polkadot/bridges/relays/utils/src/relay_loop.rs +++ b/polkadot/bridges/relays/utils/src/relay_loop.rs @@ -14,8 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use crate::metrics::{Metrics, MetricsAddress, MetricsParams, PrometheusError, StandaloneMetrics}; -use crate::{FailedClient, MaybeConnectionError}; +use crate::{ + error::Error, + metrics::{Metric, MetricsAddress, MetricsParams}, + FailedClient, MaybeConnectionError, +}; use async_trait::async_trait; use std::{fmt::Debug, future::Future, net::SocketAddr, time::Duration}; @@ -27,25 +30,30 @@ pub const RECONNECT_DELAY: Duration = Duration::from_secs(10); /// Basic blockchain client from relay perspective. #[async_trait] pub trait Client: 'static + Clone + Send + Sync { - /// Type of error this clients returns. + /// Type of error these clients returns. type Error: 'static + Debug + MaybeConnectionError + Send + Sync; /// Try to reconnect to source node. async fn reconnect(&mut self) -> Result<(), Self::Error>; } +#[async_trait] +impl Client for () { + type Error = crate::StringifiedMaybeConnectionError; + + async fn reconnect(&mut self) -> Result<(), Self::Error> { + Ok(()) + } +} + /// Returns generic loop that may be customized and started. pub fn relay_loop(source_client: SC, target_client: TC) -> Loop { - Loop { - reconnect_delay: RECONNECT_DELAY, - source_client, - target_client, - loop_metric: None, - } + Loop { reconnect_delay: RECONNECT_DELAY, source_client, target_client, loop_metric: None } } -/// Returns generic relay loop metrics that may be customized and used in one or several relay loops. 
-pub fn relay_metrics(prefix: Option, params: MetricsParams) -> LoopMetrics<(), (), ()> { +/// Returns generic relay loop metrics that may be customized and used in one or several relay +/// loops. +pub fn relay_metrics(params: MetricsParams) -> LoopMetrics<(), (), ()> { LoopMetrics { relay_loop: Loop { reconnect_delay: RECONNECT_DELAY, @@ -54,8 +62,7 @@ pub fn relay_metrics(prefix: Option, params: MetricsParams) -> LoopMetri loop_metric: None, }, address: params.address, - registry: params.registry.unwrap_or_else(|| create_metrics_registry(prefix)), - metrics_prefix: params.metrics_prefix, + registry: params.registry, loop_metric: None, } } @@ -73,7 +80,6 @@ pub struct LoopMetrics { relay_loop: Loop, address: Option, registry: Registry, - metrics_prefix: Option, loop_metric: Option, } @@ -85,7 +91,7 @@ impl Loop { } /// Start building loop metrics using given prefix. - pub fn with_metrics(self, prefix: Option, params: MetricsParams) -> LoopMetrics { + pub fn with_metrics(self, params: MetricsParams) -> LoopMetrics { LoopMetrics { relay_loop: Loop { reconnect_delay: self.reconnect_delay, @@ -94,18 +100,17 @@ impl Loop { loop_metric: None, }, address: params.address, - registry: params.registry.unwrap_or_else(|| create_metrics_registry(prefix)), - metrics_prefix: params.metrics_prefix, + registry: params.registry, loop_metric: None, } } /// Run relay loop. /// - /// This function represents an outer loop, which in turn calls provided `run_loop` function to do - /// actual job. When `run_loop` returns, this outer loop reconnects to failed client (source, + /// This function represents an outer loop, which in turn calls provided `run_loop` function to + /// do actual job. When `run_loop` returns, this outer loop reconnects to failed client (source, /// target or both) and calls `run_loop` again. 
- pub async fn run(mut self, loop_name: String, run_loop: R) -> Result<(), String> + pub async fn run(mut self, loop_name: String, run_loop: R) -> Result<(), Error> where R: 'static + Send + Fn(SC, TC, Option) -> F, F: 'static + Send + Future>, @@ -118,20 +123,20 @@ impl Loop { loop { let loop_metric = self.loop_metric.clone(); - let future_result = run_loop(self.source_client.clone(), self.target_client.clone(), loop_metric); + let future_result = + run_loop(self.source_client.clone(), self.target_client.clone(), loop_metric); let result = future_result.await; match result { Ok(()) => break, - Err(failed_client) => { + Err(failed_client) => reconnect_failed_client( failed_client, self.reconnect_delay, &mut self.source_client, &mut self.target_client, ) - .await - } + .await, } log::debug!(target: "bridge", "Restarting relay loop"); @@ -148,58 +153,35 @@ impl LoopMetrics { /// Add relay loop metrics. /// /// Loop metrics will be passed to the loop callback. - pub fn loop_metric( + pub fn loop_metric( self, - create_metric: impl FnOnce(&Registry, Option<&str>) -> Result, - ) -> Result, String> { - let loop_metric = create_metric(&self.registry, self.metrics_prefix.as_deref()).map_err(|e| e.to_string())?; + metric: NewLM, + ) -> Result, Error> { + metric.register(&self.registry)?; Ok(LoopMetrics { relay_loop: self.relay_loop, address: self.address, registry: self.registry, - metrics_prefix: self.metrics_prefix, - loop_metric: Some(loop_metric), + loop_metric: Some(metric), }) } - /// Add standalone metrics. 
- pub fn standalone_metric( - self, - create_metric: impl FnOnce(&Registry, Option<&str>) -> Result, - ) -> Result { - // since standalone metrics are updating themselves, we may just ignore the fact that the same - // standalone metric is exposed by several loops && only spawn single metric - match create_metric(&self.registry, self.metrics_prefix.as_deref()) { - Ok(standalone_metrics) => standalone_metrics.spawn(), - Err(PrometheusError::AlreadyReg) => (), - Err(e) => return Err(e.to_string()), - } - - Ok(self) - } - /// Convert into `MetricsParams` structure so that metrics registry may be extended later. pub fn into_params(self) -> MetricsParams { - MetricsParams { - address: self.address, - registry: Some(self.registry), - metrics_prefix: self.metrics_prefix, - } + MetricsParams { address: self.address, registry: self.registry } } /// Expose metrics using address passed at creation. /// /// If passed `address` is `None`, metrics are not exposed. - pub async fn expose(self) -> Result, String> { + pub async fn expose(self) -> Result, Error> { if let Some(address) = self.address { let socket_addr = SocketAddr::new( - address.host.parse().map_err(|err| { - format!( - "Invalid host {} is used to expose Prometheus metrics: {}", - address.host, err, - ) - })?, + address + .host + .parse() + .map_err(|err| Error::ExposingMetricsInvalidHost(address.host.clone(), err))?, address.port, ); @@ -242,8 +224,8 @@ pub async fn reconnect_failed_client( reconnect_delay.as_secs(), error, ); - continue; - } + continue + }, } } if failed_client == FailedClient::Both || failed_client == FailedClient::Target { @@ -256,22 +238,11 @@ pub async fn reconnect_failed_client( reconnect_delay.as_secs(), error, ); - continue; - } + continue + }, } } - break; - } -} - -/// Create new registry with global metrics. 
-fn create_metrics_registry(prefix: Option) -> Registry { - match prefix { - Some(prefix) => { - assert!(!prefix.is_empty(), "Metrics prefix can not be empty"); - Registry::new_custom(Some(prefix), None).expect("only fails if prefix is empty; prefix is not empty; qed") - } - None => Registry::new(), + break } } diff --git a/polkadot/bridges/rustfmt.toml b/polkadot/bridges/rustfmt.toml index 8ded863e80af2390432ee5db3b9f65848f3eefad..082150daf04ee39ada660c315fd0f5bbcf99dea0 100644 --- a/polkadot/bridges/rustfmt.toml +++ b/polkadot/bridges/rustfmt.toml @@ -1,3 +1,24 @@ +# Basic hard_tabs = true -max_width = 120 -edition = "2018" +max_width = 100 +use_small_heuristics = "Max" +# Imports +imports_granularity = "Crate" +reorder_imports = true +# Consistency +newline_style = "Unix" +# Format comments +comment_width = 100 +wrap_comments = true +# Misc +chain_width = 80 +spaces_around_ranges = false +binop_separator = "Back" +reorder_impl_items = false +match_arm_leading_pipes = "Preserve" +match_arm_blocks = false +match_block_trailing_comma = true +trailing_comma = "Vertical" +trailing_semicolon = false +use_field_init_shorthand = true + diff --git a/polkadot/bridges/scripts/run-eth2sub-relay.sh b/polkadot/bridges/scripts/run-eth2sub-relay.sh deleted file mode 100755 index 2cf64a93780d21180aa300f3423d7f8748e9da89..0000000000000000000000000000000000000000 --- a/polkadot/bridges/scripts/run-eth2sub-relay.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -# Run a development instance of the Ethereum to Substrate relay. Needs running -# Substrate and Ethereum nodes in order to work. 
- -RUST_LOG=rpc=trace,bridge=trace ./target/debug/ethereum-poa-relay eth-to-sub diff --git a/polkadot/bridges/scripts/run-openethereum-node.sh b/polkadot/bridges/scripts/run-openethereum-node.sh deleted file mode 100755 index 62089baffe458d54bc9222f6403522babf2822ec..0000000000000000000000000000000000000000 --- a/polkadot/bridges/scripts/run-openethereum-node.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -# This script assumes that an OpenEthereum build is available. The repo -# should be at the same level as the `parity-bridges-common` repo. - -RUST_LOG=rpc=trace,txqueue=trace,bridge-builtin=trace \ -../openethereum/target/debug/openethereum \ - --config="$(pwd)"/deployments/dev/poa-config/poa-node-config \ - --node-key=arthur \ - --engine-signer=0x005e714f896a8b7cede9d38688c1a81de72a58e4 \ - --base-path=/tmp/oe-dev-node \ diff --git a/polkadot/bridges/scripts/send-message-from-millau-rialto.sh b/polkadot/bridges/scripts/send-message-from-millau-rialto.sh index 10fe24087fa40fa89bbeeb23e2567f71ed886e4d..d14b08021ee158ec316afdb1113d9bfb23540f4a 100755 --- a/polkadot/bridges/scripts/send-message-from-millau-rialto.sh +++ b/polkadot/bridges/scripts/send-message-from-millau-rialto.sh @@ -11,7 +11,7 @@ MILLAU_PORT="${RIALTO_PORT:-9945}" case "$1" in remark) RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \ - ./target/debug/substrate-relay send-message MillauToRialto \ + ./target/debug/substrate-relay send-message millau-to-rialto \ --source-host localhost \ --source-port $MILLAU_PORT \ --source-signer //Alice \ @@ -22,7 +22,7 @@ case "$1" in ;; transfer) RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \ - ./target/debug/substrate-relay send-message MillauToRialto \ + ./target/debug/substrate-relay send-message millau-to-rialto \ --source-host localhost \ --source-port $MILLAU_PORT \ --source-signer //Alice \ diff --git a/polkadot/bridges/scripts/send-message-from-rialto-millau.sh b/polkadot/bridges/scripts/send-message-from-rialto-millau.sh index 
52d19e3af8839cf179544cc7aa8e8531b7a92265..10582aa6b3a725ff1101736253209c33773df79c 100755 --- a/polkadot/bridges/scripts/send-message-from-rialto-millau.sh +++ b/polkadot/bridges/scripts/send-message-from-rialto-millau.sh @@ -11,7 +11,7 @@ RIALTO_PORT="${RIALTO_PORT:-9944}" case "$1" in remark) RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \ - ./target/debug/substrate-relay send-message RialtoToMillau \ + ./target/debug/substrate-relay send-message rialto-to-millau \ --source-host localhost \ --source-port $RIALTO_PORT \ --target-signer //Alice \ @@ -22,7 +22,7 @@ case "$1" in ;; transfer) RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \ - ./target/debug/substrate-relay send-message RialtoToMillau \ + ./target/debug/substrate-relay send-message rialto-to-millau \ --source-host localhost \ --source-port $RIALTO_PORT \ --target-signer //Alice \ diff --git a/polkadot/bridges/scripts/update-weights.sh b/polkadot/bridges/scripts/update-weights.sh index 0ac773e8d7b46d2f4c9dd5b3ed65d1171c459831..5ee7bb9e8d8e1c42a24ffdeead289441569c8315 100755 --- a/polkadot/bridges/scripts/update-weights.sh +++ b/polkadot/bridges/scripts/update-weights.sh @@ -29,3 +29,15 @@ time cargo run --release -p rialto-bridge-node --features=runtime-benchmarks -- --heap-pages=4096 \ --output=./modules/grandpa/src/weights.rs \ --template=./.maintain/rialto-weight-template.hbs + +time cargo run --release -p millau-bridge-node --features=runtime-benchmarks -- benchmark \ + --chain=dev \ + --steps=50 \ + --repeat=20 \ + --pallet=pallet_bridge_token_swap \ + --extrinsic=* \ + --execution=wasm \ + --wasm-execution=Compiled \ + --heap-pages=4096 \ + --output=./modules/token-swap/src/weights.rs \ + --template=./.maintain/millau-weight-template.hbs