diff --git a/polkadot/bridges/.dependabot/config.yml b/polkadot/bridges/.dependabot/config.yml deleted file mode 100644 index 1972b3b94a2ab89337de1f37fdc9693ca27a5af4..0000000000000000000000000000000000000000 --- a/polkadot/bridges/.dependabot/config.yml +++ /dev/null @@ -1,20 +0,0 @@ -version: 1 -update_configs: - - package_manager: "rust:cargo" - directory: "/" - update_schedule: "weekly" - ignored_updates: - - match: - dependency_name: "sp-*" - - match: - dependency_name: "sc-*" - - match: - dependency_name: "substrate-*" - - match: - dependency_name: "frame-*" - - match: - dependency_name: "pallet-*" - automerged_updates: - - match: - update_type: "all" - version_requirement_updates: "auto" diff --git a/polkadot/bridges/.dockerignore b/polkadot/bridges/.dockerignore deleted file mode 100644 index f4ceea785605464ac9d15c666398242c43283fa0..0000000000000000000000000000000000000000 --- a/polkadot/bridges/.dockerignore +++ /dev/null @@ -1 +0,0 @@ -**/target/ diff --git a/polkadot/bridges/.editorconfig b/polkadot/bridges/.editorconfig deleted file mode 100644 index d67ffe8f90f4fadd4771c4423438d6e3c08b253c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/.editorconfig +++ /dev/null @@ -1,16 +0,0 @@ -root = true -[*] -indent_style=tab -indent_size=tab -tab_width=4 -end_of_line=lf -charset=utf-8 -trim_trailing_whitespace=true -max_line_length=100 -insert_final_newline=true - -[*.{yml,md,yaml,sh}] -indent_style=space -indent_size=2 -tab_width=8 -end_of_line=lf diff --git a/polkadot/bridges/.github/workflows/deny.yml b/polkadot/bridges/.github/workflows/deny.yml deleted file mode 100644 index e444b11da8d58245507048d62f6d09c26ff22a3c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/.github/workflows/deny.yml +++ /dev/null @@ -1,32 +0,0 @@ -name: Cargo deny - -on: - pull_request: - schedule: - - cron: '0 0 * * *' - push: - branches: - - master - tags: - - v* - paths-ignore: - - '**/README.md' - - diagrams/* - - docs/* -jobs: - cargo-deny: - 
runs-on: ubuntu-latest - steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 - with: - access_token: ${{ github.token }} - - name: Checkout sources & submodules - uses: actions/checkout@master - with: - fetch-depth: 5 - submodules: recursive - - name: Cargo deny - uses: EmbarkStudios/cargo-deny-action@v1 - with: - command: "check --hide-inclusion-graph" diff --git a/polkadot/bridges/.github/workflows/lint.yml b/polkadot/bridges/.github/workflows/lint.yml deleted file mode 100644 index b30a72a4c4e73cd0b5881f58bb5dd0364cc499c9..0000000000000000000000000000000000000000 --- a/polkadot/bridges/.github/workflows/lint.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: Check style - -on: - pull_request: - push: - branches: - - master - tags: - - v* - paths-ignore: - - '**/README.md' - - diagrams/* - - docs/* - schedule: # Weekly build - - cron: '0 0 * * 0' -jobs: -## Check stage - check-fmt: - name: Check RustFmt - runs-on: ubuntu-latest - env: - RUST_BACKTRACE: full - steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 - with: - access_token: ${{ github.token }} - - name: Checkout sources & submodules - uses: actions/checkout@master - with: - fetch-depth: 5 - submodules: recursive - - name: Add rustfmt - run: rustup component add rustfmt - - name: rust-fmt check - uses: actions-rs/cargo@master - with: - command: fmt - args: --all -- --check diff --git a/polkadot/bridges/.github/workflows/publish-deps.yml b/polkadot/bridges/.github/workflows/publish-deps.yml deleted file mode 100644 index 799aa8f96672395cb88fa836c9f555a0889c0f21..0000000000000000000000000000000000000000 --- a/polkadot/bridges/.github/workflows/publish-deps.yml +++ /dev/null @@ -1,49 +0,0 @@ -name: Publish Dependencies to Docker hub - -on: - push: - tags: - - v* - paths-ignore: - - '**/README.md' - - diagrams/* - - docs/* - schedule: # Weekly build - - cron: '0 0 * * 0' - -jobs: -## Publish to Docker hub - publish: - name: Publishing - runs-on: 
ubuntu-latest - container: - image: docker:git - steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 - with: - access_token: ${{ github.token }} - - name: Checkout sources & submodules - uses: actions/checkout@master - with: - fetch-depth: 5 - submodules: recursive - - name: Build and push dependencies - uses: docker/build-push-action@v1 - with: - username: ${{ secrets.DOCKER_USER }} - password: ${{ secrets.DOCKER_PASSWORD }} - repository: paritytech/bridge-dependencies - dockerfile: deployments/BridgeDeps.Dockerfile - tag_with_ref: true - tag_with_sha: true - labels: - org.opencontainers.image.source="https://github.com/paritytech/parity-bridges-common", - org.opencontainers.image.authors="devops-team@parity.io", - org.opencontainers.image.vendor="Parity Technologies", - org.opencontainers.image.url="https://github.com/paritytech/parity-bridges-common", - org.opencontainers.image.documentation="https://github.com/paritytech/parity-bridges-common/README.md", - org.opencontainers.image.title=${{ matrix.project }}, - org.opencontainers.image.description="${{ matrix.project }} - component of Parity Bridges Common", - org.opencontainers.image.licenses="GPL-3.0 License" - add_git_labels: true diff --git a/polkadot/bridges/.github/workflows/publish-docker.yml b/polkadot/bridges/.github/workflows/publish-docker.yml deleted file mode 100644 index 3e00ead6102c08a0f0bff7169674a2e68a7a677b..0000000000000000000000000000000000000000 --- a/polkadot/bridges/.github/workflows/publish-docker.yml +++ /dev/null @@ -1,76 +0,0 @@ -name: Publish images to Docker hub - -on: - push: - tags: - - v* - paths-ignore: - - '**/README.md' - - diagrams/* - - docs/* - schedule: # Nightly build - - cron: '0 1 * * *' - -jobs: -## Publish to Docker hub - publish: - name: Publishing - strategy: - matrix: - project: - - rialto-bridge-node - - millau-bridge-node - - ethereum-poa-relay - - substrate-relay - include: - - project: rialto-bridge-node - healthcheck: 
http://localhost:9933/health - - project: millau-bridge-node - healthcheck: http://localhost:9933/health - - project: ethereum-poa-relay - healthcheck: http://localhost:9616/metrics - - project: substrate-relay - healthcheck: http://localhost:9616/metrics - runs-on: ubuntu-latest - container: - image: docker:git - steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 - with: - access_token: ${{ github.token }} - - name: Checkout sources & submodules - uses: actions/checkout@master - with: - fetch-depth: 5 - submodules: recursive - - name: Set vars - id: vars - run: | - echo ::set-output name=DATE::$(date +%d-%m-%Y) - if [[ ${GITHUB_REF} = refs/tags/* ]] - then - echo ::set-output name=TAG::${GITHUB_REF#refs/tags/} - else - echo ::set-output name=TAG::nightly-$(date +%d-%m-%Y) - fi - - name: Build and push ${{ matrix.project }} - uses: docker/build-push-action@v1 - with: - username: ${{ secrets.DOCKER_USER }} - password: ${{ secrets.DOCKER_PASSWORD }} - repository: paritytech/${{ matrix.project }} - build_args: PROJECT=${{ matrix.project }}, HEALTH=${{ matrix.healthcheck }} - tags: ${{ steps.vars.outputs.TAG }}, latest - labels: - org.opencontainers.image.created=${{ steps.vars.outputs.DATE }}, - org.opencontainers.image.source="https://github.com/paritytech/parity-bridges-common", - org.opencontainers.image.authors="devops-team@parity.io", - org.opencontainers.image.vendor="Parity Technologies", - org.opencontainers.image.url="https://github.com/paritytech/parity-bridges-common", - org.opencontainers.image.documentation="https://github.com/paritytech/parity-bridges-common/README.md", - org.opencontainers.image.version=${{ steps.vars.outputs.TAG }}, - org.opencontainers.image.title=${{ matrix.project }}, - org.opencontainers.image.description="${{ matrix.project }} - component of Parity Bridges Common", - org.opencontainers.image.licenses="GPL-3.0 License" - add_git_labels: true diff --git a/polkadot/bridges/.github/workflows/rust.yml 
b/polkadot/bridges/.github/workflows/rust.yml deleted file mode 100644 index 3fe73363bf19581ef9f2fe41aaf79bc2df72401f..0000000000000000000000000000000000000000 --- a/polkadot/bridges/.github/workflows/rust.yml +++ /dev/null @@ -1,159 +0,0 @@ -name: Compilation and Testing Suite - -on: - pull_request: - push: - branches: - - master - tags: - - v* - paths-ignore: - - '**/README.md' - - diagrams/* - - docs/* - schedule: # Weekly build - - cron: '0 0 * * 0' -jobs: - -## Check Stage - check-test: - name: Check and test - strategy: - matrix: - toolchain: - - stable - #- beta - - nightly - runs-on: ubuntu-latest - env: - RUST_BACKTRACE: full - NIGHTLY: nightly #if necessary, specify the version, nightly-2020-10-04, etc. - steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 - with: - access_token: ${{ github.token }} - - name: Checkout sources & submodules - uses: actions/checkout@master - with: - fetch-depth: 5 - submodules: recursive - - name: Install Toolchain - run: rustup toolchain add $NIGHTLY - - name: Add WASM Utilities - run: rustup target add wasm32-unknown-unknown --toolchain $NIGHTLY - - name: Rust Cache - uses: Swatinem/rust-cache@v1.2.0 - - name: Checking rust-${{ matrix.toolchain }} - uses: actions-rs/cargo@master - with: - command: check - toolchain: ${{ matrix.toolchain }} - args: --all --verbose - -## Test Stage - - name: Testing rust-${{ matrix.toolchain }} - uses: actions-rs/cargo@master - if: matrix.toolchain == 'stable' - with: - command: test - toolchain: ${{ matrix.toolchain }} - args: --all --verbose - -## Check Node Benchmarks - - name: Check Rialto benchmarks runtime ${{ matrix.platform }} rust-${{ matrix.toolchain }} - uses: actions-rs/cargo@master - with: - command: check - toolchain: ${{ matrix.toolchain }} - args: --manifest-path ./bin/rialto/node/Cargo.toml --no-default-features --features runtime-benchmarks --verbose - - name: Check Millau benchmarks runtime ${{ matrix.platform }} rust-${{ matrix.toolchain }} - 
uses: actions-rs/cargo@master - with: - command: check - toolchain: ${{ matrix.toolchain }} - args: --manifest-path ./bin/millau/node/Cargo.toml --no-default-features --features runtime-benchmarks --verbose - -## Build Stage - build: - name: Build - strategy: - matrix: - toolchain: - - stable - #- beta - - nightly - runs-on: ubuntu-latest - env: - RUST_BACKTRACE: full - NIGHTLY: nightly #if necessary, specify the version, nightly-2020-10-04, etc. - steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 - with: - access_token: ${{ github.token }} - - name: Checkout sources & submodules - uses: actions/checkout@master - with: - fetch-depth: 5 - submodules: recursive - - name: Install Toolchain - run: rustup toolchain add $NIGHTLY - - name: Add WASM Utilities - run: rustup target add wasm32-unknown-unknown --toolchain $NIGHTLY - - name: Rust Cache - uses: Swatinem/rust-cache@v1.2.0 - - name: Building rust-${{ matrix.toolchain }} - uses: actions-rs/cargo@master - if: github.ref == 'refs/heads/master' - with: - command: build - toolchain: ${{ matrix.toolchain }} - args: --all --verbose - - name: Prepare artifacts - if: github.ref == 'refs/heads/master' - run: | - mkdir -p ./artifacts; - mv -v target/debug/rialto-bridge-node ./artifacts/; - mv -v target/debug/millau-bridge-node ./artifacts/; - mv -v target/debug/ethereum-poa-relay ./artifacts/; - mv -v target/debug/substrate-relay ./artifacts/; - shell: bash - - name: Upload artifacts - if: github.ref == 'refs/heads/master' - uses: actions/upload-artifact@v1 - with: - name: ${{ matrix.toolchain }}.zip - path: artifacts/ - - ## Linting Stage - clippy: - name: Clippy - runs-on: ubuntu-latest - env: - RUST_BACKTRACE: full - NIGHTLY: nightly #if necessary, specify the version, nightly-2020-10-04, etc. 
- steps: - - name: Cancel Previous Runs - uses: styfle/cancel-workflow-action@0.4.1 - with: - access_token: ${{ github.token }} - - name: Checkout sources & submodules - uses: actions/checkout@master - with: - fetch-depth: 5 - submodules: recursive - - name: Install Toolchain - run: rustup toolchain add $NIGHTLY - - name: Add WASM Utilities - run: rustup target add wasm32-unknown-unknown --toolchain $NIGHTLY - - name: Add clippy - run: rustup component add clippy --toolchain $NIGHTLY - - name: Rust Cache - uses: Swatinem/rust-cache@v1.2.0 - - name: Clippy - uses: actions-rs/cargo@master - with: - command: clippy - toolchain: nightly #if necessary, specify the version, nightly-2020-10-04, etc. - args: --all-targets -- -D warnings diff --git a/polkadot/bridges/.gitignore b/polkadot/bridges/.gitignore deleted file mode 100644 index cc9ede9aef666801ec76cc95f412b9a0af11718f..0000000000000000000000000000000000000000 --- a/polkadot/bridges/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -**/target/ -**/.env -**/.env2 -**/rust-toolchain - -**/*.rs.bk - -*.o -*.so -*.rlib -*.dll -.gdb_history - -*.exe - -.DS_Store - -.idea -.vscode -*.iml -*.swp -*.swo diff --git a/polkadot/bridges/.maintain/rialto-weight-template.hbs b/polkadot/bridges/.maintain/rialto-weight-template.hbs deleted file mode 100644 index c8d6725a7f5e2aef0f9ed103a2d86d2be9037d1f..0000000000000000000000000000000000000000 --- a/polkadot/bridges/.maintain/rialto-weight-template.hbs +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Autogenerated weights for {{pallet}} -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} -//! DATE: {{date}}, STEPS: {{cmd.steps}}, REPEAT: {{cmd.repeat}} -//! LOW RANGE: {{cmd.lowest_range_values}}, HIGH RANGE: {{cmd.highest_range_values}} -//! EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}} -//! CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} - -// Executed Command: -{{#each args as |arg|~}} -// {{arg}} -{{/each}} - -#![allow(clippy::all)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; - -/// Weight functions needed for {{pallet}}. -pub trait WeightInfo { - {{~#each benchmarks as |benchmark|}} - fn {{benchmark.name~}} - ( - {{~#each benchmark.components as |c| ~}} - {{c.name}}: u32, {{/each~}} - ) -> Weight; - {{~/each}} -} - -/// Weights for {{pallet}} using the Rialto node and recommended hardware. 
-pub struct RialtoWeight(PhantomData); -impl WeightInfo for RialtoWeight { - {{~#each benchmarks as |benchmark|}} - fn {{benchmark.name~}} - ( - {{~#each benchmark.components as |c| ~}} - {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} - ) -> Weight { - ({{underscore benchmark.base_weight}} as Weight) - {{~#each benchmark.component_weight as |cw|}} - .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) - {{~/each}} - {{~#if (ne benchmark.base_reads "0")}} - .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight)) - {{~/if}} - {{~#each benchmark.component_reads as |cr|}} - .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) - {{~/each}} - {{~#if (ne benchmark.base_writes "0")}} - .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight)) - {{~/if}} - {{~#each benchmark.component_writes as |cw|}} - .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) - {{~/each}} - } - {{~/each}} -} - -// For backwards compatibility and tests -impl WeightInfo for () { - {{~#each benchmarks as |benchmark|}} - fn {{benchmark.name~}} - ( - {{~#each benchmark.components as |c| ~}} - {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} - ) -> Weight { - ({{underscore benchmark.base_weight}} as Weight) - {{~#each benchmark.component_weight as |cw|}} - .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) - {{~/each}} - {{~#if (ne benchmark.base_reads "0")}} - .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as Weight)) - {{~/if}} - {{~#each benchmark.component_reads as |cr|}} - .saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) - {{~/each}} - {{~#if (ne benchmark.base_writes "0")}} - .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as Weight)) - {{~/if}} 
- {{~#each benchmark.component_writes as |cw|}} - .saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) - {{~/each}} - } - {{~/each}} -} diff --git a/polkadot/bridges/Cargo.lock b/polkadot/bridges/Cargo.lock deleted file mode 100644 index 8e013b7dae75553048de8f53e44bb4cbc72b6889..0000000000000000000000000000000000000000 --- a/polkadot/bridges/Cargo.lock +++ /dev/null @@ -1,9422 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", -] - -[[package]] -name = "addr2line" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" - -[[package]] -name = "aead" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" -dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "aes" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd2bc6d3f370b5666245ff421e231cba4353df936e26986d2918e61a8fd6aef6" -dependencies = [ - "aes-soft", - "aesni", - "block-cipher", -] - -[[package]] -name = "aes-gcm" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0301c9e9c443494d970a07885e8cf3e587bae8356a1d5abd0999068413f7205f" -dependencies = [ - "aead", - "aes", - "block-cipher", - "ghash", - "subtle 2.4.0", -] - 
-[[package]] -name = "aes-soft" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63dd91889c49327ad7ef3b500fd1109dbd3c509a03db0d4a9ce413b79f575cb6" -dependencies = [ - "block-cipher", - "byteorder", - "opaque-debug 0.3.0", -] - -[[package]] -name = "aesni" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6fe808308bb07d393e2ea47780043ec47683fcf19cf5efc8ca51c50cc8c68a" -dependencies = [ - "block-cipher", - "opaque-debug 0.3.0", -] - -[[package]] -name = "ahash" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" - -[[package]] -name = "ahash" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" - -[[package]] -name = "aho-corasick" -version = "0.7.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" -dependencies = [ - "memchr", -] - -[[package]] -name = "ansi_term" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" -dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "anyhow" -version = "1.0.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee67c11feeac938fae061b232e38e0b6d94f97a9df10e6271319325ac4c56a86" - -[[package]] -name = "approx" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f0e60b75072ecd4168020818c0107f2857bb6c4e64252d8d3983f6263b40a5c3" -dependencies = [ - "num-traits", -] - -[[package]] -name = "arrayref" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" - -[[package]] -name = "arrayvec" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" -dependencies = [ - "nodrop", -] - -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - -[[package]] -name = "asn1_der" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fce6b6a0ffdafebd82c87e79e3f40e8d2c523e5fea5566ff6b90509bf98d638" -dependencies = [ - "asn1_der_derive", -] - -[[package]] -name = "asn1_der_derive" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502" -dependencies = [ - "quote", - "syn", -] - -[[package]] -name = "async-channel" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59740d83946db6a5af71ae25ddf9562c2b176b2ca42cf99a455f09f4a220d6b9" -dependencies = [ - "concurrent-queue", - "event-listener", - "futures-core", -] - -[[package]] -name = "async-executor" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb877970c7b440ead138f6321a3b5395d6061183af779340b65e20c0fede9146" -dependencies = [ - "async-task", - "concurrent-queue", - "fastrand", - "futures-lite", - "once_cell", - "vec-arena", -] - -[[package]] -name = "async-global-executor" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9586ec52317f36de58453159d48351bc244bc24ced3effc1fce22f3d48664af6" -dependencies = [ - "async-channel", - "async-executor", - "async-io", - "async-mutex", - "blocking", - "futures-lite", - "num_cpus", - "once_cell", -] - -[[package]] -name = "async-io" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9315f8f07556761c3e48fec2e6b276004acf426e6dc068b2c2251854d65ee0fd" -dependencies = [ - "concurrent-queue", - "fastrand", - "futures-lite", - "libc", - "log", - "nb-connect", - "once_cell", - "parking", - "polling", - "vec-arena", - "waker-fn", - "winapi 0.3.9", -] - -[[package]] -name = "async-lock" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1996609732bde4a9988bc42125f55f2af5f3c36370e27c778d5191a4a1b63bfb" -dependencies = [ - "event-listener", -] - -[[package]] -name = "async-mutex" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" -dependencies = [ - "event-listener", -] - -[[package]] -name = "async-process" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8cea09c1fb10a317d1b5af8024eeba256d6554763e85ecd90ff8df31c7bbda" -dependencies = [ - "async-io", - "blocking", - "cfg-if 0.1.10", - "event-listener", - "futures-lite", - "once_cell", - "signal-hook", - "winapi 0.3.9", -] - -[[package]] -name = "async-std" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f06685bad74e0570f5213741bea82158279a4103d988e57bfada11ad230341" -dependencies = [ - "async-channel", - "async-global-executor", - "async-io", - "async-lock", - "async-process", - "crossbeam-utils 0.8.1", - "futures-channel", - "futures-core", - "futures-io", - "futures-lite", - "gloo-timers", - "kv-log-macro", - "log", - "memchr", - "num_cpus", - "once_cell", - "pin-project-lite 0.2.4", - "pin-utils", - 
"slab", - "wasm-bindgen-futures", -] - -[[package]] -name = "async-task" -version = "4.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" - -[[package]] -name = "async-tls" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce6977f57fa68da77ffe5542950d47e9c23d65f5bc7cb0a9f8700996913eec7" -dependencies = [ - "futures 0.3.12", - "rustls 0.16.0", - "webpki", - "webpki-roots 0.17.0", -] - -[[package]] -name = "async-trait" -version = "0.1.42" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "asynchronous-codec" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb4401f0a3622dad2e0763fa79e0eb328bc70fb7dccfdd645341f00d671247d6" -dependencies = [ - "bytes 1.0.0", - "futures-sink", - "futures-util", - "memchr", - "pin-project-lite 0.2.4", -] - -[[package]] -name = "asynchronous-codec" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0de5164e5edbf51c45fb8c2d9664ae1c095cce1b265ecf7569093c0d66ef690" -dependencies = [ - "bytes 1.0.0", - "futures-sink", - "futures-util", - "memchr", - "pin-project-lite 0.2.4", -] - -[[package]] -name = "atomic" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" -dependencies = [ - "autocfg", -] - -[[package]] -name = "atomic-waker" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "autocfg" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" - -[[package]] -name = "backoff" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721c249ab59cbc483ad4294c9ee2671835c1e43e9ffc277e6b4ecfef733cfdc5" -dependencies = [ - "instant", - "rand 0.7.3", -] - -[[package]] -name = "backtrace" -version = "0.3.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5140344c85b01f9bbb4d4b7288a8aa4b3287ccef913a14bcc78a1063623598" -dependencies = [ - "addr2line", - "cfg-if 1.0.0", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] -name = "base-x" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" - -[[package]] -name = "base58" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" - -[[package]] -name = "base64" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" -dependencies = [ - "byteorder", -] - -[[package]] -name = "base64" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7" - -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - -[[package]] -name = "base64" -version = "0.13.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" - -[[package]] -name = "bincode" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d" -dependencies = [ - "byteorder", - "serde", -] - -[[package]] -name = "bindgen" -version = "0.54.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66c0bb6167449588ff70803f4127f0684f9063097eca5016f37eb52b92c2cf36" -dependencies = [ - "bitflags", - "cexpr", - "cfg-if 0.1.10", - "clang-sys", - "clap", - "env_logger 0.7.1", - "lazy_static", - "lazycell", - "log", - "peeking_take_while", - "proc-macro2", - "quote", - "regex", - "rustc-hash", - "shlex", - "which 3.1.1", -] - -[[package]] -name = "bitflags" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" - -[[package]] -name = "bitvec" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5011ffc90248764d7005b0e10c7294f5aa1bd87d9dd7248f4ad475b347c294d" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - -[[package]] -name = "blake2" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a5720225ef5daecf08657f23791354e1685a8c91a4c60c7f3d3b2892f978f4" -dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "blake2-rfc" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" -dependencies = [ - "arrayvec 0.4.12", - "constant_time_eq", -] - -[[package]] -name = "blake2b_simd" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", -] - -[[package]] -name = "blake2s_simd" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", -] - -[[package]] -name = "blake3" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9ff35b701f3914bdb8fad3368d822c766ef2858b2583198e41639b936f09d3f" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "cc", - "cfg-if 0.1.10", - "constant_time_eq", - "crypto-mac 0.8.0", - "digest 0.9.0", -] - -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding 0.1.5", - "byte-tools", - "byteorder", - "generic-array 0.12.3", -] - -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "block-padding 0.2.1", - "generic-array 0.14.4", -] - -[[package]] -name = "block-cipher" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f337a3e6da609650eb74e02bc9fac7b735049f7623ab12f2e4c719316fcc7e80" -dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = [ - "byte-tools", -] - -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - -[[package]] -name = "blocking" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e170dbede1f740736619b776d7251cb1b9095c435c34d8ca9f57fcd2f335e9" -dependencies = [ - "async-channel", - "async-task", - "atomic-waker", - "fastrand", - "futures-lite", - "once_cell", -] - -[[package]] -name = "bp-currency-exchange" -version = "0.1.0" -dependencies = [ - "frame-support", - "parity-scale-codec", - "sp-api", - "sp-std", -] - -[[package]] -name = "bp-eth-poa" -version = "0.1.0" -dependencies = [ - "ethbloom 0.10.0", - "fixed-hash", - "hash-db", - "hex-literal 0.2.1", - "impl-rlp", - "impl-serde", - "libsecp256k1", - "parity-bytes", - "parity-scale-codec", - "plain_hasher", - "primitive-types", - "rlp 0.5.0", - "serde", - "serde-big-array", - "sp-api", - "sp-io", - "sp-runtime", - "sp-std", - "triehash", -] - -[[package]] -name = "bp-header-chain" -version = "0.1.0" -dependencies = [ - "bp-test-utils", - "finality-grandpa", - "frame-support", - "parity-scale-codec", - "serde", - "sp-core", - "sp-finality-grandpa", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "bp-kusama" -version = "0.1.0" -dependencies = [ - "bp-message-lane", - "bp-runtime", - "frame-support", - "frame-system", - "sp-api", - "sp-core", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "bp-message-dispatch" -version = "0.1.0" -dependencies = [ - "bp-runtime", - "parity-scale-codec", -] - -[[package]] -name = "bp-message-lane" -version = "0.1.0" -dependencies = [ - "bp-runtime", - "frame-support", - "frame-system", - "parity-scale-codec", - "sp-std", -] - -[[package]] -name = "bp-millau" -version = "0.1.0" -dependencies = [ - "bp-message-lane", - "bp-runtime", - "fixed-hash", - "frame-support", - "frame-system", - "hash256-std-hasher", - "impl-codec", - "impl-serde", - "parity-util-mem", - "serde", - "sp-api", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", - "sp-trie", -] - 
-[[package]] -name = "bp-polkadot" -version = "0.1.0" -dependencies = [ - "bp-message-lane", - "bp-runtime", - "frame-support", - "frame-system", - "sp-api", - "sp-core", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "bp-rialto" -version = "0.1.0" -dependencies = [ - "bp-message-lane", - "bp-runtime", - "frame-support", - "frame-system", - "sp-api", - "sp-core", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "bp-runtime" -version = "0.1.0" -dependencies = [ - "frame-support", - "num-traits", - "parity-scale-codec", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "bp-test-utils" -version = "0.1.0" -dependencies = [ - "bp-header-chain", - "finality-grandpa", - "sp-finality-grandpa", - "sp-keyring", - "sp-runtime", -] - -[[package]] -name = "bridge-runtime-common" -version = "0.1.0" -dependencies = [ - "bp-message-dispatch", - "bp-message-lane", - "bp-runtime", - "ed25519-dalek", - "frame-support", - "hash-db", - "pallet-bridge-call-dispatch", - "pallet-message-lane", - "pallet-substrate-bridge", - "parity-scale-codec", - "sp-core", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-trie", -] - -[[package]] -name = "bs58" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" - -[[package]] -name = "bs58" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" - -[[package]] -name = "bstr" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "473fc6b38233f9af7baa94fb5852dca389e3d95b8e21c8e3719301462c5d9faf" -dependencies = [ - "memchr", -] - -[[package]] -name = "bumpalo" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" - -[[package]] -name = 
"byte-slice-cast" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65c1bf4a04a88c54f589125563643d773f3254b5c38571395e2b591c693bbc81" - -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - -[[package]] -name = "byteorder" -version = "1.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" - -[[package]] -name = "bytes" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" -dependencies = [ - "byteorder", - "either", - "iovec", -] - -[[package]] -name = "bytes" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" - -[[package]] -name = "bytes" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad1f8e949d755f9d79112b5bb46938e0ef9d3804a0b16dfab13aafcaa5f0fa72" - -[[package]] -name = "cache-padded" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" - -[[package]] -name = "cc" -version = "1.0.66" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c0496836a84f8d0495758516b8621a622beb77c0fed418570e50764093ced48" -dependencies = [ - "jobserver", -] - -[[package]] -name = "cexpr" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" -dependencies = [ - "nom", -] - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "chacha20" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "244fbce0d47e97e8ef2f63b81d5e05882cb518c68531eb33194990d7b7e85845" -dependencies = [ - "stream-cipher", - "zeroize", -] - -[[package]] -name = "chacha20poly1305" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bf18d374d66df0c05cdddd528a7db98f78c28e2519b120855c4f84c5027b1f5" -dependencies = [ - "aead", - "chacha20", - "poly1305", - "stream-cipher", - "zeroize", -] - -[[package]] -name = "chrono" -version = "0.4.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" -dependencies = [ - "libc", - "num-integer", - "num-traits", - "time 0.1.44", - "winapi 0.3.9", -] - -[[package]] -name = "cid" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d88f30b1e74e7063df5711496f3ee6e74a9735d62062242d70cddf77717f18e" -dependencies = [ - "multibase", - "multihash", - "unsigned-varint 0.5.1", -] - -[[package]] -name = "cipher" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" -dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "clang-sys" -version = "0.29.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe6837df1d5cba2397b835c8530f51723267e16abbf83892e9e5af4f0e5dd10a" -dependencies = [ - "glob", - "libc", - "libloading", -] - -[[package]] -name = "clap" -version = "2.33.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" -dependencies = [ - "ansi_term 0.11.0", - "atty", - "bitflags", - "strsim", - "textwrap", - "unicode-width", - "vec_map", - "yaml-rust", -] - -[[package]] -name = "cloudabi" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -dependencies = [ - "bitflags", -] - -[[package]] -name = "concurrent-queue" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" -dependencies = [ - "cache-padded", -] - -[[package]] -name = "const_fn" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd51eab21ab4fd6a3bf889e2d0958c0a6e3a61ad04260325e919e652a2a62826" - -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - -[[package]] -name = "core-foundation" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" - -[[package]] -name = "cpp_demangle" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44919ecaf6f99e8e737bc239408931c9a01e9a6c74814fee8242dd2506b65390" -dependencies = [ - "cfg-if 1.0.0", - "glob", -] - -[[package]] -name = "cpuid-bool" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" - -[[package]] -name = "cpuid-bool" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" - -[[package]] -name = "cranelift-bforest" -version = "0.69.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4066fd63b502d73eb8c5fa6bcab9c7962b05cd580f6b149ee83a8e730d8ce7fb" -dependencies = [ - "cranelift-entity", -] - -[[package]] -name = "cranelift-codegen" -version = "0.69.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a54e4beb833a3c873a18a8fe735d73d732044004c7539a072c8faa35ccb0c60" -dependencies = [ - "byteorder", - "cranelift-bforest", - "cranelift-codegen-meta", - "cranelift-codegen-shared", - "cranelift-entity", - "gimli", - "log", - "regalloc", - "serde", - "smallvec 1.6.1", - "target-lexicon", - "thiserror", -] - -[[package]] -name = "cranelift-codegen-meta" -version = "0.69.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c54cac7cacb443658d8f0ff36a3545822613fa202c946c0891897843bc933810" -dependencies = [ - "cranelift-codegen-shared", - "cranelift-entity", -] - -[[package]] -name = "cranelift-codegen-shared" -version = "0.69.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a109760aff76788b2cdaeefad6875a73c2b450be13906524f6c2a81e05b8d83c" - -[[package]] -name = "cranelift-entity" -version = "0.69.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b044234aa32531f89a08b487630ddc6744696ec04c8123a1ad388de837f5de3" -dependencies = [ - "serde", -] - -[[package]] -name = "cranelift-frontend" -version = "0.69.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5452b3e4e97538ee5ef2cc071301c69a86c7adf2770916b9d04e9727096abd93" -dependencies = [ - "cranelift-codegen", - "log", - "smallvec 1.6.1", - 
"target-lexicon", -] - -[[package]] -name = "cranelift-native" -version = "0.69.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f68035c10b2e80f26cc29c32fa824380877f38483504c2a47b54e7da311caaf3" -dependencies = [ - "cranelift-codegen", - "raw-cpuid", - "target-lexicon", -] - -[[package]] -name = "cranelift-wasm" -version = "0.69.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a530eb9d1c95b3309deb24c3d179d8b0ba5837ed98914a429787c395f614949d" -dependencies = [ - "cranelift-codegen", - "cranelift-entity", - "cranelift-frontend", - "itertools", - "log", - "serde", - "smallvec 1.6.1", - "thiserror", - "wasmparser", -] - -[[package]] -name = "crc32fast" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "crossbeam-channel" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.1", -] - -[[package]] -name = "crossbeam-deque" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" -dependencies = [ - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-deque" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-epoch 0.9.1", - "crossbeam-utils 0.8.1", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" 
-dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", - "scopeguard", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1aaa739f95311c2c7887a76863f500026092fb1dce0161dab577e559ef3569d" -dependencies = [ - "cfg-if 1.0.0", - "const_fn", - "crossbeam-utils 0.8.1", - "lazy_static", - "memoffset 0.6.1", - "scopeguard", -] - -[[package]] -name = "crossbeam-queue" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "maybe-uninit", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "lazy_static", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d96d1e189ef58269ebe5b97953da3274d83a93af647c2ddd6f9dab28cedb8d" -dependencies = [ - "autocfg", - "cfg-if 1.0.0", - "lazy_static", -] - -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "crypto-mac" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" -dependencies = [ - "generic-array 0.12.3", - "subtle 1.0.0", -] - -[[package]] -name = "crypto-mac" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" 
-dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.0", -] - -[[package]] -name = "ct-logs" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c8e13110a84b6315df212c045be706af261fd364791cad863285439ebba672e" -dependencies = [ - "sct", -] - -[[package]] -name = "ctor" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10bcb9d7dcbf7002aaffbb53eac22906b64cdcc127971dcc387d8eb7c95d5560" -dependencies = [ - "quote", - "syn", -] - -[[package]] -name = "cuckoofilter" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b810a8449931679f64cd7eef1bbd0fa315801b6d5d9cdc1ace2804d6529eee18" -dependencies = [ - "byteorder", - "fnv", - "rand 0.7.3", -] - -[[package]] -name = "curve25519-dalek" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d85653f070353a16313d0046f173f70d1aadd5b42600a14de626f0dfb3473a5" -dependencies = [ - "byteorder", - "digest 0.8.1", - "rand_core 0.5.1", - "subtle 2.4.0", - "zeroize", -] - -[[package]] -name = "curve25519-dalek" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8492de420e9e60bc9a1d66e2dbb91825390b738a388606600663fc529b4b307" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle 2.4.0", - "zeroize", -] - -[[package]] -name = "data-encoding" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "993a608597367c6377b258c25d7120740f00ed23a2252b729b1932dd7866f908" - -[[package]] -name = "data-encoding-macro" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a94feec3d2ba66c0b6621bca8bc6f68415b1e5c69af3586fdd0af9fd9f29b17" -dependencies = [ - "data-encoding", - "data-encoding-macro-internal", -] - -[[package]] -name = "data-encoding-macro-internal" -version = "0.1.9" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f83e699727abca3c56e187945f303389590305ab2f0185ea445aa66e8d5f2a" -dependencies = [ - "data-encoding", - "syn", -] - -[[package]] -name = "derive_more" -version = "0.99.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array 0.12.3", -] - -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "directories" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8fed639d60b58d0f53498ab13d26f621fd77569cc6edb031f4cc36a2ad9da0f" -dependencies = [ - "dirs-sys", -] - -[[package]] -name = "directories-next" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc" -dependencies = [ - "cfg-if 1.0.0", - "dirs-sys-next", -] - -[[package]] -name = "dirs-sys" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" -dependencies = [ - "libc", - "redox_users 0.3.5", - "winapi 0.3.9", -] - -[[package]] -name = "dirs-sys-next" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" -dependencies = [ - "libc", - "redox_users 0.4.0", - "winapi 0.3.9", -] - -[[package]] -name = 
"discard" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" - -[[package]] -name = "dns-parser" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea" -dependencies = [ - "byteorder", - "quick-error 1.2.3", -] - -[[package]] -name = "doc-comment" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" - -[[package]] -name = "dyn-clonable" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" -dependencies = [ - "dyn-clonable-impl", - "dyn-clone", -] - -[[package]] -name = "dyn-clonable-impl" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "dyn-clone" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d55796afa1b20c2945ca8eabfc421839f2b766619209f1ede813cf2484f31804" - -[[package]] -name = "ed25519" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37c66a534cbb46ab4ea03477eae19d5c22c01da8258030280b7bd9d8433fb6ef" -dependencies = [ - "signature", -] - -[[package]] -name = "ed25519-dalek" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" -dependencies = [ - "curve25519-dalek 3.0.0", - "ed25519", - "rand 0.7.3", - "serde", - "sha2 0.9.2", - "zeroize", -] - -[[package]] -name = "either" -version = "1.6.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" - -[[package]] -name = "env_logger" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -dependencies = [ - "atty", - "humantime 1.3.0", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "env_logger" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17392a012ea30ef05a610aa97dfb49496e71c9f676b27879922ea5bdf60d9d3f" -dependencies = [ - "atty", - "humantime 2.0.1", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "environmental" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6576a1755ddffd988788025e75bce9e74b018f7cc226198fe931d077911c6d7e" - -[[package]] -name = "erased-serde" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0465971a8cc1fa2455c8465aaa377131e1f1cf4983280f474a13e68793aa770c" -dependencies = [ - "serde", -] - -[[package]] -name = "errno" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa68f2fb9cae9d37c9b2b3584aba698a2e97f72d7aef7b9f7aa71d8b54ce46fe" -dependencies = [ - "errno-dragonfly", - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "errno-dragonfly" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067" -dependencies = [ - "gcc", - "libc", -] - -[[package]] -name = "ethabi" -version = "14.0.0" -source = "git+https://github.com/paritytech/ethabi.git?branch=td-eth-types-11#fe76a0547de3785e40215da7aa10b334e7a6e553" -dependencies = [ - "anyhow", - "ethereum-types", - "hex", - "serde", - "serde_json", - "sha3", - "thiserror", - "uint", -] - -[[package]] -name = "ethabi-contract" 
-version = "11.0.0" -source = "git+https://github.com/paritytech/ethabi.git?branch=td-eth-types-11#fe76a0547de3785e40215da7aa10b334e7a6e553" - -[[package]] -name = "ethabi-derive" -version = "14.0.0" -source = "git+https://github.com/paritytech/ethabi.git?branch=td-eth-types-11#fe76a0547de3785e40215da7aa10b334e7a6e553" -dependencies = [ - "anyhow", - "ethabi", - "heck", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "ethbloom" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a621dcebea74f2a6f2002d0a885c81ccf6cbdf86760183316a7722b5707ca4" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "tiny-keccak", -] - -[[package]] -name = "ethbloom" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "779864b9c7f7ead1f092972c3257496c6a84b46dba2ce131dd8a282cb2cc5972" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "impl-serde", - "tiny-keccak", -] - -[[package]] -name = "ethereum-contract-builtin" -version = "0.1.0" -dependencies = [ - "ethereum-types", - "finality-grandpa", - "hex", - "log", - "parity-scale-codec", - "rialto-runtime", - "sc-finality-grandpa", - "sp-blockchain", - "sp-core", - "sp-finality-grandpa", - "sp-runtime", -] - -[[package]] -name = "ethereum-poa-relay" -version = "0.1.0" -dependencies = [ - "ansi_term 0.12.1", - "async-std", - "async-trait", - "bp-currency-exchange", - "bp-eth-poa", - "clap", - "env_logger 0.8.3", - "ethabi", - "ethabi-contract", - "ethabi-derive", - "exchange-relay", - "frame-system", - "futures 0.3.12", - "headers-relay", - "hex", - "hex-literal 0.3.1", - "libsecp256k1", - "log", - "messages-relay", - "num-traits", - "pallet-transaction-payment", - "parity-scale-codec", - "relay-ethereum-client", - "relay-rialto-client", - "relay-substrate-client", - "relay-utils", - "rialto-runtime", - "serde", - "serde_json", - "sp-core", - "sp-keyring", - "sp-runtime", - "substrate-prometheus-endpoint", - 
"time 0.2.25", -] - -[[package]] -name = "ethereum-types" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f64b5df66a228d85e4b17e5d6c6aa43b0310898ffe8a85988c4c032357aaabfd" -dependencies = [ - "ethbloom 0.11.0", - "fixed-hash", - "impl-rlp", - "impl-serde", - "primitive-types", - "uint", -] - -[[package]] -name = "event-listener" -version = "2.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" - -[[package]] -name = "exchange-relay" -version = "0.1.0" -dependencies = [ - "async-std", - "async-trait", - "backoff", - "futures 0.3.12", - "log", - "num-traits", - "parking_lot 0.11.1", - "relay-utils", -] - -[[package]] -name = "exit-future" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" -dependencies = [ - "futures 0.3.12", -] - -[[package]] -name = "failure" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" -dependencies = [ - "backtrace", - "failure_derive", -] - -[[package]] -name = "failure_derive" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] - -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - -[[package]] -name = "fallible-iterator" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" - -[[package]] -name = "fastrand" -version = "1.4.0" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca5faf057445ce5c9d4329e382b2ce7ca38550ef3b73a5348362d5f24e0c7fe3" -dependencies = [ - "instant", -] - -[[package]] -name = "fdlimit" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c4c9e43643f5a3be4ca5b67d26b98031ff9db6806c3440ae32e02e3ceac3f1b" -dependencies = [ - "libc", -] - -[[package]] -name = "file-per-thread-logger" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fdbe0d94371f9ce939b555dd342d0686cc4c0cadbcd4b61d70af5ff97eb4126" -dependencies = [ - "env_logger 0.7.1", - "log", -] - -[[package]] -name = "finality-grandpa" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6447e2f8178843749e8c8003206def83ec124a7859475395777a28b5338647c" -dependencies = [ - "either", - "futures 0.3.12", - "futures-timer 3.0.2", - "log", - "num-traits", - "parity-scale-codec", - "parking_lot 0.11.1", -] - -[[package]] -name = "fixed-hash" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" -dependencies = [ - "byteorder", - "rand 0.8.3", - "rustc-hex", - "static_assertions", -] - -[[package]] -name = "fixedbitset" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" - -[[package]] -name = "flate2" -version = "1.0.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7411863d55df97a419aa64cb4d2f167103ea9d767e2c54a1868b7ac3f6b47129" -dependencies = [ - "cfg-if 1.0.0", - "crc32fast", - "libc", - "libz-sys", - "miniz_oxide", -] - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - 
-[[package]] -name = "fork-tree" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "form_urlencoded" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ece68d15c92e84fa4f19d3780f1294e5ca82a78a6d515f1efaabcc144688be00" -dependencies = [ - "matches", - "percent-encoding 2.1.0", -] - -[[package]] -name = "frame-benchmarking" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "frame-support", - "frame-system", - "linregress", - "parity-scale-codec", - "paste 1.0.4", - "sp-api", - "sp-io", - "sp-runtime", - "sp-runtime-interface", - "sp-std", - "sp-storage", -] - -[[package]] -name = "frame-benchmarking-cli" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "Inflector", - "chrono", - "frame-benchmarking", - "handlebars", - "parity-scale-codec", - "sc-cli", - "sc-client-db", - "sc-executor", - "sc-service", - "serde", - "sp-core", - "sp-externalities", - "sp-keystore", - "sp-runtime", - "sp-state-machine", - "structopt", -] - -[[package]] -name = "frame-executive" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "frame-support", - "frame-system", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", - "sp-tracing", -] - -[[package]] -name = "frame-metadata" -version = "13.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "parity-scale-codec", - "serde", - "sp-core", - "sp-std", -] - -[[package]] -name = "frame-support" -version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "bitflags", - "frame-metadata", - "frame-support-procedural", - "impl-trait-for-tuples", - "log", - "once_cell", - "parity-scale-codec", - "paste 1.0.4", - "serde", - "smallvec 1.6.1", - "sp-arithmetic", - "sp-core", - "sp-inherents", - "sp-io", - "sp-runtime", - "sp-staking", - "sp-state-machine", - "sp-std", - "sp-tracing", -] - -[[package]] -name = "frame-support-procedural" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "Inflector", - "frame-support-procedural-tools", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural-tools" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "frame-support-procedural-tools-derive", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural-tools-derive" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-system" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "frame-support", - "impl-trait-for-tuples", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", - "sp-version", -] - -[[package]] -name = "frame-system-rpc-runtime-api" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "parity-scale-codec", - "sp-api", -] - -[[package]] -name = "fs-swap" -version = "0.2.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5839fda247e24ca4919c87c71dd5ca658f1f39e4f06829f80e3f15c3bafcfc2c" -dependencies = [ - "lazy_static", - "libc", - "libloading", - "winapi 0.3.9", -] - -[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -dependencies = [ - "bitflags", - "fuchsia-zircon-sys", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - -[[package]] -name = "funty" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" - -[[package]] -name = "futures" -version = "0.1.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c7e4c2612746b0df8fed4ce0c69156021b704c9aefa360311c04e6e9e002eed" - -[[package]] -name = "futures" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.12" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65" - -[[package]] -name = "futures-cpupool" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" -dependencies = [ - "futures 0.1.30", - "num_cpus", -] - -[[package]] -name = "futures-diagnose" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdcef58a173af8148b182684c9f2d5250875adbcaff7b5794073894f9d8634a9" -dependencies = [ - "futures 0.1.30", - "futures 0.3.12", - "lazy_static", - "log", - "parking_lot 0.9.0", - "pin-project 0.4.27", - "serde", - "serde_json", -] - -[[package]] -name = "futures-executor" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", - "num_cpus", -] - -[[package]] -name = "futures-io" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" - -[[package]] -name = "futures-lite" -version = "1.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4481d0cd0de1d204a4fa55e7d45f07b1d958abcb06714b3446438e2eff695fb" -dependencies = [ - "fastrand", - "futures-core", - "futures-io", - "memchr", - "parking", - "pin-project-lite 0.2.4", - "waker-fn", -] - -[[package]] -name = "futures-macro" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "futures-rustls" -version = "0.21.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a1387e07917c711fb4ee4f48ea0adb04a3c9739e53ef85bf43ae1edc2937a8b" -dependencies = [ - "futures-io", - "rustls 0.19.0", - "webpki", -] - -[[package]] -name = "futures-sink" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6" - -[[package]] -name = "futures-task" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86" -dependencies = [ - "once_cell", -] - -[[package]] -name = "futures-timer" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1de7508b218029b0f01662ed8f61b1c964b3ae99d6f25462d0f55a595109df6" - -[[package]] -name = "futures-timer" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" - -[[package]] -name = "futures-util" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" -dependencies = [ - "futures 0.1.30", - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite 0.2.4", - "pin-utils", - "proc-macro-hack", - "proc-macro-nested", - "slab", -] - -[[package]] -name = "gcc" -version = "0.3.55" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" - -[[package]] -name = "generator" -version = "0.6.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cdc09201b2e8ca1b19290cf7e65de2246b8e91fb6874279722189c4de7b94dc" -dependencies = [ - "cc", - "libc", - "log", - "rustc_version", - "winapi 0.3.9", -] - 
-[[package]] -name = "generic-array" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" -dependencies = [ - "typenum", -] - -[[package]] -name = "generic-array" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ed1e761351b56f54eb9dcd0cfaca9fd0daecf93918e1cfc01c8a3d26ee7adcd" -dependencies = [ - "typenum", -] - -[[package]] -name = "generic-array" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.1.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4060f4657be78b8e766215b02b18a2e862d83745545de804638e2b545e81aee6" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", -] - -[[package]] -name = "ghash" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" -dependencies = [ - "opaque-debug 0.3.0", - "polyval", -] - -[[package]] -name = "gimli" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" -dependencies = [ - "fallible-iterator", - "indexmap", - "stable_deref_trait", -] - -[[package]] -name = "glob" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" - -[[package]] -name = "globset" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c152169ef1e421390738366d2f796655fec62621dabbd0fd476f905934061e4a" -dependencies = [ - "aho-corasick", - "bstr", - "fnv", - "log", - "regex", -] - -[[package]] -name = "gloo-timers" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47204a46aaff920a1ea58b11d03dec6f704287d27561724a4631e450654a891f" -dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "h2" -version = "0.1.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" -dependencies = [ - "byteorder", - "bytes 0.4.12", - "fnv", - "futures 0.1.30", - "http 0.1.21", - "indexmap", - "log", - "slab", - "string", - "tokio-io", -] - -[[package]] -name = "h2" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" -dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 0.2.2", - "indexmap", - "slab", - "tokio 0.2.24", - "tokio-util", - "tracing", - "tracing-futures", -] - -[[package]] -name = "handlebars" -version = "3.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2764f9796c0ddca4b82c07f25dd2cb3db30b9a8f47940e78e1c883d9e95c3db9" -dependencies = [ - "log", - "pest", - "pest_derive", - "quick-error 2.0.0", - "serde", - "serde_json", -] - -[[package]] -name = "hash-db" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" - -[[package]] -name = "hash256-std-hasher" -version = "0.15.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" -dependencies = [ - "crunchy", -] - -[[package]] -name = "hashbrown" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96282e96bfcd3da0d3aa9938bedf1e50df3269b6db08b4876d2da0bb1a0841cf" -dependencies = [ - "ahash 0.3.8", - "autocfg", -] - -[[package]] -name = "hashbrown" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" -dependencies = [ - "ahash 0.4.7", -] - -[[package]] -name = "headers-relay" -version = "0.1.0" -dependencies = [ - "async-std", - "async-trait", - "backoff", - "futures 0.3.12", - "linked-hash-map", - "log", - "num-traits", - "parking_lot 0.11.1", - "relay-utils", -] - -[[package]] -name = "heck" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" -dependencies = [ - "unicode-segmentation", -] - -[[package]] -name = "hermit-abi" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" -dependencies = [ - "libc", -] - -[[package]] -name = "hex" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" - -[[package]] -name = "hex-literal" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "961de220ec9a91af2e1e5bd80d02109155695e516771762381ef8581317066e0" -dependencies = [ - "hex-literal-impl", - "proc-macro-hack", -] - -[[package]] -name = "hex-literal" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5af1f635ef1bc545d78392b136bfe1c9809e029023c84a3638a864a10b8819c8" - -[[package]] -name = "hex-literal-impl" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "853f769599eb31de176303197b7ba4973299c38c7a7604a6bc88c3eef05b9b46" -dependencies = [ - "proc-macro-hack", -] - -[[package]] -name = "hex_fmt" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" - -[[package]] -name = "hmac" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" -dependencies = [ - "crypto-mac 0.7.0", - "digest 0.8.1", -] - -[[package]] -name = "hmac" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" -dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", -] - -[[package]] -name = "hmac-drbg" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" -dependencies = [ - "digest 0.8.1", - "generic-array 0.12.3", - "hmac 0.7.1", -] - -[[package]] -name = "http" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6ccf5ede3a895d8856620237b2f02972c1bbc78d2965ad7fe8838d4a0ed41f0" -dependencies = [ - "bytes 0.4.12", - "fnv", - "itoa", -] - -[[package]] -name = "http" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84129d298a6d57d246960ff8eb831ca4af3f96d29e2e28848dae275408658e26" -dependencies = [ - "bytes 0.5.6", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.30", - "http 0.1.21", - "tokio-buf", -] - -[[package]] -name = "http-body" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" -dependencies = [ - "bytes 0.5.6", - "http 0.2.2", -] - -[[package]] -name = "httparse" -version = "1.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" - -[[package]] -name = "httpdate" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" - -[[package]] -name = "humantime" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -dependencies = [ - "quick-error 1.2.3", -] - -[[package]] -name = "humantime" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c1ad908cc71012b7bea4d0c53ba96a8cba9962f048fa68d143376143d863b7a" - -[[package]] -name = "hyper" -version = "0.12.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dbe6ed1438e1f8ad955a4701e9a944938e9519f6888d12d8558b645e247d5f6" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.30", - "futures-cpupool", - "h2 0.1.26", - "http 0.1.21", - "http-body 0.1.0", - "httparse", - "iovec", - "itoa", - "log", - "net2", - "rustc_version", - "time 0.1.44", - "tokio 0.1.22", - "tokio-buf", - "tokio-executor", - "tokio-io", - "tokio-reactor", - "tokio-tcp", - "tokio-threadpool", - "tokio-timer", - "want 0.2.0", -] - -[[package]] -name = "hyper" -version = "0.13.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" -dependencies = [ - "bytes 0.5.6", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.2.7", - "http 0.2.2", - "http-body 0.3.1", - "httparse", - "httpdate", - "itoa", - "pin-project 1.0.5", - "socket2", - "tokio 0.2.24", - "tower-service", - "tracing", - "want 0.3.0", -] - -[[package]] -name = "hyper-rustls" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37743cc83e8ee85eacfce90f2f4102030d9ff0a95244098d781e9bee4a90abb6" -dependencies = [ - "bytes 0.5.6", - "ct-logs", - "futures-util", - "hyper 0.13.10", - "log", - "rustls 0.18.1", - "rustls-native-certs", - "tokio 0.2.24", - "tokio-rustls", - "webpki", -] - -[[package]] -name = "idna" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "idna" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "if-addrs" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28538916eb3f3976311f5dfbe67b5362d0add1293d0a9cad17debf86f8e3aa48" -dependencies = [ - "if-addrs-sys", - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "if-addrs-sys" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de74b9dd780476e837e5eb5ab7c88b49ed304126e412030a0adba99c8efe79ea" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "if-watch" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b8538953a3f0d0d3868f0a706eb4273535e10d72acb5c82c1c23ae48835c85" 
-dependencies = [ - "async-io", - "futures 0.3.12", - "futures-lite", - "if-addrs", - "ipnet", - "libc", - "log", - "winapi 0.3.9", -] - -[[package]] -name = "impl-codec" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df170efa359aebdd5cb7fe78edcc67107748e4737bdca8a8fb40d15ea7a877ed" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-rlp" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" -dependencies = [ - "rlp 0.5.0", -] - -[[package]] -name = "impl-serde" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b47ca4d2b6931707a55fce5cf66aff80e2178c8b63bbb4ecb5695cbc870ddf6f" -dependencies = [ - "serde", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5dacb10c5b3bb92d46ba347505a9041e676bb20ad220101326bffb0c93031ee" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "indexmap" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" -dependencies = [ - "autocfg", - "hashbrown 0.9.1", - "serde", -] - -[[package]] -name = "instant" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "integer-sqrt" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" -dependencies = [ - "num-traits", -] - -[[package]] -name = "intervalier" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"64fa110ec7b8f493f416eed552740d10e7030ad5f63b2308f82c9608ec2df275" -dependencies = [ - "futures 0.3.12", - "futures-timer 2.0.2", -] - -[[package]] -name = "iovec" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -dependencies = [ - "libc", -] - -[[package]] -name = "ip_network" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ee15951c035f79eddbef745611ec962f63f4558f1dadf98ab723cc603487c6f" - -[[package]] -name = "ipnet" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" - -[[package]] -name = "itertools" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" - -[[package]] -name = "jobserver" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2" -dependencies = [ - "libc", -] - -[[package]] -name = "js-sys" -version = "0.3.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d7383929f7c9c7c2d0fa596f325832df98c3704f2c60553080f7127a58175" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "jsonrpc-client-transports" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "489b9c612e60c766f751ab40fcb43cbb55a1e10bb44a9b4307ed510ca598cbd7" -dependencies = [ - "failure", - "futures 0.1.30", - "jsonrpc-core 15.1.0", - "jsonrpc-pubsub", - "log", - "serde", - "serde_json", - 
"url 1.7.2", -] - -[[package]] -name = "jsonrpc-core" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0745a6379e3edc893c84ec203589790774e4247420033e71a76d3ab4687991fa" -dependencies = [ - "futures 0.1.30", - "log", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "jsonrpc-core" -version = "17.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07569945133257ff557eb37b015497104cea61a2c9edaf126c1cbd6e8332397f" -dependencies = [ - "futures 0.3.12", - "log", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "jsonrpc-core-client" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f764902d7b891344a0acb65625f32f6f7c6db006952143bd650209fbe7d94db" -dependencies = [ - "jsonrpc-client-transports", -] - -[[package]] -name = "jsonrpc-derive" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99a847f9ec7bb52149b2786a17c9cb260d6effc6b8eeb8c16b343a487a7563a3" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "jsonrpc-http-server" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb5c4513b7b542f42da107942b7b759f27120b5cc894729f88254b28dff44b7" -dependencies = [ - "hyper 0.12.35", - "jsonrpc-core 15.1.0", - "jsonrpc-server-utils", - "log", - "net2", - "parking_lot 0.10.2", - "unicase", -] - -[[package]] -name = "jsonrpc-ipc-server" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf50e53e4eea8f421a7316c5f63e395f7bc7c4e786a6dc54d76fab6ff7aa7ce7" -dependencies = [ - "jsonrpc-core 15.1.0", - "jsonrpc-server-utils", - "log", - "parity-tokio-ipc", - "parking_lot 0.10.2", - "tokio-service", -] - -[[package]] -name = "jsonrpc-pubsub" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "639558e0604013be9787ae52f798506ae42bf4220fe587bdc5625871cc8b9c77" -dependencies = [ - "jsonrpc-core 15.1.0", - "log", - "parking_lot 0.10.2", - "rand 0.7.3", - "serde", -] - -[[package]] -name = "jsonrpc-server-utils" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72f1f3990650c033bd8f6bd46deac76d990f9bbfb5f8dc8c4767bf0a00392176" -dependencies = [ - "bytes 0.4.12", - "globset", - "jsonrpc-core 15.1.0", - "lazy_static", - "log", - "tokio 0.1.22", - "tokio-codec", - "unicase", -] - -[[package]] -name = "jsonrpc-ws-server" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6596fe75209b73a2a75ebe1dce4e60e03b88a2b25e8807b667597f6315150d22" -dependencies = [ - "jsonrpc-core 15.1.0", - "jsonrpc-server-utils", - "log", - "parity-ws", - "parking_lot 0.10.2", - "slab", -] - -[[package]] -name = "jsonrpsee" -version = "1.0.0" -source = "git+https://github.com/svyatonik/jsonrpsee.git?branch=shared-client-in-rpc-api#1597b09c4a9140cd0f1320948c7a8fb237af58fb" -dependencies = [ - "async-std", - "async-tls", - "bs58 0.3.1", - "bytes 0.5.6", - "fnv", - "futures 0.3.12", - "futures-timer 3.0.2", - "globset", - "hashbrown 0.7.2", - "hyper 0.13.10", - "jsonrpsee-proc-macros", - "lazy_static", - "log", - "parking_lot 0.10.2", - "pin-project 0.4.27", - "rand 0.7.3", - "serde", - "serde_json", - "smallvec 1.6.1", - "soketto 0.3.2", - "thiserror", - "tokio 0.2.24", - "unicase", - "url 2.2.0", - "webpki", -] - -[[package]] -name = "jsonrpsee-proc-macros" -version = "1.0.0" -source = "git+https://github.com/svyatonik/jsonrpsee.git?branch=shared-client-in-rpc-api#1597b09c4a9140cd0f1320948c7a8fb237af58fb" -dependencies = [ - "Inflector", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "keccak" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" - -[[package]] 
-name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - -[[package]] -name = "kv-log-macro" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" -dependencies = [ - "log", -] - -[[package]] -name = "kvdb" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8891bd853eff90e33024195d79d578dc984c82f9e0715fcd2b525a0c19d52811" -dependencies = [ - "parity-util-mem", - "smallvec 1.6.1", -] - -[[package]] -name = "kvdb-memorydb" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a0da8e08caf08d384a620ec19bb6c9b85c84137248e202617fb91881f25912" -dependencies = [ - "kvdb", - "parity-util-mem", - "parking_lot 0.11.1", -] - -[[package]] -name = "kvdb-rocksdb" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34446c373ccc494c2124439281c198c7636ccdc2752c06722bbffd56d459c1e4" -dependencies = [ - "fs-swap", - "kvdb", - "log", - "num_cpus", - "owning_ref", - "parity-util-mem", - "parking_lot 0.11.1", - "regex", - "rocksdb", - "smallvec 1.6.1", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - -[[package]] -name = "leb128" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" - -[[package]] 
-name = "libc" -version = "0.2.81" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" - -[[package]] -name = "libloading" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" -dependencies = [ - "cc", - "winapi 0.3.9", -] - -[[package]] -name = "libm" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" - -[[package]] -name = "libp2p" -version = "0.35.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc225a49973cf9ab10d0cdd6a4b8f0cda299df9b760824bbb623f15f8f0c95a" -dependencies = [ - "atomic", - "bytes 1.0.0", - "futures 0.3.12", - "lazy_static", - "libp2p-core", - "libp2p-deflate", - "libp2p-dns", - "libp2p-floodsub", - "libp2p-gossipsub", - "libp2p-identify", - "libp2p-kad", - "libp2p-mdns", - "libp2p-mplex", - "libp2p-noise", - "libp2p-ping", - "libp2p-plaintext", - "libp2p-pnet", - "libp2p-request-response", - "libp2p-swarm", - "libp2p-swarm-derive", - "libp2p-tcp", - "libp2p-uds", - "libp2p-wasm-ext", - "libp2p-websocket", - "libp2p-yamux", - "parity-multiaddr", - "parking_lot 0.11.1", - "pin-project 1.0.5", - "smallvec 1.6.1", - "wasm-timer", -] - -[[package]] -name = "libp2p-core" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a2d56aadc2c2bf22cd7797f86e56a65b5b3994a0136b65be3106938acae7a26" -dependencies = [ - "asn1_der", - "bs58 0.4.0", - "ed25519-dalek", - "either", - "fnv", - "futures 0.3.12", - "futures-timer 3.0.2", - "lazy_static", - "libsecp256k1", - "log", - "multihash", - "multistream-select", - "parity-multiaddr", - "parking_lot 0.11.1", - "pin-project 1.0.5", - "prost", - "prost-build", - "rand 0.7.3", - "ring", - "rw-stream-sink", - "sha2 0.9.2", - 
"smallvec 1.6.1", - "thiserror", - "unsigned-varint 0.7.0", - "void", - "zeroize", -] - -[[package]] -name = "libp2p-deflate" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d42eed63305f0420736fa487f9acef720c4528bd7852a6a760f5ccde4813345" -dependencies = [ - "flate2", - "futures 0.3.12", - "libp2p-core", -] - -[[package]] -name = "libp2p-dns" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5153b6db68fd4baa3b304e377db744dd8fea8ff4e4504509ee636abcde88d3e3" -dependencies = [ - "futures 0.3.12", - "libp2p-core", - "log", -] - -[[package]] -name = "libp2p-floodsub" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3c63dfa06581b24b1d12bf9815b43689a784424be217d6545c800c7c75a207f" -dependencies = [ - "cuckoofilter", - "fnv", - "futures 0.3.12", - "libp2p-core", - "libp2p-swarm", - "log", - "prost", - "prost-build", - "rand 0.7.3", - "smallvec 1.6.1", -] - -[[package]] -name = "libp2p-gossipsub" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "502dc5fcbfec4aa1c63ef3f7307ffe20e90c1a1387bf23ed0bec087f2dde58a1" -dependencies = [ - "asynchronous-codec 0.6.0", - "base64 0.13.0", - "byteorder", - "bytes 1.0.0", - "fnv", - "futures 0.3.12", - "hex_fmt", - "libp2p-core", - "libp2p-swarm", - "log", - "prost", - "prost-build", - "rand 0.7.3", - "regex", - "sha2 0.9.2", - "smallvec 1.6.1", - "unsigned-varint 0.7.0", - "wasm-timer", -] - -[[package]] -name = "libp2p-identify" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b40fb36a059b7a8cce1514bd8b546fa612e006c9937caa7f5950cb20021fe91e" -dependencies = [ - "futures 0.3.12", - "libp2p-core", - "libp2p-swarm", - "log", - "prost", - "prost-build", - "smallvec 1.6.1", - "wasm-timer", -] - -[[package]] -name = "libp2p-kad" -version = "0.28.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3da6c9acbcc05f93235d201d7d45ef4e8b88a45d8836f98becd8b4d443f066" -dependencies = [ - "arrayvec 0.5.2", - "asynchronous-codec 0.6.0", - "bytes 1.0.0", - "either", - "fnv", - "futures 0.3.12", - "libp2p-core", - "libp2p-swarm", - "log", - "prost", - "prost-build", - "rand 0.7.3", - "sha2 0.9.2", - "smallvec 1.6.1", - "uint", - "unsigned-varint 0.7.0", - "void", - "wasm-timer", -] - -[[package]] -name = "libp2p-mdns" -version = "0.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e9e6374814d1b118d97ccabdfc975c8910bd16dc38a8bc058eeb08bf2080fe1" -dependencies = [ - "async-io", - "data-encoding", - "dns-parser", - "futures 0.3.12", - "if-watch", - "lazy_static", - "libp2p-core", - "libp2p-swarm", - "log", - "rand 0.7.3", - "smallvec 1.6.1", - "socket2", - "void", -] - -[[package]] -name = "libp2p-mplex" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350ce8b3923594aedabd5d6e3f875d058435052a29c3f32df378bc70d10be464" -dependencies = [ - "asynchronous-codec 0.6.0", - "bytes 1.0.0", - "futures 0.3.12", - "libp2p-core", - "log", - "nohash-hasher", - "parking_lot 0.11.1", - "rand 0.7.3", - "smallvec 1.6.1", - "unsigned-varint 0.7.0", -] - -[[package]] -name = "libp2p-noise" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4aca322b52a0c5136142a7c3971446fb1e9964923a526c9cc6ef3b7c94e57778" -dependencies = [ - "bytes 1.0.0", - "curve25519-dalek 3.0.0", - "futures 0.3.12", - "lazy_static", - "libp2p-core", - "log", - "prost", - "prost-build", - "rand 0.7.3", - "sha2 0.9.2", - "snow", - "static_assertions", - "x25519-dalek", - "zeroize", -] - -[[package]] -name = "libp2p-ping" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f3813276d0708c8db0f500d8beda1bda9ad955723b9cb272c41f4727256f73c" -dependencies = [ - "futures 0.3.12", - 
"libp2p-core", - "libp2p-swarm", - "log", - "rand 0.7.3", - "void", - "wasm-timer", -] - -[[package]] -name = "libp2p-plaintext" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d58defcadb646ae4b033e130b48d87410bf76394dc3335496cae99dac803e61" -dependencies = [ - "asynchronous-codec 0.6.0", - "bytes 1.0.0", - "futures 0.3.12", - "libp2p-core", - "log", - "prost", - "prost-build", - "unsigned-varint 0.7.0", - "void", -] - -[[package]] -name = "libp2p-pnet" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce3374f3b28162db9d3442c9347c4f14cb01e8290052615c7d341d40eae0599" -dependencies = [ - "futures 0.3.12", - "log", - "pin-project 1.0.5", - "rand 0.7.3", - "salsa20", - "sha3", -] - -[[package]] -name = "libp2p-request-response" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10e5552827c33d8326502682da73a0ba4bfa40c1b55b216af3c303f32169dd89" -dependencies = [ - "async-trait", - "bytes 1.0.0", - "futures 0.3.12", - "libp2p-core", - "libp2p-swarm", - "log", - "lru", - "minicbor", - "rand 0.7.3", - "smallvec 1.6.1", - "unsigned-varint 0.7.0", - "wasm-timer", -] - -[[package]] -name = "libp2p-swarm" -version = "0.27.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7955b973e1fd2bd61ffd43ce261c1223f61f4aacd5bae362a924993f9a25fd98" -dependencies = [ - "either", - "futures 0.3.12", - "libp2p-core", - "log", - "rand 0.7.3", - "smallvec 1.6.1", - "void", - "wasm-timer", -] - -[[package]] -name = "libp2p-swarm-derive" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c564ebaa36a64839f51eaddb0243aaaa29ce64affb56129193cc3248b72af273" -dependencies = [ - "quote", - "syn", -] - -[[package]] -name = "libp2p-tcp" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"88a5aef80e519a6cb8e2663605142f97baaaea1a252eecbf8756184765f7471b" -dependencies = [ - "async-io", - "futures 0.3.12", - "futures-timer 3.0.2", - "if-watch", - "ipnet", - "libc", - "libp2p-core", - "log", - "socket2", -] - -[[package]] -name = "libp2p-uds" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80ac51ce419f60be966e02103c17f67ff5dc4422ba83ba54d251d6c62a4ed487" -dependencies = [ - "async-std", - "futures 0.3.12", - "libp2p-core", - "log", -] - -[[package]] -name = "libp2p-wasm-ext" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6149c46cb76935c80bc8be6ec6e3ebd5f5e1679765a255fb34331d54610f15dd" -dependencies = [ - "futures 0.3.12", - "js-sys", - "libp2p-core", - "parity-send-wrapper", - "wasm-bindgen", - "wasm-bindgen-futures", -] - -[[package]] -name = "libp2p-websocket" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3b1c6a3431045da8b925ed83384e4c5163e14b990572307fca9c507435d4d22" -dependencies = [ - "either", - "futures 0.3.12", - "futures-rustls", - "libp2p-core", - "log", - "quicksink", - "rw-stream-sink", - "soketto 0.4.2", - "url 2.2.0", - "webpki-roots 0.21.0", -] - -[[package]] -name = "libp2p-yamux" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4819358c542a86ff95f6ae691efb4b94ddaf477079b01a686f5705b79bfc232a" -dependencies = [ - "futures 0.3.12", - "libp2p-core", - "parking_lot 0.11.1", - "thiserror", - "yamux", -] - -[[package]] -name = "librocksdb-sys" -version = "6.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b56f651c204634b936be2f92dbb42c36867e00ff7fe2405591f3b9fa66f09" -dependencies = [ - "bindgen", - "cc", - "glob", - "libc", -] - -[[package]] -name = "libsecp256k1" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" -dependencies = [ - "arrayref", - "crunchy", - "digest 0.8.1", - "hmac-drbg", - "rand 0.7.3", - "sha2 0.8.2", - "subtle 2.4.0", - "typenum", -] - -[[package]] -name = "libz-sys" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655" -dependencies = [ - "cc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "linked-hash-map" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" - -[[package]] -name = "linked_hash_set" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" -dependencies = [ - "linked-hash-map", -] - -[[package]] -name = "linregress" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d0ad4b5cc8385a881c561fac3501353d63d2a2b7a357b5064d71815c9a92724" -dependencies = [ - "nalgebra", - "statrs", -] - -[[package]] -name = "lock_api" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" -dependencies = [ - "scopeguard", -] - -[[package]] -name = "lock_api" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" -dependencies = [ - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" -dependencies = [ - "cfg-if 1.0.0", - "value-bag", -] - -[[package]] -name = "loom" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "a0e8460f2f2121162705187214720353c517b97bdfb3494c0b1e33d83ebe4bed" -dependencies = [ - "cfg-if 0.1.10", - "generator", - "scoped-tls", - "serde", - "serde_json", -] - -[[package]] -name = "lru" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3aae342b73d57ad0b8b364bd12584819f2c1fe9114285dfcf8b0722607671635" -dependencies = [ - "hashbrown 0.9.1", -] - -[[package]] -name = "mach" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" -dependencies = [ - "libc", -] - -[[package]] -name = "maplit" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" - -[[package]] -name = "matchers" -version = "0.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" -dependencies = [ - "regex-automata", -] - -[[package]] -name = "matches" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" - -[[package]] -name = "matrixmultiply" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "916806ba0031cd542105d916a97c8572e1fa6dd79c9c51e7eb43a09ec2dd84c1" -dependencies = [ - "rawpointer", -] - -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - -[[package]] -name = "memchr" -version = "2.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" - -[[package]] -name = "memmap2" -version = "0.2.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "04e3e85b970d650e2ae6d70592474087051c11c54da7f7b4949725c5735fbcc6" -dependencies = [ - "libc", -] - -[[package]] -name = "memoffset" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" -dependencies = [ - "autocfg", -] - -[[package]] -name = "memoffset" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87" -dependencies = [ - "autocfg", -] - -[[package]] -name = "memory-db" -version = "0.26.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "814bbecfc0451fc314eeea34f05bbcd5b98a7ad7af37faee088b86a1e633f1d4" -dependencies = [ - "hash-db", - "hashbrown 0.9.1", - "parity-util-mem", -] - -[[package]] -name = "memory_units" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" - -[[package]] -name = "merlin" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6feca46f4fa3443a01769d768727f10c10a20fdb65e52dc16a81f0c8269bb78" -dependencies = [ - "byteorder", - "keccak", - "rand_core 0.5.1", - "zeroize", -] - -[[package]] -name = "messages-relay" -version = "0.1.0" -dependencies = [ - "async-std", - "async-trait", - "bp-message-lane", - "futures 0.3.12", - "hex", - "log", - "parking_lot 0.11.1", - "relay-utils", -] - -[[package]] -name = "millau-bridge-node" -version = "0.1.0" -dependencies = [ - "bp-message-lane", - "bp-millau", - "bp-runtime", - "frame-benchmarking", - "frame-benchmarking-cli", - "jsonrpc-core 15.1.0", - "millau-runtime", - "pallet-message-lane", - "pallet-message-lane-rpc", - "sc-basic-authorship", - "sc-cli", - "sc-client-api", - "sc-consensus", - "sc-consensus-aura", - "sc-executor", - 
"sc-finality-grandpa", - "sc-finality-grandpa-rpc", - "sc-keystore", - "sc-rpc", - "sc-service", - "sc-transaction-pool", - "sp-consensus", - "sp-consensus-aura", - "sp-core", - "sp-finality-grandpa", - "sp-inherents", - "sp-runtime", - "structopt", - "substrate-build-script-utils", - "substrate-frame-rpc-system", - "vergen", -] - -[[package]] -name = "millau-runtime" -version = "0.1.0" -dependencies = [ - "bp-header-chain", - "bp-message-lane", - "bp-millau", - "bp-rialto", - "bp-runtime", - "bridge-runtime-common", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-rpc-runtime-api", - "hex-literal 0.3.1", - "pallet-aura", - "pallet-balances", - "pallet-bridge-call-dispatch", - "pallet-finality-verifier", - "pallet-grandpa", - "pallet-message-lane", - "pallet-randomness-collective-flip", - "pallet-session", - "pallet-shift-session-manager", - "pallet-substrate-bridge", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "parity-scale-codec", - "serde", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core", - "sp-finality-grandpa", - "sp-inherents", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std", - "sp-transaction-pool", - "sp-trie", - "sp-version", - "substrate-wasm-builder-runner", -] - -[[package]] -name = "minicbor" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0164190d1771b1458c3742075b057ed55d25cd9dfb930aade99315a1eb1fe12d" -dependencies = [ - "minicbor-derive", -] - -[[package]] -name = "minicbor-derive" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e071b3159835ee91df62dbdbfdd7ec366b7ea77c838f43aff4acda6b61bcfb9" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "miniz_oxide" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" -dependencies = [ 
- "adler", - "autocfg", -] - -[[package]] -name = "mio" -version = "0.6.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" -dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", - "libc", - "log", - "miow 0.2.2", - "net2", - "slab", - "winapi 0.2.8", -] - -[[package]] -name = "mio-extras" -version = "2.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" -dependencies = [ - "lazycell", - "log", - "mio", - "slab", -] - -[[package]] -name = "mio-named-pipes" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" -dependencies = [ - "log", - "mio", - "miow 0.3.6", - "winapi 0.3.9", -] - -[[package]] -name = "mio-uds" -version = "0.6.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" -dependencies = [ - "iovec", - "libc", - "mio", -] - -[[package]] -name = "miow" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" -dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", -] - -[[package]] -name = "miow" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" -dependencies = [ - "socket2", - "winapi 0.3.9", -] - -[[package]] -name = "more-asserts" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238" - -[[package]] -name = "multibase" -version = "0.8.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b78c60039650ff12e140ae867ef5299a58e19dded4d334c849dc7177083667e2" -dependencies = [ - "base-x", - "data-encoding", - "data-encoding-macro", -] - -[[package]] -name = "multihash" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dac63698b887d2d929306ea48b63760431ff8a24fac40ddb22f9c7f49fb7cab" -dependencies = [ - "blake2b_simd", - "blake2s_simd", - "blake3", - "digest 0.9.0", - "generic-array 0.14.4", - "multihash-derive", - "sha2 0.9.2", - "sha3", - "unsigned-varint 0.5.1", -] - -[[package]] -name = "multihash-derive" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85ee3c48cb9d9b275ad967a0e96715badc13c6029adb92f34fa17b9ff28fd81f" -dependencies = [ - "proc-macro-crate", - "proc-macro-error", - "proc-macro2", - "quote", - "syn", - "synstructure", -] - -[[package]] -name = "multimap" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" - -[[package]] -name = "multistream-select" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10ddc0eb0117736f19d556355464fc87efc8ad98b29e3fd84f02531eb6e90840" -dependencies = [ - "bytes 1.0.0", - "futures 0.3.12", - "log", - "pin-project 1.0.5", - "smallvec 1.6.1", - "unsigned-varint 0.6.0", -] - -[[package]] -name = "nalgebra" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b6147c3d50b4f3cdabfe2ecc94a0191fd3d6ad58aefd9664cf396285883486" -dependencies = [ - "approx", - "generic-array 0.13.2", - "matrixmultiply", - "num-complex", - "num-rational", - "num-traits", - "rand 0.7.3", - "rand_distr", - "simba", - "typenum", -] - -[[package]] -name = "names" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ef320dab323286b50fb5cdda23f61c796a72a89998ab565ca32525c5c556f2da" -dependencies = [ - "rand 0.3.23", -] - -[[package]] -name = "nb-connect" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8123a81538e457d44b933a02faf885d3fe8408806b23fa700e8f01c6c3a98998" -dependencies = [ - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "net2" -version = "0.2.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - -[[package]] -name = "nohash-hasher" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" - -[[package]] -name = "nom" -version = "5.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" -dependencies = [ - "memchr", - "version_check", -] - -[[package]] -name = "ntapi" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" -dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "num-bigint" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-complex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" -dependencies = [ - "autocfg", - 
"num-traits", -] - -[[package]] -name = "num-integer" -version = "0.1.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" -dependencies = [ - "autocfg", - "num-bigint", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" -dependencies = [ - "autocfg", - "libm", -] - -[[package]] -name = "num_cpus" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "object" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" -dependencies = [ - "crc32fast", - "indexmap", -] - -[[package]] -name = "once_cell" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" -dependencies = [ - "parking_lot 0.11.1", -] - -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - -[[package]] -name = "opaque-debug" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" - -[[package]] -name = "openssl-probe" -version = "0.1.2" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" - -[[package]] -name = "owning_ref" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce" -dependencies = [ - "stable_deref_trait", -] - -[[package]] -name = "pallet-aura" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "frame-support", - "frame-system", - "pallet-session", - "pallet-timestamp", - "parity-scale-codec", - "serde", - "sp-application-crypto", - "sp-consensus-aura", - "sp-runtime", - "sp-std", - "sp-timestamp", -] - -[[package]] -name = "pallet-authorship" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "parity-scale-codec", - "sp-authorship", - "sp-inherents", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-balances" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "parity-scale-codec", - "serde", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-bridge-call-dispatch" -version = "0.1.0" -dependencies = [ - "bp-message-dispatch", - "bp-runtime", - "frame-support", - "frame-system", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-bridge-currency-exchange" -version = "0.1.0" -dependencies = [ - "bp-currency-exchange", - "bp-header-chain", - "frame-benchmarking", - "frame-support", - "frame-system", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - 
"sp-std", -] - -[[package]] -name = "pallet-bridge-eth-poa" -version = "0.1.0" -dependencies = [ - "bp-eth-poa", - "frame-benchmarking", - "frame-support", - "frame-system", - "hex-literal 0.3.1", - "libsecp256k1", - "parity-scale-codec", - "serde", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-finality-verifier" -version = "0.1.0" -dependencies = [ - "bp-header-chain", - "bp-runtime", - "bp-test-utils", - "finality-grandpa", - "frame-support", - "frame-system", - "pallet-substrate-bridge", - "parity-scale-codec", - "serde", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-grandpa" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "pallet-authorship", - "pallet-session", - "parity-scale-codec", - "serde", - "sp-application-crypto", - "sp-core", - "sp-finality-grandpa", - "sp-runtime", - "sp-session", - "sp-staking", - "sp-std", -] - -[[package]] -name = "pallet-message-lane" -version = "0.1.0" -dependencies = [ - "bp-message-lane", - "bp-rialto", - "bp-runtime", - "frame-benchmarking", - "frame-support", - "frame-system", - "hex-literal 0.3.1", - "num-traits", - "pallet-balances", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-message-lane-rpc" -version = "0.1.0" -dependencies = [ - "bp-message-lane", - "bp-runtime", - "derive_more", - "futures 0.3.12", - "jsonrpc-core 15.1.0", - "jsonrpc-core-client", - "jsonrpc-derive", - "log", - "sc-client-api", - "sp-blockchain", - "sp-core", - "sp-runtime", - "sp-state-machine", - "sp-trie", -] - -[[package]] -name = "pallet-randomness-collective-flip" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "frame-support", - "frame-system", - 
"parity-scale-codec", - "safe-mix", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-session" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "pallet-timestamp", - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-session", - "sp-staking", - "sp-std", - "sp-trie", -] - -[[package]] -name = "pallet-shift-session-manager" -version = "0.1.0" -dependencies = [ - "frame-support", - "frame-system", - "pallet-session", - "parity-scale-codec", - "serde", - "sp-core", - "sp-runtime", - "sp-staking", - "sp-std", -] - -[[package]] -name = "pallet-substrate-bridge" -version = "0.1.0" -dependencies = [ - "bp-header-chain", - "bp-runtime", - "bp-test-utils", - "finality-grandpa", - "frame-support", - "frame-system", - "hash-db", - "parity-scale-codec", - "serde", - "sp-core", - "sp-finality-grandpa", - "sp-io", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-trie", -] - -[[package]] -name = "pallet-sudo" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "frame-support", - "frame-system", - "parity-scale-codec", - "serde", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-timestamp" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "impl-trait-for-tuples", - "parity-scale-codec", - "serde", - "sp-inherents", - "sp-runtime", - "sp-std", - "sp-timestamp", -] - -[[package]] -name = "pallet-transaction-payment" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "frame-support", - 
"frame-system", - "parity-scale-codec", - "serde", - "smallvec 1.6.1", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "parity-bytes" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b56e3a2420138bdb970f84dfb9c774aea80fa0e7371549eedec0d80c209c67" - -[[package]] -name = "parity-db" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "111e193c96758d476d272093a853882668da17489f76bf4361b8decae0b6c515" -dependencies = [ - "blake2-rfc", - "crc32fast", - "hex", - "libc", - "log", - "memmap2", - "parking_lot 0.11.1", - "rand 0.8.3", -] - -[[package]] -name = "parity-multiaddr" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c6805f98667a3828afb2ec2c396a8d610497e8d546f5447188aae47c5a79ec" -dependencies = [ - "arrayref", - "bs58 0.4.0", - "byteorder", - "data-encoding", - "multihash", - "percent-encoding 2.1.0", - "serde", - "static_assertions", - "unsigned-varint 0.7.0", - "url 2.2.0", -] - -[[package]] -name = "parity-scale-codec" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75c823fdae1bb5ff5708ee61a62697e6296175dc671710876871c853f48592b3" -dependencies = [ - "arrayvec 0.5.2", - "bitvec", - "byte-slice-cast", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9029e65297c7fd6d7013f0579e193ec2b34ae78eabca854c9417504ad8a2d214" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "parity-send-wrapper" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" - -[[package]] -name = "parity-tokio-ipc" -version = "0.4.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e57fea504fea33f9fbb5f49f378359030e7e026a6ab849bb9e8f0787376f1bf" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.30", - "libc", - "log", - "mio-named-pipes", - "miow 0.3.6", - "rand 0.7.3", - "tokio 0.1.22", - "tokio-named-pipes", - "tokio-uds", - "winapi 0.3.9", -] - -[[package]] -name = "parity-util-mem" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "664a8c6b8e62d8f9f2f937e391982eb433ab285b4cd9545b342441e04a906e42" -dependencies = [ - "cfg-if 1.0.0", - "hashbrown 0.9.1", - "impl-trait-for-tuples", - "parity-util-mem-derive", - "parking_lot 0.11.1", - "primitive-types", - "smallvec 1.6.1", - "winapi 0.3.9", -] - -[[package]] -name = "parity-util-mem-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" -dependencies = [ - "proc-macro2", - "syn", - "synstructure", -] - -[[package]] -name = "parity-wasm" -version = "0.41.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" - -[[package]] -name = "parity-ws" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e02a625dd75084c2a7024f07c575b61b782f729d18702dabb3cdbf31911dc61" -dependencies = [ - "byteorder", - "bytes 0.4.12", - "httparse", - "log", - "mio", - "mio-extras", - "rand 0.7.3", - "sha-1 0.8.2", - "slab", - "url 2.2.0", -] - -[[package]] -name = "parking" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" - -[[package]] -name = "parking_lot" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" -dependencies = [ - 
"lock_api 0.3.4", - "parking_lot_core 0.6.2", - "rustc_version", -] - -[[package]] -name = "parking_lot" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" -dependencies = [ - "lock_api 0.3.4", - "parking_lot_core 0.7.2", -] - -[[package]] -name = "parking_lot" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" -dependencies = [ - "instant", - "lock_api 0.4.2", - "parking_lot_core 0.8.2", -] - -[[package]] -name = "parking_lot_core" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" -dependencies = [ - "cfg-if 0.1.10", - "cloudabi", - "libc", - "redox_syscall 0.1.57", - "rustc_version", - "smallvec 0.6.14", - "winapi 0.3.9", -] - -[[package]] -name = "parking_lot_core" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" -dependencies = [ - "cfg-if 0.1.10", - "cloudabi", - "libc", - "redox_syscall 0.1.57", - "smallvec 1.6.1", - "winapi 0.3.9", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" -dependencies = [ - "cfg-if 1.0.0", - "instant", - "libc", - "redox_syscall 0.1.57", - "smallvec 1.6.1", - "winapi 0.3.9", -] - -[[package]] -name = "paste" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" -dependencies = [ - "paste-impl", - "proc-macro-hack", -] - -[[package]] -name = "paste" -version = "1.0.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1" - -[[package]] -name = "paste-impl" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" -dependencies = [ - "proc-macro-hack", -] - -[[package]] -name = "pbkdf2" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" -dependencies = [ - "byteorder", - "crypto-mac 0.7.0", -] - -[[package]] -name = "pbkdf2" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" -dependencies = [ - "crypto-mac 0.8.0", -] - -[[package]] -name = "pdqselect" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec91767ecc0a0bbe558ce8c9da33c068066c57ecc8bb8477ef8c1ad3ef77c27" - -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - -[[package]] -name = "percent-encoding" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" - -[[package]] -name = "percent-encoding" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" - -[[package]] -name = "pest" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" -dependencies = [ - "ucd-trie", -] - -[[package]] -name = "pest_derive" -version = "2.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "833d1ae558dc601e9a60366421196a8d94bc0ac980476d0b67e1d0988d72b2d0" -dependencies = [ - "pest", - "pest_generator", -] - -[[package]] -name = "pest_generator" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" -dependencies = [ - "pest", - "pest_meta", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pest_meta" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d" -dependencies = [ - "maplit", - "pest", - "sha-1 0.8.2", -] - -[[package]] -name = "petgraph" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" -dependencies = [ - "fixedbitset", - "indexmap", -] - -[[package]] -name = "pin-project" -version = "0.4.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffbc8e94b38ea3d2d8ba92aea2983b503cd75d0888d75b86bb37970b5698e15" -dependencies = [ - "pin-project-internal 0.4.27", -] - -[[package]] -name = "pin-project" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" -dependencies = [ - "pin-project-internal 1.0.5", -] - -[[package]] -name = "pin-project-internal" -version = "0.4.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65ad2ae56b6abe3a1ee25f15ee605bacadb9a764edaba9c2bf4103800d4a1895" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pin-project-internal" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b" -dependencies = [ - 
"proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "pin-project-lite" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" - -[[package]] -name = "pin-project-lite" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "pkg-config" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" - -[[package]] -name = "plain_hasher" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e19e6491bdde87c2c43d70f4c194bc8a758f2eb732df00f61e43f7362e3b4cc" -dependencies = [ - "crunchy", -] - -[[package]] -name = "platforms" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb3b2b1033b8a60b4da6ee470325f887758c95d5320f52f9ce0df055a55940e" - -[[package]] -name = "polling" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2a7bc6b2a29e632e45451c941832803a18cce6781db04de8a04696cdca8bde4" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "log", - "wepoll-sys", - "winapi 0.3.9", -] - -[[package]] -name = "poly1305" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b7456bc1ad2d4cf82b3a016be4c2ac48daf11bf990c1603ebd447fe6f30fca8" -dependencies = [ - "cpuid-bool 0.2.0", - "universal-hash", -] - -[[package]] -name = "polyval" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" -dependencies = [ - "cpuid-bool 0.2.0", - "opaque-debug 0.3.0", - "universal-hash", -] - -[[package]] -name = "ppv-lite86" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" - -[[package]] -name = "primitive-types" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2415937401cb030a2a0a4d922483f945fa068f52a7dbb22ce0fe5f2b6f6adace" -dependencies = [ - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" -dependencies = [ - "toml", -] - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - -[[package]] -name = "proc-macro-hack" -version = "0.5.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" - -[[package]] -name = "proc-macro-nested" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" - -[[package]] -name = "proc-macro2" -version = "1.0.24" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "prometheus" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8425533e7122f0c3cc7a37e6244b16ad3a2cc32ae7ac6276e2a75da0d9c200d" -dependencies = [ - "cfg-if 1.0.0", - "fnv", - "lazy_static", - "parking_lot 0.11.1", - "regex", - "thiserror", -] - -[[package]] -name = "prost" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6984d2f1a23009bd270b8bb56d0926810a3d483f59c987d77969e9d8e840b2" -dependencies = [ - "bytes 1.0.0", - "prost-derive", -] - -[[package]] -name = "prost-build" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32d3ebd75ac2679c2af3a92246639f9fcc8a442ee420719cc4fe195b98dd5fa3" -dependencies = [ - "bytes 1.0.0", - "heck", - "itertools", - "log", - "multimap", - "petgraph", - "prost", - "prost-types", - "tempfile", - "which 4.0.2", -] - -[[package]] -name = "prost-derive" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "169a15f3008ecb5160cba7d37bcd690a7601b6d30cfb87a117d45e59d52af5d4" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "prost-types" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b518d7cdd93dab1d1122cf07fa9a60771836c668dde9d9e2a139f957f0d9f1bb" -dependencies = [ - "bytes 1.0.0", - "prost", -] - -[[package]] -name = "psm" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3abf49e5417290756acfd26501536358560c4a5cc4a0934d390939acb3e7083a" -dependencies = [ - "cc", -] - -[[package]] -name = "pwasm-utils" -version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0f53bc2558e8376358ebdc28301546471d67336584f6438ed4b7c7457a055fd7" -dependencies = [ - "byteorder", - "log", - "parity-wasm", -] - -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - -[[package]] -name = "quick-error" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ac73b1112776fc109b2e61909bc46c7e1bf0d7f690ffb1676553acce16d5cda" - -[[package]] -name = "quicksink" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858" -dependencies = [ - "futures-core", - "futures-sink", - "pin-project-lite 0.1.11", -] - -[[package]] -name = "quote" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "radium" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" - -[[package]] -name = "rand" -version = "0.3.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c" -dependencies = [ - "libc", - "rand 0.4.6", -] - -[[package]] -name = "rand" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" -dependencies = [ - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "rdrand", - "winapi 0.3.9", -] - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - 
"getrandom 0.1.15", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", - "rand_pcg", -] - -[[package]] -name = "rand" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" -dependencies = [ - "libc", - "rand_chacha 0.3.0", - "rand_core 0.6.2", - "rand_hc 0.3.0", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_chacha" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" -dependencies = [ - "ppv-lite86", - "rand_core 0.6.2", -] - -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = [ - "rand_core 0.4.2", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.15", -] - -[[package]] -name = "rand_core" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" -dependencies = [ - "getrandom 0.2.1", -] - -[[package]] -name = "rand_distr" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2" -dependencies = [ - "rand 0.7.3", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_hc" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" -dependencies = [ - "rand_core 0.6.2", -] - -[[package]] -name = "rand_pcg" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "raw-cpuid" -version = "8.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fdf7d9dbd43f3d81d94a49c1c3df73cc2b3827995147e6cf7f89d4ec5483e73" -dependencies = [ - "bitflags", - "cc", - "rustc_version", -] - -[[package]] -name = "rawpointer" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" - -[[package]] -name = "rayon" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674" -dependencies = [ - "autocfg", - "crossbeam-deque 0.8.0", - "either", - "rayon-core", -] - -[[package]] -name = "rayon-core" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" -dependencies = [ - "crossbeam-channel", - "crossbeam-deque 0.8.0", - "crossbeam-utils 0.8.1", - "lazy_static", - "num_cpus", -] - -[[package]] -name = "rdrand" -version = "0.4.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", -] - -[[package]] -name = "redox_syscall" -version = "0.1.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" - -[[package]] -name = "redox_syscall" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ec8ca9416c5ea37062b502703cd7fcb207736bc294f6e0cf367ac6fc234570" -dependencies = [ - "bitflags", -] - -[[package]] -name = "redox_users" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" -dependencies = [ - "getrandom 0.1.15", - "redox_syscall 0.1.57", - "rust-argon2", -] - -[[package]] -name = "redox_users" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" -dependencies = [ - "getrandom 0.2.1", - "redox_syscall 0.2.4", -] - -[[package]] -name = "ref-cast" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e84b8a3c77dd38893c11b59284a40f304a1346d4da020e603fab3671727df95d" -dependencies = [ - "ref-cast-impl", -] - -[[package]] -name = "ref-cast-impl" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d5173fc07aa6595363a38ca7d69d438cc32cca4216ccd1a3a8f2d4b10bbcd0" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "regalloc" -version = "0.0.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5" -dependencies = [ - "log", - "rustc-hash", - "smallvec 1.6.1", -] - -[[package]] -name = "regex" -version = "1.4.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", - "thread_local", -] - -[[package]] -name = "regex-automata" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" -dependencies = [ - "byteorder", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.6.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" - -[[package]] -name = "region" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0" -dependencies = [ - "bitflags", - "libc", - "mach", - "winapi 0.3.9", -] - -[[package]] -name = "relay-ethereum-client" -version = "0.1.0" -dependencies = [ - "bp-eth-poa", - "headers-relay", - "hex-literal 0.3.1", - "jsonrpsee", - "libsecp256k1", - "log", - "parity-scale-codec", - "relay-utils", - "web3", -] - -[[package]] -name = "relay-kusama-client" -version = "0.1.0" -dependencies = [ - "bp-kusama", - "frame-support", - "frame-system", - "headers-relay", - "pallet-transaction-payment", - "parity-scale-codec", - "relay-substrate-client", - "relay-utils", - "sp-core", - "sp-keyring", - "sp-runtime", -] - -[[package]] -name = "relay-millau-client" -version = "0.1.0" -dependencies = [ - "frame-support", - "frame-system", - "headers-relay", - "millau-runtime", - "pallet-transaction-payment", - "parity-scale-codec", - "relay-substrate-client", - "relay-utils", - "sp-core", - "sp-keyring", - "sp-runtime", -] - -[[package]] -name = "relay-polkadot-client" -version = "0.1.0" -dependencies = [ - "bp-polkadot", - "frame-support", - "frame-system", - "headers-relay", - "pallet-transaction-payment", - 
"parity-scale-codec", - "relay-substrate-client", - "relay-utils", - "sp-core", - "sp-keyring", - "sp-runtime", -] - -[[package]] -name = "relay-rialto-client" -version = "0.1.0" -dependencies = [ - "frame-support", - "frame-system", - "headers-relay", - "pallet-transaction-payment", - "parity-scale-codec", - "relay-substrate-client", - "relay-utils", - "rialto-runtime", - "sp-core", - "sp-keyring", - "sp-runtime", -] - -[[package]] -name = "relay-substrate-client" -version = "0.1.0" -dependencies = [ - "async-std", - "async-trait", - "bp-message-lane", - "bp-runtime", - "frame-support", - "frame-system", - "futures 0.3.12", - "headers-relay", - "jsonrpsee", - "log", - "num-traits", - "pallet-balances", - "parity-scale-codec", - "rand 0.7.3", - "relay-utils", - "sc-rpc-api", - "sp-core", - "sp-runtime", - "sp-std", - "sp-trie", - "sp-version", -] - -[[package]] -name = "relay-utils" -version = "0.1.0" -dependencies = [ - "ansi_term 0.12.1", - "async-std", - "async-trait", - "backoff", - "env_logger 0.8.3", - "futures 0.3.12", - "log", - "num-traits", - "substrate-prometheus-endpoint", - "sysinfo", - "time 0.2.25", -] - -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "retain_mut" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53552c6c49e1e13f1a203ef0080ab3bbef0beb570a528993e83df057a9d9bba1" - -[[package]] -name = "rialto-bridge-node" -version = "0.1.0" -dependencies = [ - "bp-message-lane", - "bp-rialto", - "bp-runtime", - "frame-benchmarking", - "frame-benchmarking-cli", - "jsonrpc-core 15.1.0", - "pallet-message-lane", - "pallet-message-lane-rpc", - "rialto-runtime", - "sc-basic-authorship", - "sc-cli", - "sc-client-api", - "sc-consensus", - "sc-consensus-aura", - "sc-executor", - 
"sc-finality-grandpa", - "sc-finality-grandpa-rpc", - "sc-keystore", - "sc-rpc", - "sc-service", - "sc-telemetry", - "sc-transaction-pool", - "sp-consensus", - "sp-consensus-aura", - "sp-core", - "sp-finality-grandpa", - "sp-inherents", - "sp-runtime", - "structopt", - "substrate-build-script-utils", - "substrate-frame-rpc-system", - "vergen", -] - -[[package]] -name = "rialto-runtime" -version = "0.1.0" -dependencies = [ - "bp-currency-exchange", - "bp-eth-poa", - "bp-header-chain", - "bp-message-dispatch", - "bp-message-lane", - "bp-millau", - "bp-rialto", - "bp-runtime", - "bridge-runtime-common", - "frame-benchmarking", - "frame-executive", - "frame-support", - "frame-system", - "frame-system-rpc-runtime-api", - "hex-literal 0.3.1", - "libsecp256k1", - "pallet-aura", - "pallet-balances", - "pallet-bridge-call-dispatch", - "pallet-bridge-currency-exchange", - "pallet-bridge-eth-poa", - "pallet-finality-verifier", - "pallet-grandpa", - "pallet-message-lane", - "pallet-randomness-collective-flip", - "pallet-session", - "pallet-shift-session-manager", - "pallet-substrate-bridge", - "pallet-sudo", - "pallet-timestamp", - "pallet-transaction-payment", - "parity-scale-codec", - "serde", - "sp-api", - "sp-block-builder", - "sp-consensus-aura", - "sp-core", - "sp-finality-grandpa", - "sp-inherents", - "sp-io", - "sp-offchain", - "sp-runtime", - "sp-session", - "sp-std", - "sp-transaction-pool", - "sp-trie", - "sp-version", - "substrate-wasm-builder-runner", -] - -[[package]] -name = "ring" -version = "0.16.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "024a1e66fea74c66c66624ee5622a7ff0e4b73a13b4f5c326ddb50c708944226" -dependencies = [ - "cc", - "libc", - "once_cell", - "spin", - "untrusted", - "web-sys", - "winapi 0.3.9", -] - -[[package]] -name = "rlp" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1190dcc8c3a512f1eef5d09bb8c84c7f39e1054e174d1795482e18f5272f2e73" -dependencies = [ - 
"rustc-hex", -] - -[[package]] -name = "rlp" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e54369147e3e7796c9b885c7304db87ca3d09a0a98f72843d532868675bbfba8" -dependencies = [ - "bytes 1.0.0", - "rustc-hex", -] - -[[package]] -name = "rocksdb" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d83c02c429044d58474eaf5ae31e062d0de894e21125b47437ec0edc1397e6" -dependencies = [ - "libc", - "librocksdb-sys", -] - -[[package]] -name = "rpassword" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d755237fc0f99d98641540e66abac8bc46a0652f19148ac9e21de2da06b326c9" -dependencies = [ - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "rust-argon2" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" -dependencies = [ - "base64 0.13.0", - "blake2b_simd", - "constant_time_eq", - "crossbeam-utils 0.8.1", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" - -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver", -] - -[[package]] -name = "rustls" -version = "0.16.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" -dependencies = [ - "base64 0.10.1", - "log", - "ring", - "sct", - "webpki", -] - -[[package]] -name = "rustls" -version = "0.18.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" -dependencies = [ - "base64 0.12.3", - "log", - "ring", - "sct", - "webpki", -] - -[[package]] -name = "rustls" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" -dependencies = [ - "base64 0.13.0", - "log", - "ring", - "sct", - "webpki", -] - -[[package]] -name = "rustls-native-certs" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "629d439a7672da82dd955498445e496ee2096fe2117b9f796558a43fdb9e59b8" -dependencies = [ - "openssl-probe", - "rustls 0.18.1", - "schannel", - "security-framework", -] - -[[package]] -name = "rw-stream-sink" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" -dependencies = [ - "futures 0.3.12", - "pin-project 0.4.27", - "static_assertions", -] - -[[package]] -name = "ryu" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" - -[[package]] -name = "safe-mix" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d3d055a2582e6b00ed7a31c1524040aa391092bf636328350813f3a0605215c" -dependencies = [ - "rustc_version", -] - -[[package]] -name = "salsa20" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399f290ffc409596022fce5ea5d4138184be4784f2b28c62c59f0d8389059a15" 
-dependencies = [ - "cipher", -] - -[[package]] -name = "sc-basic-authorship" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "futures 0.3.12", - "futures-timer 3.0.2", - "log", - "parity-scale-codec", - "sc-block-builder", - "sc-client-api", - "sc-proposer-metrics", - "sc-telemetry", - "sp-api", - "sp-blockchain", - "sp-consensus", - "sp-core", - "sp-inherents", - "sp-runtime", - "sp-transaction-pool", - "substrate-prometheus-endpoint", -] - -[[package]] -name = "sc-block-builder" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "parity-scale-codec", - "sc-client-api", - "sp-api", - "sp-block-builder", - "sp-blockchain", - "sp-consensus", - "sp-core", - "sp-inherents", - "sp-runtime", - "sp-state-machine", -] - -[[package]] -name = "sc-chain-spec" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "sc-chain-spec-derive", - "sc-consensus-babe", - "sc-consensus-epochs", - "sc-finality-grandpa", - "sc-network", - "sc-telemetry", - "serde", - "serde_json", - "sp-chain-spec", - "sp-consensus-babe", - "sp-core", - "sp-runtime", -] - -[[package]] -name = "sc-chain-spec-derive" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sc-cli" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "chrono", - "fdlimit", - "futures 0.3.12", - "hex", - "libp2p", - "log", - "names", - "parity-scale-codec", - "rand 0.7.3", - "regex", - 
"rpassword", - "sc-client-api", - "sc-keystore", - "sc-network", - "sc-service", - "sc-telemetry", - "sc-tracing", - "serde", - "serde_json", - "sp-blockchain", - "sp-core", - "sp-keyring", - "sp-keystore", - "sp-panic-handler", - "sp-runtime", - "sp-utils", - "sp-version", - "structopt", - "thiserror", - "tiny-bip39", - "tokio 0.2.24", -] - -[[package]] -name = "sc-client-api" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "derive_more", - "fnv", - "futures 0.3.12", - "hash-db", - "kvdb", - "lazy_static", - "log", - "parity-scale-codec", - "parking_lot 0.11.1", - "sc-executor", - "sp-api", - "sp-blockchain", - "sp-consensus", - "sp-core", - "sp-database", - "sp-externalities", - "sp-inherents", - "sp-keystore", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-storage", - "sp-transaction-pool", - "sp-trie", - "sp-utils", - "sp-version", - "substrate-prometheus-endpoint", -] - -[[package]] -name = "sc-client-db" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "blake2-rfc", - "hash-db", - "kvdb", - "kvdb-memorydb", - "kvdb-rocksdb", - "linked-hash-map", - "log", - "parity-db", - "parity-scale-codec", - "parity-util-mem", - "parking_lot 0.11.1", - "sc-client-api", - "sc-executor", - "sc-state-db", - "sp-arithmetic", - "sp-blockchain", - "sp-consensus", - "sp-core", - "sp-database", - "sp-runtime", - "sp-state-machine", - "sp-trie", - "substrate-prometheus-endpoint", -] - -[[package]] -name = "sc-consensus" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "sc-client-api", - "sp-blockchain", - "sp-consensus", - "sp-runtime", -] - -[[package]] -name = "sc-consensus-aura" -version = "0.9.0" -source = 
"git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "derive_more", - "futures 0.3.12", - "futures-timer 3.0.2", - "log", - "parity-scale-codec", - "parking_lot 0.11.1", - "sc-block-builder", - "sc-client-api", - "sc-consensus-slots", - "sc-telemetry", - "sp-api", - "sp-application-crypto", - "sp-block-builder", - "sp-blockchain", - "sp-consensus", - "sp-consensus-aura", - "sp-consensus-slots", - "sp-core", - "sp-inherents", - "sp-io", - "sp-keystore", - "sp-runtime", - "sp-timestamp", - "sp-version", - "substrate-prometheus-endpoint", -] - -[[package]] -name = "sc-consensus-babe" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "derive_more", - "fork-tree", - "futures 0.3.12", - "futures-timer 3.0.2", - "log", - "merlin", - "num-bigint", - "num-rational", - "num-traits", - "parity-scale-codec", - "parking_lot 0.11.1", - "pdqselect", - "rand 0.7.3", - "retain_mut", - "sc-client-api", - "sc-consensus-epochs", - "sc-consensus-slots", - "sc-consensus-uncles", - "sc-keystore", - "sc-telemetry", - "schnorrkel", - "serde", - "sp-api", - "sp-application-crypto", - "sp-block-builder", - "sp-blockchain", - "sp-consensus", - "sp-consensus-babe", - "sp-consensus-slots", - "sp-consensus-vrf", - "sp-core", - "sp-inherents", - "sp-io", - "sp-keystore", - "sp-runtime", - "sp-timestamp", - "sp-utils", - "sp-version", - "substrate-prometheus-endpoint", -] - -[[package]] -name = "sc-consensus-epochs" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "fork-tree", - "parity-scale-codec", - "parking_lot 0.11.1", - "sc-client-api", - "sp-blockchain", - "sp-runtime", -] - -[[package]] -name = "sc-consensus-slots" -version = "0.9.0" -source = 
"git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "futures 0.3.12", - "futures-timer 3.0.2", - "log", - "parity-scale-codec", - "parking_lot 0.11.1", - "sc-client-api", - "sc-telemetry", - "sp-api", - "sp-application-crypto", - "sp-arithmetic", - "sp-blockchain", - "sp-consensus", - "sp-consensus-slots", - "sp-core", - "sp-inherents", - "sp-runtime", - "sp-state-machine", - "sp-trie", - "thiserror", -] - -[[package]] -name = "sc-consensus-uncles" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "log", - "sc-client-api", - "sp-authorship", - "sp-consensus", - "sp-core", - "sp-inherents", - "sp-runtime", -] - -[[package]] -name = "sc-executor" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "derive_more", - "lazy_static", - "libsecp256k1", - "log", - "parity-scale-codec", - "parity-wasm", - "parking_lot 0.11.1", - "sc-executor-common", - "sc-executor-wasmi", - "sc-executor-wasmtime", - "sp-api", - "sp-core", - "sp-externalities", - "sp-io", - "sp-panic-handler", - "sp-runtime-interface", - "sp-serializer", - "sp-tasks", - "sp-trie", - "sp-version", - "sp-wasm-interface", - "wasmi", -] - -[[package]] -name = "sc-executor-common" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "derive_more", - "parity-scale-codec", - "parity-wasm", - "sp-allocator", - "sp-core", - "sp-serializer", - "sp-wasm-interface", - "thiserror", - "wasmi", -] - -[[package]] -name = "sc-executor-wasmi" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "log", - "parity-scale-codec", - "sc-executor-common", - 
"sp-allocator", - "sp-core", - "sp-runtime-interface", - "sp-wasm-interface", - "wasmi", -] - -[[package]] -name = "sc-executor-wasmtime" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "log", - "parity-scale-codec", - "parity-wasm", - "pwasm-utils", - "sc-executor-common", - "scoped-tls", - "sp-allocator", - "sp-core", - "sp-runtime-interface", - "sp-wasm-interface", - "wasmtime", -] - -[[package]] -name = "sc-finality-grandpa" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "derive_more", - "dyn-clone", - "finality-grandpa", - "fork-tree", - "futures 0.3.12", - "futures-timer 3.0.2", - "linked-hash-map", - "log", - "parity-scale-codec", - "parking_lot 0.11.1", - "pin-project 1.0.5", - "rand 0.7.3", - "sc-block-builder", - "sc-client-api", - "sc-consensus", - "sc-keystore", - "sc-network", - "sc-network-gossip", - "sc-telemetry", - "serde_json", - "sp-api", - "sp-application-crypto", - "sp-arithmetic", - "sp-blockchain", - "sp-consensus", - "sp-core", - "sp-finality-grandpa", - "sp-inherents", - "sp-keystore", - "sp-runtime", - "sp-utils", - "substrate-prometheus-endpoint", -] - -[[package]] -name = "sc-finality-grandpa-rpc" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "derive_more", - "finality-grandpa", - "futures 0.3.12", - "jsonrpc-core 15.1.0", - "jsonrpc-core-client", - "jsonrpc-derive", - "jsonrpc-pubsub", - "log", - "parity-scale-codec", - "sc-client-api", - "sc-finality-grandpa", - "sc-rpc", - "serde", - "serde_json", - "sp-blockchain", - "sp-core", - "sp-runtime", -] - -[[package]] -name = "sc-informant" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" 
-dependencies = [ - "ansi_term 0.12.1", - "futures 0.3.12", - "log", - "parity-util-mem", - "sc-client-api", - "sc-network", - "sp-blockchain", - "sp-runtime", - "sp-transaction-pool", - "sp-utils", - "wasm-timer", -] - -[[package]] -name = "sc-keystore" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "async-trait", - "derive_more", - "futures 0.3.12", - "futures-util", - "hex", - "merlin", - "parking_lot 0.11.1", - "rand 0.7.3", - "serde_json", - "sp-application-crypto", - "sp-core", - "sp-keystore", - "subtle 2.4.0", -] - -[[package]] -name = "sc-light" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "hash-db", - "lazy_static", - "parity-scale-codec", - "parking_lot 0.11.1", - "sc-client-api", - "sc-executor", - "sp-api", - "sp-blockchain", - "sp-core", - "sp-externalities", - "sp-runtime", - "sp-state-machine", -] - -[[package]] -name = "sc-network" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "async-std", - "async-trait", - "asynchronous-codec 0.5.0", - "bitflags", - "bs58 0.4.0", - "bytes 1.0.0", - "cid", - "derive_more", - "either", - "erased-serde", - "fnv", - "fork-tree", - "futures 0.3.12", - "futures-timer 3.0.2", - "hex", - "ip_network", - "libp2p", - "linked-hash-map", - "linked_hash_set", - "log", - "lru", - "nohash-hasher", - "parity-scale-codec", - "parking_lot 0.11.1", - "pin-project 1.0.5", - "prost", - "prost-build", - "rand 0.7.3", - "sc-block-builder", - "sc-client-api", - "sc-peerset", - "serde", - "serde_json", - "smallvec 1.6.1", - "sp-arithmetic", - "sp-blockchain", - "sp-consensus", - "sp-core", - "sp-runtime", - "sp-utils", - "substrate-prometheus-endpoint", - "thiserror", - "unsigned-varint 0.6.0", - "void", - "wasm-timer", 
- "zeroize", -] - -[[package]] -name = "sc-network-gossip" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "futures 0.3.12", - "futures-timer 3.0.2", - "libp2p", - "log", - "lru", - "sc-network", - "sp-runtime", - "substrate-prometheus-endpoint", - "wasm-timer", -] - -[[package]] -name = "sc-offchain" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "bytes 0.5.6", - "fnv", - "futures 0.3.12", - "futures-timer 3.0.2", - "hyper 0.13.10", - "hyper-rustls", - "log", - "num_cpus", - "parity-scale-codec", - "parking_lot 0.11.1", - "rand 0.7.3", - "sc-client-api", - "sc-keystore", - "sc-network", - "sp-api", - "sp-core", - "sp-offchain", - "sp-runtime", - "sp-utils", - "threadpool", -] - -[[package]] -name = "sc-peerset" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "futures 0.3.12", - "libp2p", - "log", - "serde_json", - "sp-utils", - "wasm-timer", -] - -[[package]] -name = "sc-proposer-metrics" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "log", - "substrate-prometheus-endpoint", -] - -[[package]] -name = "sc-rpc" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "futures 0.3.12", - "hash-db", - "jsonrpc-core 15.1.0", - "jsonrpc-pubsub", - "log", - "parity-scale-codec", - "parking_lot 0.11.1", - "sc-block-builder", - "sc-client-api", - "sc-executor", - "sc-keystore", - "sc-rpc-api", - "sc-tracing", - "serde_json", - "sp-api", - "sp-blockchain", - "sp-chain-spec", - "sp-core", - "sp-keystore", - "sp-offchain", - "sp-rpc", - "sp-runtime", - 
"sp-session", - "sp-state-machine", - "sp-transaction-pool", - "sp-utils", - "sp-version", -] - -[[package]] -name = "sc-rpc-api" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "derive_more", - "futures 0.3.12", - "jsonrpc-core 15.1.0", - "jsonrpc-core-client", - "jsonrpc-derive", - "jsonrpc-pubsub", - "log", - "parity-scale-codec", - "parking_lot 0.11.1", - "serde", - "serde_json", - "sp-chain-spec", - "sp-core", - "sp-rpc", - "sp-runtime", - "sp-transaction-pool", - "sp-version", -] - -[[package]] -name = "sc-rpc-server" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "futures 0.1.30", - "jsonrpc-core 15.1.0", - "jsonrpc-http-server", - "jsonrpc-ipc-server", - "jsonrpc-pubsub", - "jsonrpc-ws-server", - "log", - "serde", - "serde_json", - "sp-runtime", - "substrate-prometheus-endpoint", -] - -[[package]] -name = "sc-service" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "directories", - "exit-future", - "futures 0.1.30", - "futures 0.3.12", - "futures-timer 3.0.2", - "hash-db", - "jsonrpc-core 15.1.0", - "jsonrpc-pubsub", - "lazy_static", - "log", - "parity-scale-codec", - "parity-util-mem", - "parking_lot 0.11.1", - "pin-project 1.0.5", - "rand 0.7.3", - "sc-block-builder", - "sc-chain-spec", - "sc-client-api", - "sc-client-db", - "sc-executor", - "sc-informant", - "sc-keystore", - "sc-light", - "sc-network", - "sc-offchain", - "sc-rpc", - "sc-rpc-server", - "sc-telemetry", - "sc-tracing", - "sc-transaction-pool", - "serde", - "serde_json", - "sp-api", - "sp-application-crypto", - "sp-block-builder", - "sp-blockchain", - "sp-consensus", - "sp-core", - "sp-externalities", - "sp-inherents", - "sp-io", - "sp-keystore", - "sp-runtime", - "sp-session", 
- "sp-state-machine", - "sp-tracing", - "sp-transaction-pool", - "sp-trie", - "sp-utils", - "sp-version", - "substrate-prometheus-endpoint", - "tempfile", - "thiserror", - "tracing", - "tracing-futures", - "wasm-timer", -] - -[[package]] -name = "sc-state-db" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "log", - "parity-scale-codec", - "parity-util-mem", - "parity-util-mem-derive", - "parking_lot 0.11.1", - "sc-client-api", - "sp-core", - "thiserror", -] - -[[package]] -name = "sc-telemetry" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "chrono", - "futures 0.3.12", - "libp2p", - "log", - "parking_lot 0.11.1", - "pin-project 1.0.5", - "rand 0.7.3", - "serde", - "serde_json", - "sp-utils", - "take_mut", - "tracing", - "tracing-subscriber", - "void", - "wasm-timer", -] - -[[package]] -name = "sc-tracing" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "ansi_term 0.12.1", - "atty", - "erased-serde", - "lazy_static", - "log", - "once_cell", - "parking_lot 0.11.1", - "regex", - "rustc-hash", - "sc-telemetry", - "sc-tracing-proc-macro", - "serde", - "serde_json", - "sp-tracing", - "thiserror", - "tracing", - "tracing-core", - "tracing-log", - "tracing-subscriber", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "sc-tracing-proc-macro" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sc-transaction-graph" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - 
"derive_more", - "futures 0.3.12", - "linked-hash-map", - "log", - "parity-util-mem", - "parking_lot 0.11.1", - "retain_mut", - "serde", - "sp-blockchain", - "sp-core", - "sp-runtime", - "sp-transaction-pool", - "sp-utils", - "thiserror", - "wasm-timer", -] - -[[package]] -name = "sc-transaction-pool" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "futures 0.3.12", - "futures-diagnose", - "intervalier", - "log", - "parity-scale-codec", - "parity-util-mem", - "parking_lot 0.11.1", - "sc-client-api", - "sc-transaction-graph", - "sp-api", - "sp-blockchain", - "sp-core", - "sp-runtime", - "sp-tracing", - "sp-transaction-pool", - "sp-utils", - "substrate-prometheus-endpoint", - "thiserror", - "wasm-timer", -] - -[[package]] -name = "schannel" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" -dependencies = [ - "lazy_static", - "winapi 0.3.9", -] - -[[package]] -name = "schnorrkel" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "curve25519-dalek 2.1.0", - "getrandom 0.1.15", - "merlin", - "rand 0.7.3", - "rand_core 0.5.1", - "serde", - "sha2 0.8.2", - "subtle 2.4.0", - "zeroize", -] - -[[package]] -name = "scoped-tls" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" - -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "scroll" -version = "0.10.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda28d4b4830b807a8b43f7b0e6b5df875311b3e7621d84577188c175b6ec1ec" -dependencies = [ - "scroll_derive", -] - -[[package]] -name = "scroll_derive" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b12bd20b94c7cdfda8c7ba9b92ad0d9a56e3fa018c25fca83b51aa664c9b4c0d" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sct" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "secrecy" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0673d6a6449f5e7d12a1caf424fd9363e2af3a4953023ed455e3c4beef4597c0" -dependencies = [ - "zeroize", -] - -[[package]] -name = "security-framework" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad502866817f0575705bd7be36e2b2535cc33262d493aa733a2ec862baa2bc2b" -dependencies = [ - "bitflags", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51ceb04988b17b6d1dcd555390fa822ca5637b4a14e1f5099f13d351bed4d6c7" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - -[[package]] -name = "serde" -version = "1.0.123" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d5161132722baa40d802cc70b15262b98258453e85e5d1d365c757c73869ae" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde-big-array" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "883eee5198ea51720eab8be52a36cf6c0164ac90eea0ed95b649d5e35382404e" -dependencies = [ - "serde", - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.123" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9391c295d64fc0abb2c556bad848f33cb8296276b1ad2677d1ae1ace4f258f31" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.62" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea1c6153794552ea7cf7cf63b1231a25de00ec90db326ba6264440fa08e31486" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "sha-1" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - -[[package]] -name = "sha-1" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce3cdf1b5e620a498ee6f2a171885ac7e22f0e12089ec4b3d22b84921792507c" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpuid-bool 0.1.2", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "sha1" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" - -[[package]] -name = "sha2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" -dependencies = [ - "block-buffer 
0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - -[[package]] -name = "sha2" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e7aab86fe2149bad8c507606bdb3f4ef5e7b2380eb92350f56122cca72a42a8" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpuid-bool 0.1.2", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug 0.3.0", -] - -[[package]] -name = "sharded-slab" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4921be914e16899a80adefb821f8ddb7974e3f1250223575a44ed994882127" -dependencies = [ - "lazy_static", - "loom", -] - -[[package]] -name = "shlex" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" - -[[package]] -name = "signal-hook" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e31d442c16f047a671b5a71e2161d6e68814012b7f5379d269ebd915fac2729" -dependencies = [ - "libc", - "signal-hook-registry", -] - -[[package]] -name = "signal-hook-registry" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" -dependencies = [ - "libc", -] - -[[package]] -name = "signature" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29f060a7d147e33490ec10da418795238fd7545bba241504d6b31a409f2e6210" - -[[package]] -name = "simba" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fb931b1367faadea6b1ab1c306a860ec17aaa5fa39f367d0c744e69d971a1fb2" -dependencies = [ - "approx", - "num-complex", - "num-traits", - "paste 0.1.18", -] - -[[package]] -name = "slab" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" - -[[package]] -name = "smallvec" -version = "0.6.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" -dependencies = [ - "maybe-uninit", -] - -[[package]] -name = "smallvec" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" - -[[package]] -name = "snow" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "795dd7aeeee24468e5a32661f6d27f7b5cbed802031b2d7640c7b10f8fb2dd50" -dependencies = [ - "aes-gcm", - "blake2", - "chacha20poly1305", - "rand 0.7.3", - "rand_core 0.5.1", - "ring", - "rustc_version", - "sha2 0.9.2", - "subtle 2.4.0", - "x25519-dalek", -] - -[[package]] -name = "socket2" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "winapi 0.3.9", -] - -[[package]] -name = "soketto" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c9dab3f95c9ebdf3a88268c19af668f637a3c5039c2c56ff2d40b1b2d64a25b" -dependencies = [ - "base64 0.11.0", - "bytes 0.5.6", - "futures 0.3.12", - "http 0.2.2", - "httparse", - "log", - "rand 0.7.3", - "sha1", - "smallvec 1.6.1", - "static_assertions", - "thiserror", -] - -[[package]] -name = "soketto" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b5c71ed3d54db0a699f4948e1bb3e45b450fa31fe602621dee6680361d569c88" -dependencies = [ - "base64 0.12.3", - "bytes 0.5.6", - "flate2", - "futures 0.3.12", - "httparse", - "log", - "rand 0.7.3", - "sha-1 0.9.2", -] - -[[package]] -name = "sp-allocator" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "log", - "sp-core", - "sp-std", - "sp-wasm-interface", - "thiserror", -] - -[[package]] -name = "sp-api" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "hash-db", - "parity-scale-codec", - "sp-api-proc-macro", - "sp-core", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-version", - "thiserror", -] - -[[package]] -name = "sp-api-proc-macro" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "blake2-rfc", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-application-crypto" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "parity-scale-codec", - "serde", - "sp-core", - "sp-io", - "sp-std", -] - -[[package]] -name = "sp-arithmetic" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "integer-sqrt", - "num-traits", - "parity-scale-codec", - "serde", - "sp-debug-derive", - "sp-std", -] - -[[package]] -name = "sp-authorship" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "parity-scale-codec", - "sp-inherents", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-block-builder" -version = "3.0.0" -source = 
"git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "parity-scale-codec", - "sp-api", - "sp-inherents", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-blockchain" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "futures 0.3.12", - "log", - "lru", - "parity-scale-codec", - "parking_lot 0.11.1", - "sp-api", - "sp-consensus", - "sp-database", - "sp-runtime", - "sp-state-machine", - "thiserror", -] - -[[package]] -name = "sp-chain-spec" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = "sp-consensus" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "futures 0.3.12", - "futures-timer 3.0.2", - "libp2p", - "log", - "parity-scale-codec", - "parking_lot 0.11.1", - "serde", - "sp-api", - "sp-core", - "sp-inherents", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-trie", - "sp-utils", - "sp-version", - "substrate-prometheus-endpoint", - "thiserror", - "wasm-timer", -] - -[[package]] -name = "sp-consensus-aura" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "parity-scale-codec", - "sp-api", - "sp-application-crypto", - "sp-consensus-slots", - "sp-inherents", - "sp-runtime", - "sp-std", - "sp-timestamp", -] - -[[package]] -name = "sp-consensus-babe" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "merlin", - "parity-scale-codec", - "sp-api", - "sp-application-crypto", - "sp-consensus", - "sp-consensus-slots", - 
"sp-consensus-vrf", - "sp-core", - "sp-inherents", - "sp-keystore", - "sp-runtime", - "sp-std", - "sp-timestamp", -] - -[[package]] -name = "sp-consensus-slots" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "parity-scale-codec", - "sp-arithmetic", - "sp-runtime", -] - -[[package]] -name = "sp-consensus-vrf" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "parity-scale-codec", - "schnorrkel", - "sp-core", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-core" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "base58", - "blake2-rfc", - "byteorder", - "dyn-clonable", - "ed25519-dalek", - "futures 0.3.12", - "hash-db", - "hash256-std-hasher", - "hex", - "impl-serde", - "lazy_static", - "libsecp256k1", - "log", - "merlin", - "num-traits", - "parity-scale-codec", - "parity-util-mem", - "parking_lot 0.11.1", - "primitive-types", - "rand 0.7.3", - "regex", - "schnorrkel", - "secrecy", - "serde", - "sha2 0.9.2", - "sp-debug-derive", - "sp-externalities", - "sp-runtime-interface", - "sp-std", - "sp-storage", - "substrate-bip39", - "thiserror", - "tiny-bip39", - "tiny-keccak", - "twox-hash", - "wasmi", - "zeroize", -] - -[[package]] -name = "sp-database" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "kvdb", - "parking_lot 0.11.1", -] - -[[package]] -name = "sp-debug-derive" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-externalities" -version = "0.9.0" -source = 
"git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "environmental", - "parity-scale-codec", - "sp-std", - "sp-storage", -] - -[[package]] -name = "sp-finality-grandpa" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "finality-grandpa", - "log", - "parity-scale-codec", - "serde", - "sp-api", - "sp-application-crypto", - "sp-core", - "sp-keystore", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-inherents" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "parity-scale-codec", - "parking_lot 0.11.1", - "sp-core", - "sp-std", - "thiserror", -] - -[[package]] -name = "sp-io" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "futures 0.3.12", - "hash-db", - "libsecp256k1", - "log", - "parity-scale-codec", - "parking_lot 0.11.1", - "sp-core", - "sp-externalities", - "sp-keystore", - "sp-runtime-interface", - "sp-state-machine", - "sp-std", - "sp-tracing", - "sp-trie", - "sp-wasm-interface", - "tracing", - "tracing-core", -] - -[[package]] -name = "sp-keyring" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "lazy_static", - "sp-core", - "sp-runtime", - "strum", -] - -[[package]] -name = "sp-keystore" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "async-trait", - "derive_more", - "futures 0.3.12", - "merlin", - "parity-scale-codec", - "parking_lot 0.11.1", - "schnorrkel", - "serde", - "sp-core", - "sp-externalities", -] - -[[package]] -name = "sp-offchain" -version = 
"3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "sp-api", - "sp-core", - "sp-runtime", -] - -[[package]] -name = "sp-panic-handler" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "backtrace", -] - -[[package]] -name = "sp-rpc" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "serde", - "sp-core", -] - -[[package]] -name = "sp-runtime" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "either", - "hash256-std-hasher", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "parity-util-mem", - "paste 1.0.4", - "rand 0.7.3", - "serde", - "sp-application-crypto", - "sp-arithmetic", - "sp-core", - "sp-io", - "sp-std", -] - -[[package]] -name = "sp-runtime-interface" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "primitive-types", - "sp-externalities", - "sp-runtime-interface-proc-macro", - "sp-std", - "sp-storage", - "sp-tracing", - "sp-wasm-interface", - "static_assertions", -] - -[[package]] -name = "sp-runtime-interface-proc-macro" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "Inflector", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-serializer" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "serde", - "serde_json", -] - -[[package]] -name = 
"sp-session" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "parity-scale-codec", - "sp-api", - "sp-core", - "sp-runtime", - "sp-staking", - "sp-std", -] - -[[package]] -name = "sp-staking" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "parity-scale-codec", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-state-machine" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "hash-db", - "log", - "num-traits", - "parity-scale-codec", - "parking_lot 0.11.1", - "rand 0.7.3", - "smallvec 1.6.1", - "sp-core", - "sp-externalities", - "sp-panic-handler", - "sp-std", - "sp-trie", - "thiserror", - "trie-db", - "trie-root", -] - -[[package]] -name = "sp-std" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" - -[[package]] -name = "sp-storage" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "impl-serde", - "parity-scale-codec", - "ref-cast", - "serde", - "sp-debug-derive", - "sp-std", -] - -[[package]] -name = "sp-tasks" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "log", - "sp-core", - "sp-externalities", - "sp-io", - "sp-runtime-interface", - "sp-std", -] - -[[package]] -name = "sp-timestamp" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "sp-api", - "sp-inherents", - "sp-runtime", - "sp-std", - "wasm-timer", 
-] - -[[package]] -name = "sp-tracing" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "log", - "parity-scale-codec", - "sp-std", - "tracing", - "tracing-core", - "tracing-subscriber", -] - -[[package]] -name = "sp-transaction-pool" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "derive_more", - "futures 0.3.12", - "log", - "parity-scale-codec", - "serde", - "sp-api", - "sp-blockchain", - "sp-runtime", - "thiserror", -] - -[[package]] -name = "sp-trie" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "hash-db", - "memory-db", - "parity-scale-codec", - "sp-core", - "sp-std", - "trie-db", - "trie-root", -] - -[[package]] -name = "sp-utils" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "futures 0.3.12", - "futures-core", - "futures-timer 3.0.2", - "lazy_static", - "prometheus", -] - -[[package]] -name = "sp-version" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "impl-serde", - "parity-scale-codec", - "serde", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "sp-wasm-interface" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "sp-std", - "wasmi", -] - -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - -[[package]] -name = "stable_deref_trait" 
-version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - -[[package]] -name = "standback" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf906c8b8fc3f6ecd1046e01da1d8ddec83e48c8b08b84dcc02b585a6bedf5a8" -dependencies = [ - "version_check", -] - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "statrs" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cce16f6de653e88beca7bd13780d08e09d4489dbca1f9210e041bc4852481382" -dependencies = [ - "rand 0.7.3", -] - -[[package]] -name = "stdweb" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" -dependencies = [ - "discard", - "rustc_version", - "stdweb-derive", - "stdweb-internal-macros", - "stdweb-internal-runtime", - "wasm-bindgen", -] - -[[package]] -name = "stdweb-derive" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" -dependencies = [ - "proc-macro2", - "quote", - "serde", - "serde_derive", - "syn", -] - -[[package]] -name = "stdweb-internal-macros" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" -dependencies = [ - "base-x", - "proc-macro2", - "quote", - "serde", - "serde_derive", - "serde_json", - "sha1", - "syn", -] - -[[package]] -name = "stdweb-internal-runtime" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" - -[[package]] -name = "stream-cipher" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c80e15f898d8d8f25db24c253ea615cc14acf418ff307822995814e7d42cfa89" -dependencies = [ - "block-cipher", - "generic-array 0.14.4", -] - -[[package]] -name = "string" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d24114bfcceb867ca7f71a0d3fe45d45619ec47a6fbfa98cb14e14250bfa5d6d" -dependencies = [ - "bytes 0.4.12", -] - -[[package]] -name = "strsim" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" - -[[package]] -name = "structopt" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5277acd7ee46e63e5168a80734c9f6ee81b1367a7d8772a2d765df2a3705d28c" -dependencies = [ - "clap", - "lazy_static", - "structopt-derive", -] - -[[package]] -name = "structopt-derive" -version = "0.4.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ba9cdfda491b814720b6b06e0cac513d922fc407582032e8706e9f137976f90" -dependencies = [ - "heck", - "proc-macro-error", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "strum" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7318c509b5ba57f18533982607f24070a55d353e90d4cae30c467cdb2ad5ac5c" -dependencies = [ - "strum_macros", -] - -[[package]] -name = "strum_macros" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee8bc6b87a5112aeeab1f4a9f7ab634fe6cbefc4850006df31267f4cfb9e3149" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "substrate-bip39" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bed6646a0159b9935b5d045611560eeef842b78d7adc3ba36f5ca325a13a0236" -dependencies = [ - "hmac 0.7.1", - "pbkdf2 0.3.0", - "schnorrkel", - "sha2 0.8.2", - "zeroize", -] - -[[package]] -name = "substrate-build-script-utils" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f14feab86fe31e7d0a485d53d7c1c634c426f7ae5b8ce4f705b2e49a35713fcb" -dependencies = [ - "platforms", -] - -[[package]] -name = "substrate-frame-rpc-system" -version = "3.0.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "frame-system-rpc-runtime-api", - "futures 0.3.12", - "jsonrpc-core 15.1.0", - "jsonrpc-core-client", - "jsonrpc-derive", - "log", - "parity-scale-codec", - "sc-client-api", - "sc-rpc-api", - "serde", - "sp-api", - "sp-block-builder", - "sp-blockchain", - "sp-core", - "sp-runtime", - "sp-transaction-pool", -] - -[[package]] -name = "substrate-prometheus-endpoint" -version = "0.9.0" -source = "git+https://github.com/paritytech/substrate.git?branch=master#528c14b3c96bb93d3029451f0706a079d7d7a9bb" -dependencies = [ - "async-std", - "derive_more", - "futures-util", - "hyper 0.13.10", - "log", - "prometheus", - "tokio 0.2.24", -] - -[[package]] -name = "substrate-relay" -version = "0.1.0" -dependencies = [ - "async-std", - "async-trait", - "bp-header-chain", - "bp-kusama", - "bp-message-lane", - "bp-millau", - "bp-polkadot", - "bp-rialto", - "bp-runtime", - "bridge-runtime-common", - "frame-support", - "futures 0.3.12", - "headers-relay", - "hex", - "log", - "messages-relay", - "millau-runtime", - "num-traits", - "pallet-bridge-call-dispatch", - "pallet-message-lane", - "pallet-substrate-bridge", - "parity-scale-codec", - "paste 1.0.4", - "relay-kusama-client", - "relay-millau-client", - "relay-polkadot-client", - "relay-rialto-client", - "relay-substrate-client", - "relay-utils", - "rialto-runtime", - "sp-core", - "sp-finality-grandpa", - "sp-runtime", - 
"sp-trie", - "structopt", -] - -[[package]] -name = "substrate-wasm-builder-runner" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54cab12167e32b38a62c5ea5825aa0874cde315f907a46aad2b05aa8ef3d862f" - -[[package]] -name = "subtle" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" - -[[package]] -name = "subtle" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" - -[[package]] -name = "syn" -version = "1.0.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c700597eca8a5a762beb35753ef6b94df201c81cca676604f547495a0d7f0081" -dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", -] - -[[package]] -name = "synstructure" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "unicode-xid", -] - -[[package]] -name = "sysinfo" -version = "0.15.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67330cbee3b2a819e3365a773f05e884a136603687f812bf24db5b6c3d76b696" -dependencies = [ - "cfg-if 0.1.10", - "doc-comment", - "libc", - "ntapi", - "once_cell", - "rayon", - "winapi 0.3.9", -] - -[[package]] -name = "take_mut" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" - -[[package]] -name = "tap" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36474e732d1affd3a6ed582781b3683df3d0563714c59c39591e8ff707cf078e" - -[[package]] -name = "target-lexicon" -version = "0.11.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee5a98e506fb7231a304c3a1bd7c132a55016cf65001e0282480665870dfcb9" - -[[package]] -name = "tempfile" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "rand 0.7.3", - "redox_syscall 0.1.57", - "remove_dir_all", - "winapi 0.3.9", -] - -[[package]] -name = "termcolor" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "textwrap" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" -dependencies = [ - "unicode-width", -] - -[[package]] -name = "thiserror" -version = "1.0.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76cc616c6abf8c8928e2fdcc0dbfab37175edd8fb49a4641066ad1364fdab146" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9be73a2caec27583d0046ef3796c3794f868a5bc813db689eed00c7631275cd1" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "thread_local" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "threadpool" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" -dependencies = [ - "num_cpus", -] - -[[package]] -name = "time" -version = "0.1.44" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" -dependencies = [ - "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi 0.3.9", -] - -[[package]] -name = "time" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1195b046942c221454c2539395f85413b33383a067449d78aab2b7b052a142f7" -dependencies = [ - "const_fn", - "libc", - "standback", - "stdweb", - "time-macros", - "version_check", - "winapi 0.3.9", -] - -[[package]] -name = "time-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" -dependencies = [ - "proc-macro-hack", - "time-macros-impl", -] - -[[package]] -name = "time-macros-impl" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "standback", - "syn", -] - -[[package]] -name = "tiny-bip39" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8" -dependencies = [ - "anyhow", - "hmac 0.8.1", - "once_cell", - "pbkdf2 0.4.0", - "rand 0.7.3", - "rustc-hash", - "sha2 0.9.2", - "thiserror", - "unicode-normalization", - "zeroize", -] - -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - -[[package]] -name = "tinyvec" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = 
"tinyvec_macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" - -[[package]] -name = "tokio" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.30", - "mio", - "num_cpus", - "tokio-codec", - "tokio-current-thread", - "tokio-executor", - "tokio-fs", - "tokio-io", - "tokio-reactor", - "tokio-sync", - "tokio-tcp", - "tokio-threadpool", - "tokio-timer", - "tokio-udp", - "tokio-uds", -] - -[[package]] -name = "tokio" -version = "0.2.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "099837d3464c16a808060bb3f02263b412f6fafcb5d01c533d309985fbeebe48" -dependencies = [ - "bytes 0.5.6", - "fnv", - "futures-core", - "iovec", - "lazy_static", - "libc", - "memchr", - "mio", - "mio-uds", - "num_cpus", - "pin-project-lite 0.1.11", - "signal-hook-registry", - "slab", - "winapi 0.3.9", -] - -[[package]] -name = "tokio-buf" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" -dependencies = [ - "bytes 0.4.12", - "either", - "futures 0.1.30", -] - -[[package]] -name = "tokio-codec" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.30", - "tokio-io", -] - -[[package]] -name = "tokio-current-thread" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" -dependencies = [ - "futures 0.1.30", - "tokio-executor", -] - -[[package]] -name = "tokio-executor" -version = "0.1.10" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671" -dependencies = [ - "crossbeam-utils 0.7.2", - "futures 0.1.30", -] - -[[package]] -name = "tokio-fs" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4" -dependencies = [ - "futures 0.1.30", - "tokio-io", - "tokio-threadpool", -] - -[[package]] -name = "tokio-io" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.30", - "log", -] - -[[package]] -name = "tokio-named-pipes" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d282d483052288b2308ba5ee795f5673b159c9bdf63c385a05609da782a5eae" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.30", - "mio", - "mio-named-pipes", - "tokio 0.1.22", -] - -[[package]] -name = "tokio-reactor" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351" -dependencies = [ - "crossbeam-utils 0.7.2", - "futures 0.1.30", - "lazy_static", - "log", - "mio", - "num_cpus", - "parking_lot 0.9.0", - "slab", - "tokio-executor", - "tokio-io", - "tokio-sync", -] - -[[package]] -name = "tokio-rustls" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" -dependencies = [ - "futures-core", - "rustls 0.18.1", - "tokio 0.2.24", - "webpki", -] - -[[package]] -name = "tokio-service" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" -dependencies = [ - "futures 0.1.30", -] - 
-[[package]] -name = "tokio-sync" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee" -dependencies = [ - "fnv", - "futures 0.1.30", -] - -[[package]] -name = "tokio-tcp" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.30", - "iovec", - "mio", - "tokio-io", - "tokio-reactor", -] - -[[package]] -name = "tokio-threadpool" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89" -dependencies = [ - "crossbeam-deque 0.7.3", - "crossbeam-queue", - "crossbeam-utils 0.7.2", - "futures 0.1.30", - "lazy_static", - "log", - "num_cpus", - "slab", - "tokio-executor", -] - -[[package]] -name = "tokio-timer" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296" -dependencies = [ - "crossbeam-utils 0.7.2", - "futures 0.1.30", - "slab", - "tokio-executor", -] - -[[package]] -name = "tokio-udp" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.30", - "log", - "mio", - "tokio-codec", - "tokio-io", - "tokio-reactor", -] - -[[package]] -name = "tokio-uds" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.30", - "iovec", - "libc", - "log", - "mio", - "mio-uds", - "tokio-codec", - "tokio-io", - "tokio-reactor", -] - -[[package]] -name = "tokio-util" -version = "0.3.1" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" -dependencies = [ - "bytes 0.5.6", - "futures-core", - "futures-sink", - "log", - "pin-project-lite 0.1.11", - "tokio 0.2.24", -] - -[[package]] -name = "toml" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" -dependencies = [ - "serde", -] - -[[package]] -name = "tower-service" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" - -[[package]] -name = "tracing" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" -dependencies = [ - "cfg-if 1.0.0", - "log", - "pin-project-lite 0.2.4", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tracing-core" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "tracing-futures" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" -dependencies = [ - "pin-project 0.4.27", - "tracing", -] - -[[package]] -name = "tracing-log" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9" 
-dependencies = [ - "lazy_static", - "log", - "tracing-core", -] - -[[package]] -name = "tracing-serde" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" -dependencies = [ - "serde", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1fa8f0c8f4c594e4fc9debc1990deab13238077271ba84dd853d54902ee3401" -dependencies = [ - "ansi_term 0.12.1", - "chrono", - "lazy_static", - "matchers", - "regex", - "serde", - "serde_json", - "sharded-slab", - "smallvec 1.6.1", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", - "tracing-serde", -] - -[[package]] -name = "trie-db" -version = "0.22.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc176c377eb24d652c9c69c832c832019011b6106182bf84276c66b66d5c9a6" -dependencies = [ - "hash-db", - "hashbrown 0.9.1", - "log", - "rustc-hex", - "smallvec 1.6.1", -] - -[[package]] -name = "trie-root" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "652931506d2c1244d7217a70b99f56718a7b4161b37f04e7cd868072a99f68cd" -dependencies = [ - "hash-db", -] - -[[package]] -name = "triehash" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f490aa7aa4e4d07edeba442c007e42e3e7f43aafb5112c5b047fff0b1aa5449c" -dependencies = [ - "hash-db", - "rlp 0.4.6", -] - -[[package]] -name = "try-lock" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" - -[[package]] -name = "twox-hash" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59" -dependencies = [ - "cfg-if 0.1.10", - "rand 
0.7.3", - "static_assertions", -] - -[[package]] -name = "typenum" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" - -[[package]] -name = "ucd-trie" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" - -[[package]] -name = "uint" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e11fe9a9348741cf134085ad57c249508345fe16411b3d7fb4ff2da2f1d6382e" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - -[[package]] -name = "unicode-bidi" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" -dependencies = [ - "matches", -] - -[[package]] -name = "unicode-normalization" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-segmentation" -version = "1.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" - -[[package]] -name = "unicode-width" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" - -[[package]] -name = "unicode-xid" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" - -[[package]] -name = "universal-hash" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" -dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.0", -] - -[[package]] -name = "unsigned-varint" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" - -[[package]] -name = "unsigned-varint" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35581ff83d4101e58b582e607120c7f5ffb17e632a980b1f38334d76b36908b2" -dependencies = [ - "asynchronous-codec 0.5.0", - "bytes 1.0.0", - "futures-io", - "futures-util", -] - -[[package]] -name = "unsigned-varint" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f8d425fafb8cd76bc3f22aace4af471d3156301d7508f2107e98fbeae10bc7f" -dependencies = [ - "asynchronous-codec 0.6.0", - "bytes 1.0.0", - "futures-io", - "futures-util", -] - -[[package]] -name = "untrusted" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" - -[[package]] -name = "url" -version = "1.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" -dependencies = [ - "idna 0.1.5", - "matches", - "percent-encoding 1.0.1", -] - -[[package]] -name = "url" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5909f2b0817350449ed73e8bcd81c8c3c8d9a7a5d8acba4b27db277f1868976e" -dependencies = [ - "form_urlencoded", - "idna 0.2.0", - "matches", - "percent-encoding 2.1.0", -] - -[[package]] -name = "value-bag" -version = "1.0.0-alpha.6" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b676010e055c99033117c2343b33a40a30b91fecd6c49055ac9cd2d6c305ab1" -dependencies = [ - "ctor", -] - -[[package]] -name = "vcpkg" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" - -[[package]] -name = "vec-arena" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eafc1b9b2dfc6f5529177b62cf806484db55b32dc7c9658a118e11bbeb33061d" - -[[package]] -name = "vec_map" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" - -[[package]] -name = "vergen" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ce50d8996df1f85af15f2cd8d33daae6e479575123ef4314a51a70a230739cb" -dependencies = [ - "bitflags", - "chrono", -] - -[[package]] -name = "version_check" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" - -[[package]] -name = "void" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" - -[[package]] -name = "waker-fn" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" - -[[package]] -name = "want" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" -dependencies = [ - "futures 0.1.30", - "log", - "try-lock", -] - -[[package]] -name = "want" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" -dependencies = [ - "log", - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - -[[package]] -name = "wasm-bindgen" -version = "0.2.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cd364751395ca0f68cafb17666eee36b63077fb5ecd972bbcd74c90c4bf736e" -dependencies = [ - "cfg-if 1.0.0", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1114f89ab1f4106e5b55e688b828c0ab0ea593a1ea7c094b141b14cbaaec2d62" -dependencies = [ - "bumpalo", - "lazy_static", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fe9756085a84584ee9457a002b7cdfe0bfff169f45d2591d8be1345a6780e35" -dependencies = [ - "cfg-if 1.0.0", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6ac8995ead1f084a8dea1e65f194d0973800c7f571f6edd70adf06ecf77084" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a48c72f299d80557c7c62e37e7225369ecc0c963964059509fbafe917c7549" -dependencies = [ - "proc-macro2", - "quote", - "syn", - 
"wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" - -[[package]] -name = "wasm-timer" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" -dependencies = [ - "futures 0.3.12", - "js-sys", - "parking_lot 0.11.1", - "pin-utils", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - -[[package]] -name = "wasmi" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf617d864d25af3587aa745529f7aaa541066c876d57e050c0d0c85c61c92aff" -dependencies = [ - "libc", - "memory_units", - "num-rational", - "num-traits", - "parity-wasm", - "wasmi-validation", -] - -[[package]] -name = "wasmi-validation" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea78c597064ba73596099281e2f4cfc019075122a65cdda3205af94f0b264d93" -dependencies = [ - "parity-wasm", -] - -[[package]] -name = "wasmparser" -version = "0.71.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89a30c99437829ede826802bfcf28500cf58df00e66cb9114df98813bc145ff1" - -[[package]] -name = "wasmtime" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7426055cb92bd9a1e9469b48154d8d6119cd8c498c8b70284e420342c05dc45d" -dependencies = [ - "anyhow", - "backtrace", - "bincode", - "cfg-if 1.0.0", - "cpp_demangle", - "indexmap", - "libc", - "log", - "region", - "rustc-demangle", - "serde", - "smallvec 1.6.1", - "target-lexicon", - "wasmparser", - "wasmtime-cache", - "wasmtime-environ", - "wasmtime-jit", - "wasmtime-profiling", - "wasmtime-runtime", - "wat", - "winapi 0.3.9", -] - -[[package]] -name = "wasmtime-cache" -version = "0.22.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c01d9287e36921e46f5887a47007824ae5dbb9b7517a2d565660ab4471478709" -dependencies = [ - "anyhow", - "base64 0.13.0", - "bincode", - "directories-next", - "errno", - "file-per-thread-logger", - "libc", - "log", - "serde", - "sha2 0.9.2", - "toml", - "winapi 0.3.9", - "zstd", -] - -[[package]] -name = "wasmtime-cranelift" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4134ed3a4316cd0de0e546c6004850afe472b0fa3fcdc2f2c15f8d449562d962" -dependencies = [ - "cranelift-codegen", - "cranelift-entity", - "cranelift-frontend", - "cranelift-wasm", - "wasmtime-environ", -] - -[[package]] -name = "wasmtime-debug" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91fa931df6dd8af2b02606307674d3bad23f55473d5f4c809dddf7e4c4dc411" -dependencies = [ - "anyhow", - "gimli", - "more-asserts", - "object", - "target-lexicon", - "thiserror", - "wasmparser", - "wasmtime-environ", -] - -[[package]] -name = "wasmtime-environ" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1098871dc3120aaf8190d79153e470658bb79f63ee9ca31716711e123c28220" -dependencies = [ - "anyhow", - "cfg-if 1.0.0", - "cranelift-codegen", - "cranelift-entity", - "cranelift-wasm", - "gimli", - "indexmap", - "log", - "more-asserts", - "serde", - "thiserror", - "wasmparser", -] - -[[package]] -name = "wasmtime-jit" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "738bfcd1561ede8bb174215776fd7d9a95d5f0a47ca3deabe0282c55f9a89f68" -dependencies = [ - "addr2line", - "anyhow", - "cfg-if 1.0.0", - "cranelift-codegen", - "cranelift-entity", - "cranelift-frontend", - "cranelift-native", - "cranelift-wasm", - "gimli", - "log", - "more-asserts", - "object", - "rayon", - "region", - "serde", - "target-lexicon", - "thiserror", - "wasmparser", - 
"wasmtime-cranelift", - "wasmtime-debug", - "wasmtime-environ", - "wasmtime-obj", - "wasmtime-profiling", - "wasmtime-runtime", - "winapi 0.3.9", -] - -[[package]] -name = "wasmtime-obj" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e96d77f1801131c5e86d93e42a3cf8a35402107332c202c245c83f34888a906" -dependencies = [ - "anyhow", - "more-asserts", - "object", - "target-lexicon", - "wasmtime-debug", - "wasmtime-environ", -] - -[[package]] -name = "wasmtime-profiling" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60bb672c9d894776d7b9250dd9b4fe890f8760201ee4f53e5f2da772b6c4debb" -dependencies = [ - "anyhow", - "cfg-if 1.0.0", - "gimli", - "lazy_static", - "libc", - "object", - "scroll", - "serde", - "target-lexicon", - "wasmtime-environ", - "wasmtime-runtime", -] - -[[package]] -name = "wasmtime-runtime" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a978086740949eeedfefcee667b57a9e98d9a7fc0de382fcfa0da30369e3530d" -dependencies = [ - "backtrace", - "cc", - "cfg-if 1.0.0", - "indexmap", - "lazy_static", - "libc", - "log", - "memoffset 0.6.1", - "more-asserts", - "psm", - "region", - "thiserror", - "wasmtime-environ", - "winapi 0.3.9", -] - -[[package]] -name = "wast" -version = "30.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b79907b22f740634810e882d8d1d9d0f9563095a8ab94e786e370242bff5cd2" -dependencies = [ - "leb128", -] - -[[package]] -name = "wat" -version = "1.0.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8279a02835bf12e61ed2b3c3cbc6ecf9918762fd97e036917c11a09ec20ca44" -dependencies = [ - "wast", -] - -[[package]] -name = "web-sys" -version = "0.3.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222b1ef9334f92a21d3fb53dc3fd80f30836959a90f9274a626d7e06315ba3c3" -dependencies = [ - "js-sys", - 
"wasm-bindgen", -] - -[[package]] -name = "web3" -version = "0.15.0" -source = "git+https://github.com/tomusdrw/rust-web3.git?branch=td-ethabi#68dabc289bf9f5e59447d822c5da5b4c768175c6" -dependencies = [ - "arrayvec 0.5.2", - "derive_more", - "ethabi", - "ethereum-types", - "futures 0.3.12", - "futures-timer 3.0.2", - "hex", - "jsonrpc-core 17.0.0", - "log", - "parking_lot 0.11.1", - "pin-project 1.0.5", - "rlp 0.5.0", - "serde", - "serde_json", - "tiny-keccak", -] - -[[package]] -name = "webpki" -version = "0.21.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "webpki-roots" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a262ae37dd9d60f60dd473d1158f9fbebf110ba7b6a5051c8160460f6043718b" -dependencies = [ - "webpki", -] - -[[package]] -name = "webpki-roots" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82015b7e0b8bad8185994674a13a93306bea76cf5a16c5a181382fd3a5ec2376" -dependencies = [ - "webpki", -] - -[[package]] -name = "wepoll-sys" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcb14dea929042224824779fbc82d9fab8d2e6d3cbc0ac404de8edf489e77ff" -dependencies = [ - "cc", -] - -[[package]] -name = "which" -version = "3.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" -dependencies = [ - "libc", -] - -[[package]] -name = "which" -version = "4.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87c14ef7e1b8b8ecfc75d5eca37949410046e66f15d185c01d70824f1f8111ef" -dependencies = [ - "libc", - "thiserror", -] - -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - -[[package]] -name = "wyz" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" - -[[package]] -name = "x25519-dalek" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc614d95359fd7afc321b66d2107ede58b246b844cf5d8a0adcca413e439f088" -dependencies = [ - "curve25519-dalek 3.0.0", - "rand_core 0.5.1", - "zeroize", -] - -[[package]] -name = 
"yaml-rust" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e66366e18dc58b46801afbf2ca7661a9f59cc8c5962c29892b6039b4f86fa992" - -[[package]] -name = "yamux" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cc7bd8c983209ed5d527f44b01c41b7dc146fd960c61cf9e1d25399841dc271" -dependencies = [ - "futures 0.3.12", - "log", - "nohash-hasher", - "parking_lot 0.11.1", - "rand 0.7.3", - "static_assertions", -] - -[[package]] -name = "zeroize" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81a974bcdd357f0dca4d41677db03436324d45a4c9ed2d0b873a5a360ce41c36" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3f369ddb18862aba61aa49bf31e74d29f0f162dec753063200e1dc084345d16" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] - -[[package]] -name = "zstd" -version = "0.5.4+zstd.1.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910" -dependencies = [ - "zstd-safe", -] - -[[package]] -name = "zstd-safe" -version = "2.0.6+zstd.1.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e" -dependencies = [ - "libc", - "zstd-sys", -] - -[[package]] -name = "zstd-sys" -version = "1.4.18+zstd.1.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81" -dependencies = [ - "cc", - "glob", - "itertools", - "libc", -] diff --git a/polkadot/bridges/Dockerfile b/polkadot/bridges/Dockerfile deleted file mode 100644 index a1ff908974caf6badc2882e22365744b6a850405..0000000000000000000000000000000000000000 --- 
a/polkadot/bridges/Dockerfile +++ /dev/null @@ -1,71 +0,0 @@ -# Builds images used by the bridge. -# -# In particular, it can be used to build Substrate nodes and bridge relayers. The binary that gets -# built can be specified with the `PROJECT` build-arg. For example, to build the `substrate-relay` -# you would do the following: -# -# `docker build . -t local/substrate-relay --build-arg=PROJECT=substrate-relay` -# -# See the `deployments/README.md` for all the available `PROJECT` values. - -# This first stage prepares our dependencies to be built by `cargo-chef`. -FROM rust as planner -WORKDIR /parity-bridges-common -RUN cargo install cargo-chef --version 0.1.13 -COPY . . -RUN cargo chef prepare --recipe-path recipe.json - -# This second stage is where the dependencies actually get built. -# The reason we split it from the first stage is so that the `COPY . .` -# step doesn't blow our cache. -FROM paritytech/bridge-dependencies AS cacher -WORKDIR /parity-bridges-common -RUN cargo install cargo-chef --version 0.1.13 - -COPY --from=planner /parity-bridges-common/recipe.json recipe.json -RUN cargo chef cook --release --recipe-path recipe.json - -# In this third stage we go ahead and build the actual binary we want. -# This should be fairly quick since the dependencies are being built and -# cached in the previous stage. -FROM paritytech/bridge-dependencies as builder -WORKDIR /parity-bridges-common -RUN cargo install cargo-chef --version 0.1.13 - -COPY . . -COPY --from=cacher /parity-bridges-common/target target -COPY --from=cacher $CARGO_HOME $CARGO_HOME - -ARG PROJECT=ethereum-poa-relay -RUN cargo build --release --verbose -p ${PROJECT} -RUN strip ./target/release/${PROJECT} - -# In this final stage we copy over the final binary and do some checks -# to make sure that everything looks good. 
-FROM ubuntu:xenial as runtime - -# show backtraces -ENV RUST_BACKTRACE 1 - -RUN set -eux; \ - apt-get update && \ - apt-get install -y libssl-dev curl - -RUN groupadd -g 1000 user \ - && useradd -u 1000 -g user -s /bin/sh -m user - -# switch to non-root user -USER user - -WORKDIR /home/user - -ARG PROJECT=ethereum-poa-relay - -COPY --chown=user:user --from=builder /parity-bridges-common/target/release/${PROJECT} ./ -COPY --chown=user:user --from=builder /parity-bridges-common/deployments/local-scripts/bridge-entrypoint.sh ./ - -# check if executable works in this container -RUN ./${PROJECT} --version - -ENV PROJECT=$PROJECT -ENTRYPOINT ["/home/user/bridge-entrypoint.sh"] diff --git a/polkadot/bridges/LICENSE b/polkadot/bridges/LICENSE deleted file mode 100644 index 733c072369ca77331f392c40da7404c85c36542c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/LICENSE +++ /dev/null @@ -1,675 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. 
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. 
If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. 
Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. 
- - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. 
Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. 
- - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. 
- - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. 
If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. 
- - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. 
- - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the 
material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. 
If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. 
- - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. 
The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - {one line to give the program's name and a brief idea of what it does.} - Copyright (C) {year} {name of author} - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - {project} Copyright (C) {year} {fullname} - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. 
- diff --git a/polkadot/bridges/README.md b/polkadot/bridges/README.md deleted file mode 100644 index eaa63ad14c77ba49be48f93baca5e62beb817787..0000000000000000000000000000000000000000 --- a/polkadot/bridges/README.md +++ /dev/null @@ -1,211 +0,0 @@ -# Parity Bridges Common - -This is a collection of components for building bridges. - -These components include Substrate pallets for syncing headers, passing arbitrary messages, as well -as libraries for building relayers to provide cross-chain communication capabilities. - -Three bridge nodes are also available. The nodes can be used to run test networks which bridge other -Substrate chains or Ethereum Proof-of-Authority chains. - -🚧 The bridges are currently under construction - a hardhat is recommended beyond this point 🚧 - -## Contents -- [Installation](#installation) -- [High-Level Architecture](#high-level-architecture) -- [Project Layout](#project-layout) -- [Running the Bridge](#running-the-bridge) -- [How to send a message](#how-to-send-a-message) -- [Community](#community) - -## Installation -To get up and running you need both stable and nightly Rust. Rust nightly is used to build the Web -Assembly (WASM) runtime for the node. You can configure the WASM support as so: - -``` -rustup install nightly -rustup target add wasm32-unknown-unknown --toolchain nightly -``` - -Once this is configured you can build and test the repo as follows: - -``` -git clone https://github.com/paritytech/parity-bridges-common.git -cd parity-bridges-common -cargo build --all -cargo test --all -``` - -If you need more information about setting up your development environment Substrate's -[Getting Started](https://substrate.dev/docs/en/knowledgebase/getting-started/) page is a good -resource. - -## High-Level Architecture - -This repo has support for bridging foreign chains together using a combination of Substrate pallets -and external processes called relayers. 
A bridge chain is one that is able to follow the consensus -of a foreign chain independently. For example, consider the case below where we want to bridge two -Substrate based chains. - -``` -+---------------+ +---------------+ -| | | | -| Rialto | | Millau | -| | | | -+-------+-------+ +-------+-------+ - ^ ^ - | +---------------+ | - | | | | - +-----> | Bridge Relay | <-------+ - | | - +---------------+ -``` - -The Millau chain must be able to accept Rialto headers and verify their integrity. It does this by -using a runtime module designed to track GRANDPA finality. Since two blockchains can't interact -directly they need an external service, called a relayer, to communicate. The relayer will subscribe -to new Rialto headers via RPC and submit them to the Millau chain for verification. - -Take a look at [Bridge High Level Documentation](./docs/high-level-overview.md) for more in-depth -description of the bridge interaction. - -## Project Layout -Here's an overview of how the project is laid out. The main bits are the `node`, which is the actual -"blockchain", the `modules` which are used to build the blockchain's logic (a.k.a the runtime) and -the `relays` which are used to pass messages between chains. - -``` -├── bin // Node and Runtime for the various Substrate chains -│ └── ... -├── deployments // Useful tools for deploying test networks -│ └── ... -├── diagrams // Pretty pictures of the project architecture -│ └── ... -├── modules // Substrate Runtime Modules (a.k.a Pallets) -│ ├── ethereum // Ethereum PoA Header Sync Module -│ ├── substrate // Substrate Based Chain Header Sync Module -│ ├── message-lane // Cross Chain Message Passing -│ └── ... -├── primitives // Code shared between modules, runtimes, and relays -│ └── ... -├── relays // Application for sending headers and messages between chains -│ └── ... 
-└── scripts // Useful development and maintenence scripts - ``` - -## Running the Bridge - -To run the Bridge you need to be able to connect the bridge relay node to the RPC interface of nodes -on each side of the bridge (source and target chain). - -There are 3 ways to run the bridge, described below: - - building & running from source, - - building or using Docker images for each individual component, - - running a Docker Compose setup (recommended). - -### Using the Source - -First you'll need to build the bridge nodes and relay. This can be done as follows: - -```bash -# In `parity-bridges-common` folder -cargo build -p rialto-bridge-node -cargo build -p millau-bridge-node -cargo build -p substrate-relay -``` - -### Running - -To run a simple dev network you'll can use the scripts located in -[the `deployments/local-scripts` folder](./deployments/local-scripts). Since the relayer connects to -both Substrate chains it must be run last. - -```bash -# In `parity-bridges-common` folder -./deployments/local-scripts/run-rialto-bridge-node.sh -./deployments/local-scripts/run-millau-bridge-node.sh -./deployments/local-scripts/relay-millau-to-rialto.sh -``` - -At this point you should see the relayer submitting headers from the Millau Substrate chain to the -Rialto Substrate chain. - -### Local Docker Setup - -To get up and running quickly you can use published Docker images for the bridge nodes and relayer. -The images are published on [Docker Hub](https://hub.docker.com/u/paritytech). 
- -To run the dev network we first run the two bridge nodes: - -```bash -docker run -p 30333:30333 -p 9933:9933 -p 9944:9944 \ - -it paritytech/rialto-bridge-node --dev --tmp \ - --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external - -docker run -p 30334:30333 -p 9934:9933 -p 9945:9944 \ - -it paritytech/millau-bridge-node --dev --tmp \ - --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external -``` - -Notice that the `docker run` command will accept all the normal Substrate flags. For local -development you should at minimum run with the `--dev` flag or else no blocks will be produced. - -Then we need to initialize and run the relayer: - -```bash -docker run --network=host -it \ - paritytech/substrate-relay initialize-rialto-headers-bridge-in-millau \ - --millau-host localhost \ - --millau-port 9945 \ - --rialto-host localhost \ - --rialto-port 9944 \ - --millau-signer //Alice - -docker run --network=host -it \ - paritytech/substrate-relay rialto-headers-to-millau \ - --millau-host localhost \ - --millau-port 9945 \ - --rialto-host localhost \ - --rialto-port 9944 \ - --millau-signer //Bob \ -``` - -You should now see the relayer submitting headers from the Millau chain to the Rialto chain. - -If you don't want to use the published Docker images you can build images yourself. You can do this -by running the following commands at the top level of the repository. - -```bash -# In `parity-bridges-common` folder -docker build . -t local/rialto-bridge-node --build-arg PROJECT=rialto-bridge-node -docker build . -t local/millau-bridge-node --build-arg PROJECT=millau-bridge-node -docker build . -t local/substrate-relay --build-arg PROJECT=substrate-relay -``` - -_Note: Building the node images will take a long time, so make sure you have some coffee handy._ - -Once you have the images built you can use them in the previous commands by replacing -`paritytech/` with `local/` everywhere. 
- -### Full Network Docker Compose Setup - -For a more sophisticated deployment which includes bidirectional header sync, message passing, -monitoring dashboards, etc. see the [Deployments README](./deployments/README.md). - -### How to send a message - -A straightforward way to interact with and test the bridge is sending messages. This is explained -in the [send message](./docs/send-message.md) document. -## Community - -Main hangout for the community is [Element](https://element.io/) (formerly Riot). Element is a chat -server like, for example, Discord. Most discussions around Polkadot and Substrate happen -in various Element "rooms" (channels). So, joining Element might be a good idea, anyway. - -If you are interested in information exchange and development of Polkadot related bridges please -feel free to join the [Polkadot Bridges](https://app.element.io/#/room/#bridges:web3.foundation) -Element channel. - -The [Substrate Technical](https://app.element.io/#/room/#substrate-technical:matrix.org) Element -channel is most suited for discussions regarding Substrate itself. 
- diff --git a/polkadot/bridges/bin/.keep b/polkadot/bridges/bin/.keep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/polkadot/bridges/bin/millau/node/Cargo.toml b/polkadot/bridges/bin/millau/node/Cargo.toml deleted file mode 100644 index 6f7aa94e6ffa4de68ab88ae8e5053908872ab80d..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/millau/node/Cargo.toml +++ /dev/null @@ -1,61 +0,0 @@ -[package] -name = "millau-bridge-node" -description = "Substrate node compatible with Millau runtime" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -build = "build.rs" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/parity-bridges-common/" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -jsonrpc-core = "15.1.0" -structopt = "0.3.21" - -# Bridge dependencies - -bp-message-lane = { path = "../../../primitives/message-lane" } -bp-millau= { path = "../../../primitives/millau" } -bp-runtime = { path = "../../../primitives/runtime" } -millau-runtime = { path = "../runtime" } -pallet-message-lane = { path = "../../../modules/message-lane" } -pallet-message-lane-rpc = { path = "../../../modules/message-lane/rpc" } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["wasmtime"] } -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-executor = { git = 
"https://github.com/paritytech/substrate", branch = "master" } -sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-finality-grandpa-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[build-dependencies] -build-script-utils = { package = "substrate-build-script-utils", version = "2.0" } -frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } -vergen = "3.1.0" - -[features] -default = [] - -# TODO: https://github.com/paritytech/parity-bridges-common/issues/390 -# I've left the feature flag here to test our CI configuration -runtime-benchmarks = [ - # "millau-runtime/runtime-benchmarks", -] diff --git a/polkadot/bridges/bin/millau/node/build.rs b/polkadot/bridges/bin/millau/node/build.rs deleted file mode 100644 index e9a10ff8ad009ae29752bd6db4fc03b6bebc977e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/millau/node/build.rs +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 
2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use vergen::{generate_cargo_keys, ConstantsFlags}; - -const ERROR_MSG: &str = "Failed to generate metadata files"; - -fn main() { - generate_cargo_keys(ConstantsFlags::SHA_SHORT).expect(ERROR_MSG); - - build_script_utils::rerun_if_git_head_changed(); -} diff --git a/polkadot/bridges/bin/millau/node/src/chain_spec.rs b/polkadot/bridges/bin/millau/node/src/chain_spec.rs deleted file mode 100644 index 8e9aded9f11d3d2e4c7f87e1a6d96075a9153e31..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/millau/node/src/chain_spec.rs +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use bp_millau::derive_account_from_rialto_id; -use millau_runtime::{ - AccountId, AuraConfig, BalancesConfig, BridgeRialtoConfig, GenesisConfig, GrandpaConfig, SessionConfig, - SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY, -}; -use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use sp_core::{sr25519, Pair, Public}; -use sp_finality_grandpa::AuthorityId as GrandpaId; -use sp_runtime::traits::{IdentifyAccount, Verify}; - -/// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. -pub type ChainSpec = sc_service::GenericChainSpec; - -/// The chain specification option. This is expected to come in from the CLI and -/// is little more than one of a number of alternatives which can easily be converted -/// from a string (`--chain=...`) into a `ChainSpec`. -#[derive(Clone, Debug)] -pub enum Alternative { - /// Whatever the current runtime is, with just Alice as an auth. - Development, - /// Whatever the current runtime is, with simple Alice/Bob/Charlie/Dave/Eve auths. - LocalTestnet, -} - -/// Helper function to generate a crypto pair from seed -pub fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() -} - -type AccountPublic = ::Signer; - -/// Helper function to generate an account ID from seed -pub fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} - -/// Helper function to generate an authority key for Aura -pub fn get_authority_keys_from_seed(s: &str) -> (AccountId, AuraId, GrandpaId) { - ( - get_account_id_from_seed::(s), - get_from_seed::(s), - get_from_seed::(s), - ) -} - -impl Alternative { - /// Get an actual chain config from one of the alternatives. 
- pub(crate) fn load(self) -> ChainSpec { - match self { - Alternative::Development => ChainSpec::from_genesis( - "Development", - "dev", - sc_service::ChainType::Development, - || { - testnet_genesis( - vec![get_authority_keys_from_seed("Alice")], - get_account_id_from_seed::("Alice"), - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - true, - ) - }, - vec![], - None, - None, - None, - None, - ), - Alternative::LocalTestnet => ChainSpec::from_genesis( - "Local Testnet", - "local_testnet", - sc_service::ChainType::Local, - || { - testnet_genesis( - vec![ - get_authority_keys_from_seed("Alice"), - get_authority_keys_from_seed("Bob"), - get_authority_keys_from_seed("Charlie"), - get_authority_keys_from_seed("Dave"), - get_authority_keys_from_seed("Eve"), - ], - get_account_id_from_seed::("Alice"), - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("George"), - get_account_id_from_seed::("Harry"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - get_account_id_from_seed::("George//stash"), - get_account_id_from_seed::("Harry//stash"), - pallet_message_lane::Module::::relayer_fund_account_id(), - derive_account_from_rialto_id(bp_runtime::SourceAccount::Account( - get_account_id_from_seed::("Dave"), - )), - ], - true, - ) - }, - vec![], - None, - None, - None, - None, - ), - } - } -} - -fn session_keys(aura: AuraId, grandpa: GrandpaId) -> SessionKeys { - SessionKeys { aura, grandpa } -} - -fn 
testnet_genesis( - initial_authorities: Vec<(AccountId, AuraId, GrandpaId)>, - root_key: AccountId, - endowed_accounts: Vec, - _enable_println: bool, -) -> GenesisConfig { - GenesisConfig { - frame_system: Some(SystemConfig { - code: WASM_BINARY.to_vec(), - changes_trie_config: Default::default(), - }), - pallet_balances: Some(BalancesConfig { - balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(), - }), - pallet_aura: Some(AuraConfig { - authorities: Vec::new(), - }), - pallet_grandpa: Some(GrandpaConfig { - authorities: Vec::new(), - }), - pallet_substrate_bridge: Some(BridgeRialtoConfig { - // We'll initialize the pallet with a dispatchable instead. - init_data: None, - owner: Some(root_key.clone()), - }), - pallet_sudo: Some(SudoConfig { key: root_key }), - pallet_session: Some(SessionConfig { - keys: initial_authorities - .iter() - .map(|x| (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone()))) - .collect::>(), - }), - } -} - -#[test] -fn derived_dave_account_is_as_expected() { - let dave = get_account_id_from_seed::("Dave"); - let derived: AccountId = derive_account_from_rialto_id(bp_runtime::SourceAccount::Account(dave)); - assert_eq!( - derived.to_string(), - "5DNW6UVnb7TN6wX5KwXtDYR3Eccecbdzuw89HqjyNfkzce6J".to_string() - ); -} diff --git a/polkadot/bridges/bin/millau/node/src/cli.rs b/polkadot/bridges/bin/millau/node/src/cli.rs deleted file mode 100644 index 1149c4f910c8dd1ec9eaa7cdc0f041aae5ae70bb..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/millau/node/src/cli.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use sc_cli::RunCmd; -use structopt::StructOpt; - -#[derive(Debug, StructOpt)] -pub struct Cli { - #[structopt(subcommand)] - pub subcommand: Option, - - #[structopt(flatten)] - pub run: RunCmd, -} - -/// Possible subcommands of the main binary. -#[derive(Debug, StructOpt)] -pub enum Subcommand { - /// Key management cli utilities - Key(sc_cli::KeySubcommand), - /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. - Verify(sc_cli::VerifyCmd), - - /// Generate a seed that provides a vanity address. - Vanity(sc_cli::VanityCmd), - - /// Sign a message, with a given (secret) key. - Sign(sc_cli::SignCmd), - - /// Build a chain specification. - BuildSpec(sc_cli::BuildSpecCmd), - - /// Validate blocks. - CheckBlock(sc_cli::CheckBlockCmd), - - /// Export blocks. - ExportBlocks(sc_cli::ExportBlocksCmd), - - /// Export the state of a given block into a chain spec. - ExportState(sc_cli::ExportStateCmd), - - /// Import blocks. - ImportBlocks(sc_cli::ImportBlocksCmd), - - /// Remove the whole chain. - PurgeChain(sc_cli::PurgeChainCmd), - - /// Revert the chain to a previous state. - Revert(sc_cli::RevertCmd), - - /// The custom benchmark subcommmand benchmarking runtime pallets. 
- #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] - Benchmark(frame_benchmarking_cli::BenchmarkCmd), -} diff --git a/polkadot/bridges/bin/millau/node/src/command.rs b/polkadot/bridges/bin/millau/node/src/command.rs deleted file mode 100644 index 8751a4516d1de438b2bc4d67f4bbea6f78e91ef3..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/millau/node/src/command.rs +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::cli::{Cli, Subcommand}; -use crate::service; -use crate::service::new_partial; -use millau_runtime::Block; -use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli}; -use sc_service::PartialComponents; - -impl SubstrateCli for Cli { - fn impl_name() -> String { - "Millau Bridge Node".into() - } - - fn impl_version() -> String { - env!("CARGO_PKG_VERSION").into() - } - - fn description() -> String { - "Millau Bridge Node".into() - } - - fn author() -> String { - "Parity Technologies".into() - } - - fn support_url() -> String { - "https://github.com/paritytech/parity-bridges-common/".into() - } - - fn copyright_start_year() -> i32 { - 2019 - } - - fn executable_name() -> String { - "millau-bridge-node".into() - } - - fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { - &millau_runtime::VERSION - } - - fn load_spec(&self, id: &str) -> Result, String> { - Ok(Box::new( - match id { - "" | "dev" => crate::chain_spec::Alternative::Development, - "local" => crate::chain_spec::Alternative::LocalTestnet, - _ => return Err(format!("Unsupported chain specification: {}", id)), - } - .load(), - )) - } -} - -/// Parse and run command line arguments -pub fn run() -> sc_cli::Result<()> { - let cli = Cli::from_args(); - // make sure to set correct crypto version. - sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::Custom( - millau_runtime::SS58Prefix::get() as u16, - )); - - match &cli.subcommand { - Some(Subcommand::Benchmark(cmd)) => { - if cfg!(feature = "runtime-benchmarks") { - let runner = cli.create_runner(cmd)?; - - runner.sync_run(|config| cmd.run::(config)) - } else { - println!( - "Benchmarking wasn't enabled when building the node. \ - You can enable it with `--features runtime-benchmarks`." 
- ); - Ok(()) - } - } - Some(Subcommand::Key(cmd)) => cmd.run(&cli), - Some(Subcommand::Sign(cmd)) => cmd.run(), - Some(Subcommand::Verify(cmd)) => cmd.run(), - Some(Subcommand::Vanity(cmd)) => cmd.run(), - Some(Subcommand::BuildSpec(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) - } - Some(Subcommand::CheckBlock(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - import_queue, - .. - } = new_partial(&config)?; - Ok((cmd.run(client, import_queue), task_manager)) - }) - } - Some(Subcommand::ExportBlocks(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, task_manager, .. - } = new_partial(&config)?; - Ok((cmd.run(client, config.database), task_manager)) - }) - } - Some(Subcommand::ExportState(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, task_manager, .. - } = new_partial(&config)?; - Ok((cmd.run(client, config.chain_spec), task_manager)) - }) - } - Some(Subcommand::ImportBlocks(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - import_queue, - .. - } = new_partial(&config)?; - Ok((cmd.run(client, import_queue), task_manager)) - }) - } - Some(Subcommand::PurgeChain(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run(config.database)) - } - Some(Subcommand::Revert(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - backend, - .. 
- } = new_partial(&config)?; - Ok((cmd.run(client, backend), task_manager)) - }) - } - None => { - let runner = cli.create_runner(&cli.run)?; - runner.run_node_until_exit(|config| async move { - match config.role { - Role::Light => service::new_light(config), - _ => service::new_full(config), - } - .map_err(sc_cli::Error::Service) - }) - } - } -} diff --git a/polkadot/bridges/bin/millau/node/src/lib.rs b/polkadot/bridges/bin/millau/node/src/lib.rs deleted file mode 100644 index fdecc0b45f0ea0ba8b2a2f7e788d4690793f2399..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/millau/node/src/lib.rs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate Node Template CLI library. -#![warn(missing_docs)] - -mod chain_spec; -#[macro_use] -mod service; -mod cli; -mod command; - -/// Node run result. -pub type Result = sc_cli::Result<()>; - -/// Run node. 
-pub fn run() -> Result { - command::run() -} diff --git a/polkadot/bridges/bin/millau/node/src/main.rs b/polkadot/bridges/bin/millau/node/src/main.rs deleted file mode 100644 index 07ec88727df5f556100f95cec6ef494b517b0da3..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/millau/node/src/main.rs +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Millau bridge node. - -#![warn(missing_docs)] - -mod chain_spec; -#[macro_use] -mod service; -mod cli; -mod command; - -/// Run the Millau Node -fn main() -> sc_cli::Result<()> { - command::run() -} diff --git a/polkadot/bridges/bin/millau/node/src/service.rs b/polkadot/bridges/bin/millau/node/src/service.rs deleted file mode 100644 index 2f72e5717faee3a9db387b7b41231f13b337ab1c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/millau/node/src/service.rs +++ /dev/null @@ -1,433 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. - -// ===================================================================================== -// ===================================================================================== -// ===================================================================================== -// UPDATE GUIDE: -// 1) replace everything with node-template/src/service.rs contents (found in main Substrate repo); -// 2) the only thing to keep from old code, is `rpc_extensions_builder` - we use our own custom RPCs; -// 3) fix compilation errors; -// 4) test :) -// ===================================================================================== -// ===================================================================================== -// ===================================================================================== - -use millau_runtime::{self, opaque::Block, RuntimeApi}; -use sc_client_api::{ExecutorProvider, RemoteBackend}; -use sc_executor::native_executor_instance; -pub use sc_executor::NativeExecutor; -use sc_finality_grandpa::SharedVoterState; -use sc_keystore::LocalKeystore; -use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; -use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; -use sp_inherents::InherentDataProviders; -use std::sync::Arc; -use std::time::Duration; - -// Our native executor instance. 
-native_executor_instance!( - pub Executor, - millau_runtime::api::dispatch, - millau_runtime::native_version, - frame_benchmarking::benchmarking::HostFunctions, -); - -type FullClient = sc_service::TFullClient; -type FullBackend = sc_service::TFullBackend; -type FullSelectChain = sc_consensus::LongestChain; - -#[allow(clippy::type_complexity)] -pub fn new_partial( - config: &Configuration, -) -> Result< - sc_service::PartialComponents< - FullClient, - FullBackend, - FullSelectChain, - sp_consensus::DefaultImportQueue, - sc_transaction_pool::FullPool, - ( - sc_consensus_aura::AuraBlockImport< - Block, - FullClient, - sc_finality_grandpa::GrandpaBlockImport, - AuraPair, - >, - sc_finality_grandpa::LinkHalf, - ), - >, - ServiceError, -> { - if config.keystore_remote.is_some() { - return Err(ServiceError::Other("Remote Keystores are not supported.".to_string())); - } - let inherent_data_providers = sp_inherents::InherentDataProviders::new(); - - let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::(&config)?; - let client = Arc::new(client); - - let select_chain = sc_consensus::LongestChain::new(backend.clone()); - - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_handle(), - client.clone(), - ); - - let (grandpa_block_import, grandpa_link) = - sc_finality_grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain.clone())?; - - let aura_block_import = - sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone()); - - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( - sc_consensus_aura::slot_duration(&*client)?, - aura_block_import.clone(), - Some(Box::new(grandpa_block_import)), - client.clone(), - inherent_data_providers.clone(), - &task_manager.spawn_essential_handle(), - 
config.prometheus_registry(), - sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), - )?; - - Ok(sc_service::PartialComponents { - client, - backend, - task_manager, - import_queue, - keystore_container, - select_chain, - transaction_pool, - inherent_data_providers, - other: (aura_block_import, grandpa_link), - }) -} - -fn remote_keystore(_url: &str) -> Result, &'static str> { - // FIXME: here would the concrete keystore be built, - // must return a concrete type (NOT `LocalKeystore`) that - // implements `CryptoStore` and `SyncCryptoStore` - Err("Remote Keystore not supported.") -} - -/// Builds a new service for a full client. -pub fn new_full(mut config: Configuration) -> Result { - let sc_service::PartialComponents { - client, - backend, - mut task_manager, - import_queue, - mut keystore_container, - select_chain, - transaction_pool, - inherent_data_providers, - other: (block_import, grandpa_link), - } = new_partial(&config)?; - - if let Some(url) = &config.keystore_remote { - match remote_keystore(url) { - Ok(k) => keystore_container.set_remote_keystore(k), - Err(e) => { - return Err(ServiceError::Other(format!( - "Error hooking up remote keystore for {}: {}", - url, e - ))) - } - }; - } - - config - .network - .extra_sets - .push(sc_finality_grandpa::grandpa_peers_set_config()); - - let (network, network_status_sinks, system_rpc_tx, network_starter) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - on_demand: None, - block_announce_validator_builder: None, - })?; - - if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - backend.clone(), - task_manager.spawn_handle(), - client.clone(), - network.clone(), - ); - } - - let role = config.role.clone(); - let force_authoring = config.force_authoring; - let backoff_authoring_blocks: Option<()> = 
None; - let name = config.network.node_name.clone(); - let enable_grandpa = !config.disable_grandpa; - let prometheus_registry = config.prometheus_registry().cloned(); - - let rpc_extensions_builder = { - use bp_message_lane::{LaneId, MessageNonce}; - use bp_runtime::{InstanceId, RIALTO_BRIDGE_INSTANCE}; - use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider; - use sp_core::storage::StorageKey; - - // This struct is here to ease update process. - - /// Millau runtime from message-lane RPC point of view. - struct MillauMessageLaneKeys; - - impl pallet_message_lane_rpc::Runtime for MillauMessageLaneKeys { - fn message_key(&self, instance: &InstanceId, lane: &LaneId, nonce: MessageNonce) -> Option { - match *instance { - RIALTO_BRIDGE_INSTANCE => Some(millau_runtime::rialto_messages::message_key(lane, nonce)), - _ => None, - } - } - - fn outbound_lane_data_key(&self, instance: &InstanceId, lane: &LaneId) -> Option { - match *instance { - RIALTO_BRIDGE_INSTANCE => Some(millau_runtime::rialto_messages::outbound_lane_data_key(lane)), - _ => None, - } - } - - fn inbound_lane_data_key(&self, instance: &InstanceId, lane: &LaneId) -> Option { - match *instance { - RIALTO_BRIDGE_INSTANCE => Some(millau_runtime::rialto_messages::inbound_lane_data_key(lane)), - _ => None, - } - } - } - - use pallet_message_lane_rpc::{MessageLaneApi, MessageLaneRpcHandler}; - use sc_finality_grandpa_rpc::{GrandpaApi, GrandpaRpcHandler}; - use sc_rpc::DenyUnsafe; - use substrate_frame_rpc_system::{FullSystem, SystemApi}; - - let backend = backend.clone(); - let client = client.clone(); - let pool = transaction_pool.clone(); - - let justification_stream = grandpa_link.justification_stream(); - let shared_authority_set = grandpa_link.shared_authority_set().clone(); - let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty(); - - let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend.clone(), Some(shared_authority_set.clone())); 
- - Box::new(move |_, subscription_executor| { - let mut io = jsonrpc_core::IoHandler::default(); - io.extend_with(SystemApi::to_delegate(FullSystem::new( - client.clone(), - pool.clone(), - DenyUnsafe::No, - ))); - io.extend_with(GrandpaApi::to_delegate(GrandpaRpcHandler::new( - shared_authority_set.clone(), - shared_voter_state.clone(), - justification_stream.clone(), - subscription_executor, - finality_proof_provider.clone(), - ))); - io.extend_with(MessageLaneApi::to_delegate(MessageLaneRpcHandler::new( - backend.clone(), - Arc::new(MillauMessageLaneKeys), - ))); - - io - }) - }; - - let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks(sc_service::SpawnTasksParams { - network: network.clone(), - client: client.clone(), - keystore: keystore_container.sync_keystore(), - task_manager: &mut task_manager, - transaction_pool: transaction_pool.clone(), - rpc_extensions_builder, - on_demand: None, - remote_blockchain: None, - backend, - network_status_sinks, - system_rpc_tx, - config, - telemetry_span: None, - })?; - - if role.is_authority() { - let proposer = sc_basic_authorship::ProposerFactory::new( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry.as_ref(), - ); - - let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - - let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _, _>( - sc_consensus_aura::slot_duration(&*client)?, - client.clone(), - select_chain, - block_import, - proposer, - network.clone(), - inherent_data_providers, - force_authoring, - backoff_authoring_blocks, - keystore_container.sync_keystore(), - can_author_with, - )?; - - // the AURA authoring task is considered essential, i.e. if it - // fails we take down the service with it. 
- task_manager.spawn_essential_handle().spawn_blocking("aura", aura); - } - - // if the node isn't actively participating in consensus then it doesn't - // need a keystore, regardless of which protocol we use below. - let keystore = if role.is_authority() { - Some(keystore_container.sync_keystore()) - } else { - None - }; - - let grandpa_config = sc_finality_grandpa::Config { - // FIXME #1578 make this available through chainspec - gossip_duration: Duration::from_millis(333), - justification_period: 512, - name: Some(name), - observer_enabled: false, - keystore, - is_authority: role.is_authority(), - }; - - if enable_grandpa { - // start the full GRANDPA voter - // NOTE: non-authorities could run the GRANDPA observer protocol, but at - // this point the full voter should provide better guarantees of block - // and vote data availability than the observer. The observer has not - // been tested extensively yet and having most nodes in a network run it - // could lead to finality stalls. - let grandpa_config = sc_finality_grandpa::GrandpaParams { - config: grandpa_config, - link: grandpa_link, - network, - telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()), - voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), - prometheus_registry, - shared_voter_state: SharedVoterState::empty(), - }; - - // the GRANDPA voter task is considered infallible, i.e. - // if it fails we take down the service with it. - task_manager - .spawn_essential_handle() - .spawn_blocking("grandpa-voter", sc_finality_grandpa::run_grandpa_voter(grandpa_config)?); - } - - network_starter.start_network(); - Ok(task_manager) -} - -/// Builds a new service for a light client. 
-pub fn new_light(mut config: Configuration) -> Result { - let (client, backend, keystore_container, mut task_manager, on_demand) = - sc_service::new_light_parts::(&config)?; - - config - .network - .extra_sets - .push(sc_finality_grandpa::grandpa_peers_set_config()); - - let select_chain = sc_consensus::LongestChain::new(backend.clone()); - - let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( - config.transaction_pool.clone(), - config.prometheus_registry(), - task_manager.spawn_handle(), - client.clone(), - on_demand.clone(), - )); - - let (grandpa_block_import, _) = - sc_finality_grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain)?; - - let aura_block_import = - sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone()); - - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( - sc_consensus_aura::slot_duration(&*client)?, - aura_block_import, - Some(Box::new(grandpa_block_import)), - client.clone(), - InherentDataProviders::new(), - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - sp_consensus::NeverCanAuthor, - )?; - - let (network, network_status_sinks, system_rpc_tx, network_starter) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - on_demand: Some(on_demand.clone()), - block_announce_validator_builder: None, - })?; - - if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - backend.clone(), - task_manager.spawn_handle(), - client.clone(), - network.clone(), - ); - } - - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - remote_blockchain: Some(backend.remote_blockchain()), - transaction_pool, - task_manager: &mut task_manager, - on_demand: Some(on_demand), - rpc_extensions_builder: Box::new(|_, _| ()), - 
config, - client, - keystore: keystore_container.sync_keystore(), - backend, - network, - network_status_sinks, - system_rpc_tx, - telemetry_span: None, - })?; - - network_starter.start_network(); - - Ok(task_manager) -} diff --git a/polkadot/bridges/bin/millau/runtime/Cargo.toml b/polkadot/bridges/bin/millau/runtime/Cargo.toml deleted file mode 100644 index d163661284b638e623f4ae6d241b622d3bea4c70..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/millau/runtime/Cargo.toml +++ /dev/null @@ -1,101 +0,0 @@ -[package] -name = "millau-runtime" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/parity-bridges-common/" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -hex-literal = "0.3" -serde = { version = "1.0.123", optional = true, features = ["derive"] } - -# Bridge dependencies - -bp-header-chain = { path = "../../../primitives/header-chain", default-features = false } -bp-message-lane = { path = "../../../primitives/message-lane", default-features = false } -bp-millau = { path = "../../../primitives/millau", default-features = false } -bp-rialto = { path = "../../../primitives/rialto", default-features = false } -bp-runtime = { path = "../../../primitives/runtime", default-features = false } -bridge-runtime-common = { path = "../../runtime-common", default-features = false } -pallet-bridge-call-dispatch = { path = "../../../modules/call-dispatch", default-features = false } -pallet-finality-verifier = { path = "../../../modules/finality-verifier", default-features = false } -pallet-message-lane = { path = "../../../modules/message-lane", default-features = false } -pallet-shift-session-manager = { path = "../../../modules/shift-session-manager", default-features = false } -pallet-substrate-bridge = { path = 
"../../../modules/substrate", default-features = false } - -# Substrate Dependencies - -frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-inherents = { git = 
"https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[build-dependencies] -wasm-builder-runner = { package = "substrate-wasm-builder-runner", version = "2.0.0" } - -[features] -default = ["std"] -std = [ - "bp-header-chain/std", - "bp-message-lane/std", - "bp-millau/std", - "bp-rialto/std", - "bp-runtime/std", - "bridge-runtime-common/std", - "codec/std", - "frame-executive/std", - "frame-support/std", - "frame-system/std", - "frame-system-rpc-runtime-api/std", - "pallet-aura/std", - "pallet-balances/std", - "pallet-bridge-call-dispatch/std", - "pallet-finality-verifier/std", - "pallet-grandpa/std", - "pallet-message-lane/std", - "pallet-randomness-collective-flip/std", - "pallet-shift-session-manager/std", - "pallet-session/std", - "pallet-substrate-bridge/std", - "pallet-sudo/std", - "pallet-timestamp/std", - "pallet-transaction-payment/std", - "serde", - "sp-api/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-core/std", - "sp-inherents/std", - "sp-finality-grandpa/std", - "sp-offchain/std", - "sp-runtime/std", - 
"sp-session/std", - "sp-std/std", - "sp-transaction-pool/std", - "sp-trie/std", - "sp-version/std", -] diff --git a/polkadot/bridges/bin/millau/runtime/build.rs b/polkadot/bridges/bin/millau/runtime/build.rs deleted file mode 100644 index 4fda040c9bd14fecac3ba095b50994fd9b9cf691..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/millau/runtime/build.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use wasm_builder_runner::WasmBuilder; - -fn main() { - WasmBuilder::new() - .with_current_project() - .with_wasm_builder_from_crates("1.0.11") - .export_heap_base() - .import_memory() - .build() -} diff --git a/polkadot/bridges/bin/millau/runtime/src/lib.rs b/polkadot/bridges/bin/millau/runtime/src/lib.rs deleted file mode 100644 index 491359fc97f4faf92fb48afbc92620fed4591e1b..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/millau/runtime/src/lib.rs +++ /dev/null @@ -1,683 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The Millau runtime. This can be compiled with `#[no_std]`, ready for Wasm. - -#![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. -#![recursion_limit = "256"] -// Runtime-generated enums -#![allow(clippy::large_enum_variant)] -// Runtime-generated DecodeLimit::decode_all_With_depth_limit -#![allow(clippy::unnecessary_mut_passed)] -// From construct_runtime macro -#![allow(clippy::from_over_into)] - -// Make the WASM binary available. 
-#[cfg(feature = "std")] -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); - -pub mod rialto_messages; - -use crate::rialto_messages::{ToRialtoMessagePayload, WithRialtoMessageBridge}; - -use bridge_runtime_common::messages::{source::estimate_message_dispatch_and_delivery_fee, MessageBridge}; -use codec::Decode; -use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList}; -use sp_api::impl_runtime_apis; -use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::traits::{Block as BlockT, IdentityLookup, NumberFor, OpaqueKeys}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, MultiSignature, MultiSigner, -}; -use sp_std::prelude::*; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - -// A few exports that help ease life for downstream crates. -pub use frame_support::{ - construct_runtime, parameter_types, - traits::{Currency, ExistenceRequirement, Imbalance, KeyOwnerProofSystem, Randomness}, - weights::{constants::WEIGHT_PER_SECOND, DispatchClass, IdentityFee, RuntimeDbWeight, Weight}, - StorageValue, -}; - -pub use frame_system::Call as SystemCall; -pub use pallet_balances::Call as BalancesCall; -pub use pallet_message_lane::Call as MessageLaneCall; -pub use pallet_substrate_bridge::Call as BridgeRialtoCall; -pub use pallet_sudo::Call as SudoCall; -pub use pallet_timestamp::Call as TimestampCall; - -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; -pub use sp_runtime::{Perbill, Permill}; - -/// An index to a block. -pub type BlockNumber = bp_millau::BlockNumber; - -/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. -pub type Signature = bp_millau::Signature; - -/// Some way of identifying an account on the chain. 
We intentionally make it equivalent -/// to the public key of our transaction signing scheme. -pub type AccountId = bp_millau::AccountId; - -/// The type for looking up accounts. We don't expect more than 4 billion of them, but you -/// never know... -pub type AccountIndex = u32; - -/// Balance of an account. -pub type Balance = bp_millau::Balance; - -/// Index of a transaction in the chain. -pub type Index = u32; - -/// A hash of some data used by the chain. -pub type Hash = bp_millau::Hash; - -/// Hashing algorithm used by the chain. -pub type Hashing = bp_millau::Hasher; - -/// Digest item type. -pub type DigestItem = generic::DigestItem; - -/// Opaque types. These are used by the CLI to instantiate machinery that don't need to know -/// the specifics of the runtime. They can then be made to be agnostic over specific formats -/// of data like extrinsics, allowing for them to continue syncing the network through upgrades -/// to even the core data structures. -pub mod opaque { - use super::*; - - pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; - - /// Opaque block header type. - pub type Header = generic::Header; - /// Opaque block type. - pub type Block = generic::Block; - /// Opaque block identifier type. - pub type BlockId = generic::BlockId; -} - -impl_opaque_keys! { - pub struct SessionKeys { - pub aura: Aura, - pub grandpa: Grandpa, - } -} - -/// This runtime version. -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("millau-runtime"), - impl_name: create_runtime_str!("millau-runtime"), - authoring_version: 1, - spec_version: 1, - impl_version: 1, - apis: RUNTIME_API_VERSIONS, - transaction_version: 1, -}; - -/// The version information used to identify this runtime when compiled natively. -#[cfg(feature = "std")] -pub fn native_version() -> NativeVersion { - NativeVersion { - runtime_version: VERSION, - can_author_with: Default::default(), - } -} - -parameter_types! 
{ - pub const BlockHashCount: BlockNumber = 250; - pub const Version: RuntimeVersion = VERSION; - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 60_000_000, // ~0.06 ms = ~60 µs - write: 200_000_000, // ~0.2 ms = 200 µs - }; - pub const SS58Prefix: u8 = 60; -} - -impl frame_system::Config for Runtime { - /// The basic call filter to use in dispatchable. - type BaseCallFilter = (); - /// The identifier used to distinguish between accounts. - type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type Call = Call; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = IdentityLookup; - /// The index type for storing how many extrinsics an account has signed. - type Index = Index; - /// The index type for blocks. - type BlockNumber = BlockNumber; - /// The type for hashing blocks and tries. - type Hash = Hash; - /// The hashing algorithm used. - type Hashing = Hashing; - /// The header type. - type Header = generic::Header; - /// The ubiquitous event type. - type Event = Event; - /// The ubiquitous origin type. - type Origin = Origin; - /// Maximum number of block number to block hash mappings to keep (oldest pruned first). - type BlockHashCount = BlockHashCount; - /// Version of the runtime. - type Version = Version; - /// Provides information about the pallet setup in the runtime. - type PalletInfo = PalletInfo; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); - /// The data to be stored in an account. - type AccountData = pallet_balances::AccountData; - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - /// Weight information for the extrinsics of this pallet. - type SystemWeightInfo = (); - /// Block and extrinsics weights: base values and limits. 
- type BlockWeights = bp_millau::BlockWeights; - /// The maximum length of a block (in bytes). - type BlockLength = bp_millau::BlockLength; - /// The weight of database operations that the runtime can invoke. - type DbWeight = DbWeight; - /// The designated SS58 prefix of this chain. - type SS58Prefix = SS58Prefix; - type OnSetCode = (); -} - -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; -} -impl pallet_bridge_call_dispatch::Config for Runtime { - type Event = Event; - type MessageId = (bp_message_lane::LaneId, bp_message_lane::MessageNonce); - type Call = Call; - type CallFilter = (); - type EncodedCall = crate::rialto_messages::FromRialtoEncodedCall; - type SourceChainAccountId = bp_rialto::AccountId; - type TargetChainAccountPublic = MultiSigner; - type TargetChainSignature = MultiSignature; - type AccountIdConverter = bp_millau::AccountIdConverter; -} - -impl pallet_grandpa::Config for Runtime { - type Event = Event; - type Call = Call; - type KeyOwnerProofSystem = (); - type KeyOwnerProof = >::Proof; - type KeyOwnerIdentification = - >::IdentificationTuple; - type HandleEquivocation = (); - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - type WeightInfo = (); -} - -parameter_types! { - pub const MinimumPeriod: u64 = bp_millau::SLOT_DURATION / 2; -} - -impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. - type Moment = u64; - type OnTimestampSet = Aura; - type MinimumPeriod = MinimumPeriod; - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - type WeightInfo = (); -} - -parameter_types! { - pub const ExistentialDeposit: bp_millau::Balance = 500; - // For weight estimation, we assume that the most locks on an individual account will be 50. - // This number may need to be adjusted in the future if this assumption no longer holds true. 
- pub const MaxLocks: u32 = 50; -} - -impl pallet_balances::Config for Runtime { - /// The type for recording an account's balance. - type Balance = Balance; - /// The ubiquitous event type. - type Event = Event; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - type WeightInfo = (); - type MaxLocks = MaxLocks; -} - -parameter_types! { - pub const TransactionBaseFee: Balance = 0; - pub const TransactionByteFee: Balance = 1; -} - -impl pallet_transaction_payment::Config for Runtime { - type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; - type TransactionByteFee = TransactionByteFee; - type WeightToFee = IdentityFee; - type FeeMultiplierUpdate = (); -} - -impl pallet_sudo::Config for Runtime { - type Event = Event; - type Call = Call; -} - -parameter_types! { - /// Authorities are changing every 5 minutes. - pub const Period: BlockNumber = bp_millau::SESSION_LENGTH; - pub const Offset: BlockNumber = 0; -} - -impl pallet_session::Config for Runtime { - type Event = Event; - type ValidatorId = ::AccountId; - type ValidatorIdOf = (); - type ShouldEndSession = pallet_session::PeriodicSessions; - type NextSessionRotation = pallet_session::PeriodicSessions; - type SessionManager = pallet_shift_session_manager::Module; - type SessionHandler = ::KeyTypeIdProviders; - type Keys = SessionKeys; - type DisabledValidatorsThreshold = (); - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - type WeightInfo = (); -} - -impl pallet_substrate_bridge::Config for Runtime { - type BridgedChain = bp_rialto::Rialto; -} - -parameter_types! { - // This is a pretty unscientific cap. - // - // Note that once this is hit the pallet will essentially throttle incoming requests down to one - // call per block. 
- pub const MaxRequests: u32 = 50; -} - -impl pallet_finality_verifier::Config for Runtime { - type BridgedChain = bp_rialto::Rialto; - type HeaderChain = pallet_substrate_bridge::Module; - type AncestryProof = Vec; - type AncestryChecker = bp_header_chain::LinearAncestryChecker; - type MaxRequests = MaxRequests; -} - -impl pallet_shift_session_manager::Config for Runtime {} - -parameter_types! { - pub const MaxMessagesToPruneAtOnce: bp_message_lane::MessageNonce = 8; - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_message_lane::MessageNonce = - bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_message_lane::MessageNonce = - bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE; - // `IdentityFee` is used by Millau => we may use weight directly - pub const GetDeliveryConfirmationTransactionFee: Balance = - bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT as _; - pub const RootAccountForPayments: Option = None; -} - -impl pallet_message_lane::Config for Runtime { - type Event = Event; - // TODO: https://github.com/paritytech/parity-bridges-common/issues/390 - type WeightInfo = pallet_message_lane::weights::RialtoWeight; - type Parameter = rialto_messages::MillauToRialtoMessageLaneParameter; - type MaxMessagesToPruneAtOnce = MaxMessagesToPruneAtOnce; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - - type OutboundPayload = crate::rialto_messages::ToRialtoMessagePayload; - type OutboundMessageFee = Balance; - - type InboundPayload = crate::rialto_messages::FromRialtoMessagePayload; - type InboundMessageFee = bp_rialto::Balance; - type InboundRelayer = bp_rialto::AccountId; - - type AccountIdConverter = bp_millau::AccountIdConverter; - - type TargetHeaderChain = crate::rialto_messages::Rialto; - type LaneMessageVerifier = 
crate::rialto_messages::ToRialtoMessageVerifier; - type MessageDeliveryAndDispatchPayment = pallet_message_lane::instant_payments::InstantCurrencyPayments< - Runtime, - pallet_balances::Pallet, - GetDeliveryConfirmationTransactionFee, - RootAccountForPayments, - >; - - type SourceHeaderChain = crate::rialto_messages::Rialto; - type MessageDispatch = crate::rialto_messages::FromRialtoMessageDispatch; -} - -construct_runtime!( - pub enum Runtime where - Block = Block, - NodeBlock = opaque::Block, - UncheckedExtrinsic = UncheckedExtrinsic - { - BridgeRialto: pallet_substrate_bridge::{Pallet, Call, Storage, Config}, - BridgeRialtoMessageLane: pallet_message_lane::{Pallet, Call, Storage, Event}, - BridgeCallDispatch: pallet_bridge_call_dispatch::{Pallet, Event}, - BridgeFinalityVerifier: pallet_finality_verifier::{Pallet, Call}, - System: frame_system::{Pallet, Call, Config, Storage, Event}, - RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Call, Storage}, - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, - Aura: pallet_aura::{Pallet, Config}, - Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event}, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, - Sudo: pallet_sudo::{Pallet, Call, Config, Storage, Event}, - Session: pallet_session::{Pallet, Call, Storage, Event, Config}, - ShiftSessionManager: pallet_shift_session_manager::{Pallet}, - } -); - -/// The address format for describing accounts. -pub type Address = AccountId; -/// Block header type as expected by this runtime. -pub type Header = generic::Header; -/// Block type as expected by this runtime. -pub type Block = generic::Block; -/// A Block signed with a Justification -pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. -pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. 
-pub type SignedExtra = ( - frame_system::CheckSpecVersion, - frame_system::CheckTxVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, -); -/// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; -/// Executive: handles dispatch to the various modules. -pub type Executive = - frame_executive::Executive, Runtime, AllPallets>; - -impl_runtime_apis! { - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - Runtime::metadata().into() - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: sp_inherents::InherentData, - ) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - - fn random_seed() -> ::Hash { - RandomnessCollectiveFlip::random_seed().0 - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Index { - System::account_nonce(account) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx) - } - } - - 
impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> u64 { - Aura::slot_duration() - } - - fn authorities() -> Vec { - Aura::authorities() - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, sp_core::crypto::KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl fg_primitives::GrandpaApi for Runtime { - fn grandpa_authorities() -> GrandpaAuthorityList { - Grandpa::grandpa_authorities() - } - - fn submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: fg_primitives::EquivocationProof< - ::Hash, - NumberFor, - >, - key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, - ) -> Option<()> { - let key_owner_proof = key_owner_proof.decode()?; - - Grandpa::submit_unsigned_equivocation_report( - equivocation_proof, - key_owner_proof, - ) - } - - fn generate_key_ownership_proof( - _set_id: fg_primitives::SetId, - _authority_id: GrandpaId, - ) -> Option { - // NOTE: this is the only implementation possible since we've - // defined our key owner proof type as a bottom type (i.e. a type - // with no values). 
- None - } - } - - impl bp_rialto::RialtoHeaderApi for Runtime { - fn best_blocks() -> Vec<(bp_rialto::BlockNumber, bp_rialto::Hash)> { - BridgeRialto::best_headers() - } - - fn finalized_block() -> (bp_rialto::BlockNumber, bp_rialto::Hash) { - let header = BridgeRialto::best_finalized(); - (header.number, header.hash()) - } - - fn incomplete_headers() -> Vec<(bp_rialto::BlockNumber, bp_rialto::Hash)> { - BridgeRialto::require_justifications() - } - - fn is_known_block(hash: bp_rialto::Hash) -> bool { - BridgeRialto::is_known_header(hash) - } - - fn is_finalized_block(hash: bp_rialto::Hash) -> bool { - BridgeRialto::is_finalized_header(hash) - } - } - - impl bp_rialto::ToRialtoOutboundLaneApi for Runtime { - fn estimate_message_delivery_and_dispatch_fee( - _lane_id: bp_message_lane::LaneId, - payload: ToRialtoMessagePayload, - ) -> Option { - estimate_message_dispatch_and_delivery_fee::( - &payload, - WithRialtoMessageBridge::RELAYER_FEE_PERCENT, - ).ok() - } - - fn messages_dispatch_weight( - lane: bp_message_lane::LaneId, - begin: bp_message_lane::MessageNonce, - end: bp_message_lane::MessageNonce, - ) -> Vec<(bp_message_lane::MessageNonce, Weight, u32)> { - (begin..=end).filter_map(|nonce| { - let encoded_payload = BridgeRialtoMessageLane::outbound_message_payload(lane, nonce)?; - let decoded_payload = rialto_messages::ToRialtoMessagePayload::decode( - &mut &encoded_payload[..] 
- ).ok()?; - Some((nonce, decoded_payload.weight, encoded_payload.len() as _)) - }) - .collect() - } - - fn latest_received_nonce(lane: bp_message_lane::LaneId) -> bp_message_lane::MessageNonce { - BridgeRialtoMessageLane::outbound_latest_received_nonce(lane) - } - - fn latest_generated_nonce(lane: bp_message_lane::LaneId) -> bp_message_lane::MessageNonce { - BridgeRialtoMessageLane::outbound_latest_generated_nonce(lane) - } - } - - impl bp_rialto::FromRialtoInboundLaneApi for Runtime { - fn latest_received_nonce(lane: bp_message_lane::LaneId) -> bp_message_lane::MessageNonce { - BridgeRialtoMessageLane::inbound_latest_received_nonce(lane) - } - - fn latest_confirmed_nonce(lane: bp_message_lane::LaneId) -> bp_message_lane::MessageNonce { - BridgeRialtoMessageLane::inbound_latest_confirmed_nonce(lane) - } - - fn unrewarded_relayers_state(lane: bp_message_lane::LaneId) -> bp_message_lane::UnrewardedRelayersState { - BridgeRialtoMessageLane::inbound_unrewarded_relayers_state(lane) - } - } -} - -/// Rialto account ownership digest from Millau. -/// -/// The byte vector returned by this function should be signed with a Rialto account private key. -/// This way, the owner of `millau_account_id` on Millau proves that the Rialto account private key -/// is also under his control. 
-pub fn rialto_account_ownership_digest( - rialto_call: &Call, - millau_account_id: AccountId, - rialto_spec_version: SpecVersion, -) -> sp_std::vec::Vec -where - Call: codec::Encode, - AccountId: codec::Encode, - SpecVersion: codec::Encode, -{ - pallet_bridge_call_dispatch::account_ownership_digest( - rialto_call, - millau_account_id, - rialto_spec_version, - bp_runtime::MILLAU_BRIDGE_INSTANCE, - ) -} - -#[cfg(test)] -mod tests { - use super::*; - use bridge_runtime_common::messages; - - #[test] - fn ensure_millau_message_lane_weights_are_correct() { - // TODO: https://github.com/paritytech/parity-bridges-common/issues/390 - type Weights = pallet_message_lane::weights::RialtoWeight; - - pallet_message_lane::ensure_weights_are_correct::( - bp_millau::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT, - bp_millau::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT, - bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, - ); - - let max_incoming_message_proof_size = bp_rialto::EXTRA_STORAGE_PROOF_SIZE.saturating_add( - messages::target::maximal_incoming_message_size(bp_millau::max_extrinsic_size()), - ); - pallet_message_lane::ensure_able_to_receive_message::( - bp_millau::max_extrinsic_size(), - bp_millau::max_extrinsic_weight(), - max_incoming_message_proof_size, - bridge_runtime_common::messages::transaction_weight_without_multiplier( - bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - max_incoming_message_proof_size as _, - 0, - ), - messages::target::maximal_incoming_message_dispatch_weight(bp_millau::max_extrinsic_weight()), - ); - - let max_incoming_inbound_lane_data_proof_size = bp_message_lane::InboundLaneData::<()>::encoded_size_hint( - bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, - bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _, - ) - .unwrap_or(u32::MAX); - pallet_message_lane::ensure_able_to_receive_confirmation::( - bp_millau::max_extrinsic_size(), - bp_millau::max_extrinsic_weight(), - max_incoming_inbound_lane_data_proof_size, 
- bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, - bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, - bridge_runtime_common::messages::transaction_weight_without_multiplier( - bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - max_incoming_inbound_lane_data_proof_size as _, - 0, - ), - ); - } -} diff --git a/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs b/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs deleted file mode 100644 index 9775c93d2d0c665086380dfdf92dc669fb2fb522..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Everything required to serve Millau <-> Rialto message lanes. 
- -use crate::Runtime; - -use bp_message_lane::{ - source_chain::TargetHeaderChain, - target_chain::{ProvedMessages, SourceHeaderChain}, - InboundLaneData, LaneId, Message, MessageNonce, Parameter as MessageLaneParameter, -}; -use bp_runtime::{InstanceId, RIALTO_BRIDGE_INSTANCE}; -use bridge_runtime_common::messages::{self, ChainWithMessageLanes, MessageBridge}; -use codec::{Decode, Encode}; -use frame_support::{ - parameter_types, - weights::{DispatchClass, Weight, WeightToFeePolynomial}, - RuntimeDebug, -}; -use sp_core::storage::StorageKey; -use sp_runtime::{FixedPointNumber, FixedU128}; -use sp_std::{convert::TryFrom, ops::RangeInclusive}; - -parameter_types! { - /// Rialto to Millau conversion rate. Initially we treat both tokens as equal. - storage RialtoToMillauConversionRate: FixedU128 = 1.into(); -} - -/// Storage key of the Millau -> Rialto message in the runtime storage. -pub fn message_key(lane: &LaneId, nonce: MessageNonce) -> StorageKey { - pallet_message_lane::storage_keys::message_key::::MessageLaneInstance>( - lane, nonce, - ) -} - -/// Storage key of the Millau -> Rialto message lane state in the runtime storage. -pub fn outbound_lane_data_key(lane: &LaneId) -> StorageKey { - pallet_message_lane::storage_keys::outbound_lane_data_key::<::MessageLaneInstance>( - lane, - ) -} - -/// Storage key of the Rialto -> Millau message lane state in the runtime storage. -pub fn inbound_lane_data_key(lane: &LaneId) -> StorageKey { - pallet_message_lane::storage_keys::inbound_lane_data_key::< - Runtime, - ::MessageLaneInstance, - >(lane) -} - -/// Message payload for Millau -> Rialto messages. -pub type ToRialtoMessagePayload = messages::source::FromThisChainMessagePayload; - -/// Message verifier for Millau -> Rialto messages. -pub type ToRialtoMessageVerifier = messages::source::FromThisChainMessageVerifier; - -/// Message payload for Rialto -> Millau messages. 
-pub type FromRialtoMessagePayload = messages::target::FromBridgedChainMessagePayload; - -/// Encoded Millau Call as it comes from Rialto. -pub type FromRialtoEncodedCall = messages::target::FromBridgedChainEncodedMessageCall; - -/// Messages proof for Rialto -> Millau messages. -type FromRialtoMessagesProof = messages::target::FromBridgedChainMessagesProof; - -/// Messages delivery proof for Millau -> Rialto messages. -type ToRialtoMessagesDeliveryProof = messages::source::FromBridgedChainMessagesDeliveryProof; - -/// Call-dispatch based message dispatch for Rialto -> Millau messages. -pub type FromRialtoMessageDispatch = messages::target::FromBridgedChainMessageDispatch< - WithRialtoMessageBridge, - crate::Runtime, - pallet_bridge_call_dispatch::DefaultInstance, ->; - -/// Millau <-> Rialto message bridge. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct WithRialtoMessageBridge; - -impl MessageBridge for WithRialtoMessageBridge { - const INSTANCE: InstanceId = RIALTO_BRIDGE_INSTANCE; - - const RELAYER_FEE_PERCENT: u32 = 10; - - type ThisChain = Millau; - type BridgedChain = Rialto; - - fn maximal_extrinsic_size_on_target_chain() -> u32 { - bp_rialto::max_extrinsic_size() - } - - fn weight_limits_of_message_on_bridged_chain(_message_payload: &[u8]) -> RangeInclusive { - // we don't want to relay too large messages + keep reserve for future upgrades - let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(bp_rialto::max_extrinsic_weight()); - - // we're charging for payload bytes in `WithRialtoMessageBridge::weight_of_delivery_transaction` function - // - // this bridge may be used to deliver all kind of messages, so we're not making any assumptions about - // minimal dispatch weight here - - 0..=upper_limit - } - - fn weight_of_delivery_transaction(message_payload: &[u8]) -> Weight { - let message_payload_len = u32::try_from(message_payload.len()) - .map(Into::into) - .unwrap_or(Weight::MAX); - let extra_bytes_in_payload = - 
message_payload_len.saturating_sub(pallet_message_lane::EXPECTED_DEFAULT_MESSAGE_LENGTH.into()); - messages::transaction_weight_without_multiplier( - bp_rialto::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - message_payload_len.saturating_add(bp_millau::EXTRA_STORAGE_PROOF_SIZE as _), - extra_bytes_in_payload - .saturating_mul(bp_rialto::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT) - .saturating_add(bp_rialto::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT), - ) - } - - fn weight_of_delivery_confirmation_transaction_on_this_chain() -> Weight { - let inbounded_data_size: Weight = - InboundLaneData::::encoded_size_hint(bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, 1) - .map(Into::into) - .unwrap_or(Weight::MAX); - - messages::transaction_weight_without_multiplier( - bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - inbounded_data_size.saturating_add(bp_rialto::EXTRA_STORAGE_PROOF_SIZE as _), - bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, - ) - } - - fn this_weight_to_this_balance(weight: Weight) -> bp_millau::Balance { - ::WeightToFee::calc(&weight) - } - - fn bridged_weight_to_bridged_balance(weight: Weight) -> bp_rialto::Balance { - // we're using the same weights in both chains now - ::WeightToFee::calc(&weight) as _ - } - - fn bridged_balance_to_this_balance(bridged_balance: bp_rialto::Balance) -> bp_millau::Balance { - bp_millau::Balance::try_from(RialtoToMillauConversionRate::get().saturating_mul_int(bridged_balance)) - .unwrap_or(bp_millau::Balance::MAX) - } -} - -/// Millau chain from message lane point of view. 
-#[derive(RuntimeDebug, Clone, Copy)] -pub struct Millau; - -impl messages::ChainWithMessageLanes for Millau { - type Hash = bp_millau::Hash; - type AccountId = bp_millau::AccountId; - type Signer = bp_millau::AccountSigner; - type Signature = bp_millau::Signature; - type Call = crate::Call; - type Weight = Weight; - type Balance = bp_millau::Balance; - - type MessageLaneInstance = pallet_message_lane::DefaultInstance; -} - -impl messages::ThisChainWithMessageLanes for Millau { - fn is_outbound_lane_enabled(lane: &LaneId) -> bool { - *lane == LaneId::default() - } - - fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { - MessageNonce::MAX - } -} - -/// Rialto chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct Rialto; - -impl messages::ChainWithMessageLanes for Rialto { - type Hash = bp_rialto::Hash; - type AccountId = bp_rialto::AccountId; - type Signer = bp_rialto::AccountSigner; - type Signature = bp_rialto::Signature; - type Call = (); // unknown to us - type Weight = Weight; - type Balance = bp_rialto::Balance; - - type MessageLaneInstance = pallet_message_lane::DefaultInstance; -} - -impl TargetHeaderChain for Rialto { - type Error = &'static str; - // The proof is: - // - hash of the header this proof has been created with; - // - the storage proof or one or several keys; - // - id of the lane we prove state of. 
- type MessagesDeliveryProof = ToRialtoMessagesDeliveryProof; - - fn verify_message(payload: &ToRialtoMessagePayload) -> Result<(), Self::Error> { - messages::source::verify_chain_message::(payload) - } - - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), Self::Error> { - messages::source::verify_messages_delivery_proof::(proof) - } -} - -impl SourceHeaderChain for Rialto { - type Error = &'static str; - // The proof is: - // - hash of the header this proof has been created with; - // - the storage proof or one or several keys; - // - id of the lane we prove messages for; - // - inclusive range of messages nonces that are proved. - type MessagesProof = FromRialtoMessagesProof; - - fn verify_messages_proof( - proof: Self::MessagesProof, - messages_count: u32, - ) -> Result>, Self::Error> { - messages::target::verify_messages_proof::(proof, messages_count) - } -} - -/// Millau -> Rialto message lane pallet parameters. -#[derive(RuntimeDebug, Clone, Encode, Decode, PartialEq, Eq)] -pub enum MillauToRialtoMessageLaneParameter { - /// The conversion formula we use is: `MillauTokens = RialtoTokens * conversion_rate`. 
- RialtoToMillauConversionRate(FixedU128), -} - -impl MessageLaneParameter for MillauToRialtoMessageLaneParameter { - fn save(&self) { - match *self { - MillauToRialtoMessageLaneParameter::RialtoToMillauConversionRate(ref conversion_rate) => { - RialtoToMillauConversionRate::set(conversion_rate) - } - } - } -} diff --git a/polkadot/bridges/bin/rialto/node/Cargo.toml b/polkadot/bridges/bin/rialto/node/Cargo.toml deleted file mode 100644 index f99178c77aeaf73c596ee058906478e47685b5d9..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/node/Cargo.toml +++ /dev/null @@ -1,59 +0,0 @@ -[package] -name = "rialto-bridge-node" -description = "Substrate node compatible with Rialto runtime" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -build = "build.rs" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/parity-bridges-common/" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -jsonrpc-core = "15.1.0" -structopt = "0.3.21" - -# Bridge dependencies - -bp-message-lane = { path = "../../../primitives/message-lane" } -bp-runtime = { path = "../../../primitives/runtime" } -bp-rialto = { path = "../../../primitives/rialto" } -pallet-message-lane = { path = "../../../modules/message-lane" } -pallet-message-lane-rpc = { path = "../../../modules/message-lane/rpc" } -rialto-runtime = { path = "../runtime" } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["wasmtime"] } -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-consensus = { git = "https://github.com/paritytech/substrate", branch 
= "master" } -sc-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-finality-grandpa-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[build-dependencies] -build-script-utils = { package = "substrate-build-script-utils", version = "2.0" } -frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } -vergen = "3.1.0" - -[features] -default = [] -runtime-benchmarks = [ - "rialto-runtime/runtime-benchmarks", -] diff --git a/polkadot/bridges/bin/rialto/node/build.rs b/polkadot/bridges/bin/rialto/node/build.rs deleted file mode 100644 index e9a10ff8ad009ae29752bd6db4fc03b6bebc977e..0000000000000000000000000000000000000000 --- 
a/polkadot/bridges/bin/rialto/node/build.rs +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use vergen::{generate_cargo_keys, ConstantsFlags}; - -const ERROR_MSG: &str = "Failed to generate metadata files"; - -fn main() { - generate_cargo_keys(ConstantsFlags::SHA_SHORT).expect(ERROR_MSG); - - build_script_utils::rerun_if_git_head_changed(); -} diff --git a/polkadot/bridges/bin/rialto/node/src/chain_spec.rs b/polkadot/bridges/bin/rialto/node/src/chain_spec.rs deleted file mode 100644 index 00a73cd4457ed4b0006494b35c28a59fa492204a..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/node/src/chain_spec.rs +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use bp_rialto::derive_account_from_millau_id; -use rialto_runtime::{ - AccountId, AuraConfig, BalancesConfig, BridgeKovanConfig, BridgeMillauConfig, BridgeRialtoPoAConfig, GenesisConfig, - GrandpaConfig, SessionConfig, SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY, -}; -use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use sp_core::{sr25519, Pair, Public}; -use sp_finality_grandpa::AuthorityId as GrandpaId; -use sp_runtime::traits::{IdentifyAccount, Verify}; - -/// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. -pub type ChainSpec = sc_service::GenericChainSpec; - -/// The chain specification option. This is expected to come in from the CLI and -/// is little more than one of a number of alternatives which can easily be converted -/// from a string (`--chain=...`) into a `ChainSpec`. -#[derive(Clone, Debug)] -pub enum Alternative { - /// Whatever the current runtime is, with just Alice as an auth. - Development, - /// Whatever the current runtime is, with simple Alice/Bob/Charlie/Dave/Eve auths. 
- LocalTestnet, -} - -/// Helper function to generate a crypto pair from seed -pub fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() -} - -type AccountPublic = ::Signer; - -/// Helper function to generate an account ID from seed -pub fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} - -/// Helper function to generate an authority key for Aura -pub fn get_authority_keys_from_seed(s: &str) -> (AccountId, AuraId, GrandpaId) { - ( - get_account_id_from_seed::(s), - get_from_seed::(s), - get_from_seed::(s), - ) -} - -impl Alternative { - /// Get an actual chain config from one of the alternatives. - pub(crate) fn load(self) -> ChainSpec { - match self { - Alternative::Development => ChainSpec::from_genesis( - "Development", - "dev", - sc_service::ChainType::Development, - || { - testnet_genesis( - vec![get_authority_keys_from_seed("Alice")], - get_account_id_from_seed::("Alice"), - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - true, - ) - }, - vec![], - None, - None, - None, - None, - ), - Alternative::LocalTestnet => ChainSpec::from_genesis( - "Local Testnet", - "local_testnet", - sc_service::ChainType::Local, - || { - testnet_genesis( - vec![ - get_authority_keys_from_seed("Alice"), - get_authority_keys_from_seed("Bob"), - get_authority_keys_from_seed("Charlie"), - get_authority_keys_from_seed("Dave"), - get_authority_keys_from_seed("Eve"), - ], - get_account_id_from_seed::("Alice"), - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - 
get_account_id_from_seed::("George"), - get_account_id_from_seed::("Harry"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - get_account_id_from_seed::("George//stash"), - get_account_id_from_seed::("Harry//stash"), - pallet_message_lane::Module::::relayer_fund_account_id(), - derive_account_from_millau_id(bp_runtime::SourceAccount::Account( - get_account_id_from_seed::("Dave"), - )), - ], - true, - ) - }, - vec![], - None, - None, - None, - None, - ), - } - } -} - -fn session_keys(aura: AuraId, grandpa: GrandpaId) -> SessionKeys { - SessionKeys { aura, grandpa } -} - -fn testnet_genesis( - initial_authorities: Vec<(AccountId, AuraId, GrandpaId)>, - root_key: AccountId, - endowed_accounts: Vec, - _enable_println: bool, -) -> GenesisConfig { - GenesisConfig { - frame_system: Some(SystemConfig { - code: WASM_BINARY.to_vec(), - changes_trie_config: Default::default(), - }), - pallet_balances: Some(BalancesConfig { - balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(), - }), - pallet_aura: Some(AuraConfig { - authorities: Vec::new(), - }), - pallet_bridge_eth_poa_Instance1: Some(load_rialto_poa_bridge_config()), - pallet_bridge_eth_poa_Instance2: Some(load_kovan_bridge_config()), - pallet_grandpa: Some(GrandpaConfig { - authorities: Vec::new(), - }), - pallet_substrate_bridge: Some(BridgeMillauConfig { - // We'll initialize the pallet with a dispatchable instead. 
- init_data: None, - owner: Some(root_key.clone()), - }), - pallet_sudo: Some(SudoConfig { key: root_key }), - pallet_session: Some(SessionConfig { - keys: initial_authorities - .iter() - .map(|x| (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone()))) - .collect::>(), - }), - } -} - -fn load_rialto_poa_bridge_config() -> BridgeRialtoPoAConfig { - BridgeRialtoPoAConfig { - initial_header: rialto_runtime::rialto_poa::genesis_header(), - initial_difficulty: 0.into(), - initial_validators: rialto_runtime::rialto_poa::genesis_validators(), - } -} - -fn load_kovan_bridge_config() -> BridgeKovanConfig { - BridgeKovanConfig { - initial_header: rialto_runtime::kovan::genesis_header(), - initial_difficulty: 0.into(), - initial_validators: rialto_runtime::kovan::genesis_validators(), - } -} - -#[test] -fn derived_dave_account_is_as_expected() { - let dave = get_account_id_from_seed::("Dave"); - let derived: AccountId = derive_account_from_millau_id(bp_runtime::SourceAccount::Account(dave)); - assert_eq!( - derived.to_string(), - "5HZhdv53gSJmWWtD8XR5Ypu4PgbT5JNWwGw2mkE75cN61w9t".to_string() - ); -} diff --git a/polkadot/bridges/bin/rialto/node/src/cli.rs b/polkadot/bridges/bin/rialto/node/src/cli.rs deleted file mode 100644 index 1149c4f910c8dd1ec9eaa7cdc0f041aae5ae70bb..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/node/src/cli.rs +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use sc_cli::RunCmd; -use structopt::StructOpt; - -#[derive(Debug, StructOpt)] -pub struct Cli { - #[structopt(subcommand)] - pub subcommand: Option, - - #[structopt(flatten)] - pub run: RunCmd, -} - -/// Possible subcommands of the main binary. -#[derive(Debug, StructOpt)] -pub enum Subcommand { - /// Key management cli utilities - Key(sc_cli::KeySubcommand), - /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. - Verify(sc_cli::VerifyCmd), - - /// Generate a seed that provides a vanity address. - Vanity(sc_cli::VanityCmd), - - /// Sign a message, with a given (secret) key. - Sign(sc_cli::SignCmd), - - /// Build a chain specification. - BuildSpec(sc_cli::BuildSpecCmd), - - /// Validate blocks. - CheckBlock(sc_cli::CheckBlockCmd), - - /// Export blocks. - ExportBlocks(sc_cli::ExportBlocksCmd), - - /// Export the state of a given block into a chain spec. - ExportState(sc_cli::ExportStateCmd), - - /// Import blocks. - ImportBlocks(sc_cli::ImportBlocksCmd), - - /// Remove the whole chain. - PurgeChain(sc_cli::PurgeChainCmd), - - /// Revert the chain to a previous state. - Revert(sc_cli::RevertCmd), - - /// The custom benchmark subcommmand benchmarking runtime pallets. - #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] - Benchmark(frame_benchmarking_cli::BenchmarkCmd), -} diff --git a/polkadot/bridges/bin/rialto/node/src/command.rs b/polkadot/bridges/bin/rialto/node/src/command.rs deleted file mode 100644 index 8242c9eaaba0342cb2af8c09ed193c4edd461efe..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/node/src/command.rs +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::cli::{Cli, Subcommand}; -use crate::service; -use crate::service::new_partial; -use rialto_runtime::Block; -use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli}; -use sc_service::PartialComponents; - -impl SubstrateCli for Cli { - fn impl_name() -> String { - "Rialto Bridge Node".into() - } - - fn impl_version() -> String { - env!("CARGO_PKG_VERSION").into() - } - - fn description() -> String { - "Rialto Bridge Node".into() - } - - fn author() -> String { - "Parity Technologies".into() - } - - fn support_url() -> String { - "https://github.com/paritytech/parity-bridges-common/".into() - } - - fn copyright_start_year() -> i32 { - 2019 - } - - fn executable_name() -> String { - "rialto-bridge-node".into() - } - - fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { - &rialto_runtime::VERSION - } - - fn load_spec(&self, id: &str) -> Result, String> { - Ok(Box::new( - match id { - "" | "dev" => crate::chain_spec::Alternative::Development, - "local" => crate::chain_spec::Alternative::LocalTestnet, - _ => return Err(format!("Unsupported chain specification: {}", id)), - } - .load(), - )) - } -} - -/// Parse and run command line arguments -pub fn run() -> sc_cli::Result<()> { - let cli = Cli::from_args(); - sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::Custom( - 
rialto_runtime::SS58Prefix::get() as u16, - )); - - match &cli.subcommand { - Some(Subcommand::Benchmark(cmd)) => { - if cfg!(feature = "runtime-benchmarks") { - let runner = cli.create_runner(cmd)?; - - runner.sync_run(|config| cmd.run::(config)) - } else { - println!( - "Benchmarking wasn't enabled when building the node. \ - You can enable it with `--features runtime-benchmarks`." - ); - Ok(()) - } - } - Some(Subcommand::Key(cmd)) => cmd.run(&cli), - Some(Subcommand::Sign(cmd)) => cmd.run(), - Some(Subcommand::Verify(cmd)) => cmd.run(), - Some(Subcommand::Vanity(cmd)) => cmd.run(), - Some(Subcommand::BuildSpec(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) - } - Some(Subcommand::CheckBlock(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - import_queue, - .. - } = new_partial(&config)?; - Ok((cmd.run(client, import_queue), task_manager)) - }) - } - Some(Subcommand::ExportBlocks(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, task_manager, .. - } = new_partial(&config)?; - Ok((cmd.run(client, config.database), task_manager)) - }) - } - Some(Subcommand::ExportState(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, task_manager, .. - } = new_partial(&config)?; - Ok((cmd.run(client, config.chain_spec), task_manager)) - }) - } - Some(Subcommand::ImportBlocks(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - import_queue, - .. 
- } = new_partial(&config)?; - Ok((cmd.run(client, import_queue), task_manager)) - }) - } - Some(Subcommand::PurgeChain(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run(config.database)) - } - Some(Subcommand::Revert(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { - client, - task_manager, - backend, - .. - } = new_partial(&config)?; - Ok((cmd.run(client, backend), task_manager)) - }) - } - None => { - let runner = cli.create_runner(&cli.run)?; - runner - .run_node_until_exit(|config| async move { - match config.role { - Role::Light => service::new_light(config), - _ => service::new_full(config), - } - }) - .map_err(sc_cli::Error::Service) - } - } -} diff --git a/polkadot/bridges/bin/rialto/node/src/main.rs b/polkadot/bridges/bin/rialto/node/src/main.rs deleted file mode 100644 index 164afae2bb90db49c7abefdee5bbbdecf2545908..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/node/src/main.rs +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rialto bridge node. 
- -#![warn(missing_docs)] - -mod chain_spec; -#[macro_use] -mod service; -mod cli; -mod command; - -/// Run the Rialto Node -fn main() -> sc_cli::Result<()> { - command::run() -} diff --git a/polkadot/bridges/bin/rialto/node/src/service.rs b/polkadot/bridges/bin/rialto/node/src/service.rs deleted file mode 100644 index 67ca185137c0af293d4c794704cc1bd9be33c4ca..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/node/src/service.rs +++ /dev/null @@ -1,433 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
- -// ===================================================================================== -// ===================================================================================== -// ===================================================================================== -// UPDATE GUIDE: -// 1) replace everything with node-template/src/service.rs contents (found in main Substrate repo); -// 2) the only thing to keep from old code, is `rpc_extensions_builder` - we use our own custom RPCs; -// 3) fix compilation errors; -// 4) test :) -// ===================================================================================== -// ===================================================================================== -// ===================================================================================== - -use rialto_runtime::{self, opaque::Block, RuntimeApi}; -use sc_client_api::{ExecutorProvider, RemoteBackend}; -use sc_executor::native_executor_instance; -pub use sc_executor::NativeExecutor; -use sc_finality_grandpa::SharedVoterState; -use sc_keystore::LocalKeystore; -use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; -use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; -use sp_inherents::InherentDataProviders; -use std::sync::Arc; -use std::time::Duration; - -// Our native executor instance. 
-native_executor_instance!( - pub Executor, - rialto_runtime::api::dispatch, - rialto_runtime::native_version, - frame_benchmarking::benchmarking::HostFunctions, -); - -type FullClient = sc_service::TFullClient; -type FullBackend = sc_service::TFullBackend; -type FullSelectChain = sc_consensus::LongestChain; - -#[allow(clippy::type_complexity)] -pub fn new_partial( - config: &Configuration, -) -> Result< - sc_service::PartialComponents< - FullClient, - FullBackend, - FullSelectChain, - sp_consensus::DefaultImportQueue, - sc_transaction_pool::FullPool, - ( - sc_consensus_aura::AuraBlockImport< - Block, - FullClient, - sc_finality_grandpa::GrandpaBlockImport, - AuraPair, - >, - sc_finality_grandpa::LinkHalf, - ), - >, - ServiceError, -> { - if config.keystore_remote.is_some() { - return Err(ServiceError::Other("Remote Keystores are not supported.".to_string())); - } - let inherent_data_providers = sp_inherents::InherentDataProviders::new(); - - let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::(&config)?; - let client = Arc::new(client); - - let select_chain = sc_consensus::LongestChain::new(backend.clone()); - - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_handle(), - client.clone(), - ); - - let (grandpa_block_import, grandpa_link) = - sc_finality_grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain.clone())?; - - let aura_block_import = - sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone()); - - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( - sc_consensus_aura::slot_duration(&*client)?, - aura_block_import.clone(), - Some(Box::new(grandpa_block_import)), - client.clone(), - inherent_data_providers.clone(), - &task_manager.spawn_essential_handle(), - 
config.prometheus_registry(), - sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), - )?; - - Ok(sc_service::PartialComponents { - client, - backend, - task_manager, - import_queue, - keystore_container, - select_chain, - transaction_pool, - inherent_data_providers, - other: (aura_block_import, grandpa_link), - }) -} - -fn remote_keystore(_url: &str) -> Result, &'static str> { - // FIXME: here would the concrete keystore be built, - // must return a concrete type (NOT `LocalKeystore`) that - // implements `CryptoStore` and `SyncCryptoStore` - Err("Remote Keystore not supported.") -} - -/// Builds a new service for a full client. -pub fn new_full(mut config: Configuration) -> Result { - let sc_service::PartialComponents { - client, - backend, - mut task_manager, - import_queue, - mut keystore_container, - select_chain, - transaction_pool, - inherent_data_providers, - other: (block_import, grandpa_link), - } = new_partial(&config)?; - - if let Some(url) = &config.keystore_remote { - match remote_keystore(url) { - Ok(k) => keystore_container.set_remote_keystore(k), - Err(e) => { - return Err(ServiceError::Other(format!( - "Error hooking up remote keystore for {}: {}", - url, e - ))) - } - }; - } - - config - .network - .extra_sets - .push(sc_finality_grandpa::grandpa_peers_set_config()); - - let (network, network_status_sinks, system_rpc_tx, network_starter) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - on_demand: None, - block_announce_validator_builder: None, - })?; - - if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - backend.clone(), - task_manager.spawn_handle(), - client.clone(), - network.clone(), - ); - } - - let role = config.role.clone(); - let force_authoring = config.force_authoring; - let backoff_authoring_blocks: Option<()> = 
None; - let name = config.network.node_name.clone(); - let enable_grandpa = !config.disable_grandpa; - let prometheus_registry = config.prometheus_registry().cloned(); - - let rpc_extensions_builder = { - use bp_message_lane::{LaneId, MessageNonce}; - use bp_runtime::{InstanceId, MILLAU_BRIDGE_INSTANCE}; - use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider; - use sp_core::storage::StorageKey; - - // This struct is here to ease update process. - - /// Rialto runtime from message-lane RPC point of view. - struct RialtoMessageLaneKeys; - - impl pallet_message_lane_rpc::Runtime for RialtoMessageLaneKeys { - fn message_key(&self, instance: &InstanceId, lane: &LaneId, nonce: MessageNonce) -> Option { - match *instance { - MILLAU_BRIDGE_INSTANCE => Some(rialto_runtime::millau_messages::message_key(lane, nonce)), - _ => None, - } - } - - fn outbound_lane_data_key(&self, instance: &InstanceId, lane: &LaneId) -> Option { - match *instance { - MILLAU_BRIDGE_INSTANCE => Some(rialto_runtime::millau_messages::outbound_lane_data_key(lane)), - _ => None, - } - } - - fn inbound_lane_data_key(&self, instance: &InstanceId, lane: &LaneId) -> Option { - match *instance { - MILLAU_BRIDGE_INSTANCE => Some(rialto_runtime::millau_messages::inbound_lane_data_key(lane)), - _ => None, - } - } - } - - use pallet_message_lane_rpc::{MessageLaneApi, MessageLaneRpcHandler}; - use sc_finality_grandpa_rpc::{GrandpaApi, GrandpaRpcHandler}; - use sc_rpc::DenyUnsafe; - use substrate_frame_rpc_system::{FullSystem, SystemApi}; - - let backend = backend.clone(); - let client = client.clone(); - let pool = transaction_pool.clone(); - - let justification_stream = grandpa_link.justification_stream(); - let shared_authority_set = grandpa_link.shared_authority_set().clone(); - let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty(); - - let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend.clone(), Some(shared_authority_set.clone())); 
- - Box::new(move |_, subscription_executor| { - let mut io = jsonrpc_core::IoHandler::default(); - io.extend_with(SystemApi::to_delegate(FullSystem::new( - client.clone(), - pool.clone(), - DenyUnsafe::No, - ))); - io.extend_with(GrandpaApi::to_delegate(GrandpaRpcHandler::new( - shared_authority_set.clone(), - shared_voter_state.clone(), - justification_stream.clone(), - subscription_executor, - finality_proof_provider.clone(), - ))); - io.extend_with(MessageLaneApi::to_delegate(MessageLaneRpcHandler::new( - backend.clone(), - Arc::new(RialtoMessageLaneKeys), - ))); - - io - }) - }; - - let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks(sc_service::SpawnTasksParams { - network: network.clone(), - client: client.clone(), - keystore: keystore_container.sync_keystore(), - task_manager: &mut task_manager, - transaction_pool: transaction_pool.clone(), - rpc_extensions_builder, - on_demand: None, - remote_blockchain: None, - backend, - network_status_sinks, - system_rpc_tx, - config, - telemetry_span: None, - })?; - - if role.is_authority() { - let proposer = sc_basic_authorship::ProposerFactory::new( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry.as_ref(), - ); - - let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - - let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _, _>( - sc_consensus_aura::slot_duration(&*client)?, - client.clone(), - select_chain, - block_import, - proposer, - network.clone(), - inherent_data_providers, - force_authoring, - backoff_authoring_blocks, - keystore_container.sync_keystore(), - can_author_with, - )?; - - // the AURA authoring task is considered essential, i.e. if it - // fails we take down the service with it. 
- task_manager.spawn_essential_handle().spawn_blocking("aura", aura); - } - - // if the node isn't actively participating in consensus then it doesn't - // need a keystore, regardless of which protocol we use below. - let keystore = if role.is_authority() { - Some(keystore_container.sync_keystore()) - } else { - None - }; - - let grandpa_config = sc_finality_grandpa::Config { - // FIXME #1578 make this available through chainspec - gossip_duration: Duration::from_millis(333), - justification_period: 512, - name: Some(name), - observer_enabled: false, - keystore, - is_authority: role.is_authority(), - }; - - if enable_grandpa { - // start the full GRANDPA voter - // NOTE: non-authorities could run the GRANDPA observer protocol, but at - // this point the full voter should provide better guarantees of block - // and vote data availability than the observer. The observer has not - // been tested extensively yet and having most nodes in a network run it - // could lead to finality stalls. - let grandpa_config = sc_finality_grandpa::GrandpaParams { - config: grandpa_config, - link: grandpa_link, - network, - telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()), - voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), - prometheus_registry, - shared_voter_state: SharedVoterState::empty(), - }; - - // the GRANDPA voter task is considered infallible, i.e. - // if it fails we take down the service with it. - task_manager - .spawn_essential_handle() - .spawn_blocking("grandpa-voter", sc_finality_grandpa::run_grandpa_voter(grandpa_config)?); - } - - network_starter.start_network(); - Ok(task_manager) -} - -/// Builds a new service for a light client. 
-pub fn new_light(mut config: Configuration) -> Result { - let (client, backend, keystore_container, mut task_manager, on_demand) = - sc_service::new_light_parts::(&config)?; - - config - .network - .extra_sets - .push(sc_finality_grandpa::grandpa_peers_set_config()); - - let select_chain = sc_consensus::LongestChain::new(backend.clone()); - - let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( - config.transaction_pool.clone(), - config.prometheus_registry(), - task_manager.spawn_handle(), - client.clone(), - on_demand.clone(), - )); - - let (grandpa_block_import, _) = - sc_finality_grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain)?; - - let aura_block_import = - sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone()); - - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _, _>( - sc_consensus_aura::slot_duration(&*client)?, - aura_block_import, - Some(Box::new(grandpa_block_import)), - client.clone(), - InherentDataProviders::new(), - &task_manager.spawn_essential_handle(), - config.prometheus_registry(), - sp_consensus::NeverCanAuthor, - )?; - - let (network, network_status_sinks, system_rpc_tx, network_starter) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - on_demand: Some(on_demand.clone()), - block_announce_validator_builder: None, - })?; - - if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - backend.clone(), - task_manager.spawn_handle(), - client.clone(), - network.clone(), - ); - } - - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - remote_blockchain: Some(backend.remote_blockchain()), - transaction_pool, - task_manager: &mut task_manager, - on_demand: Some(on_demand), - rpc_extensions_builder: Box::new(|_, _| ()), - 
config, - client, - keystore: keystore_container.sync_keystore(), - backend, - network, - network_status_sinks, - system_rpc_tx, - telemetry_span: None, - })?; - - network_starter.start_network(); - - Ok(task_manager) -} diff --git a/polkadot/bridges/bin/rialto/runtime/Cargo.toml b/polkadot/bridges/bin/rialto/runtime/Cargo.toml deleted file mode 100644 index 517ddff9cb67bd659af5e8ee492b9010ca332a86..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/runtime/Cargo.toml +++ /dev/null @@ -1,129 +0,0 @@ -[package] -name = "rialto-runtime" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/parity-bridges-common/" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -hex-literal = "0.3" -libsecp256k1 = { version = "0.3.4", optional = true, default-features = false, features = ["hmac"] } -serde = { version = "1.0.123", optional = true, features = ["derive"] } - -# Bridge dependencies - -bp-currency-exchange = { path = "../../../primitives/currency-exchange", default-features = false } -bp-eth-poa = { path = "../../../primitives/ethereum-poa", default-features = false } -bp-header-chain = { path = "../../../primitives/header-chain", default-features = false } -bp-message-dispatch = { path = "../../../primitives/message-dispatch", default-features = false } -bp-message-lane = { path = "../../../primitives/message-lane", default-features = false } -bp-millau = { path = "../../../primitives/millau", default-features = false } -bp-rialto = { path = "../../../primitives/rialto", default-features = false } -bp-runtime = { path = "../../../primitives/runtime", default-features = false } -bridge-runtime-common = { path = "../../runtime-common", default-features = false } -pallet-bridge-eth-poa = { path = 
"../../../modules/ethereum", default-features = false } -pallet-bridge-call-dispatch = { path = "../../../modules/call-dispatch", default-features = false } -pallet-bridge-currency-exchange = { path = "../../../modules/currency-exchange", default-features = false } -pallet-finality-verifier = { path = "../../../modules/finality-verifier", default-features = false } -pallet-substrate-bridge = { path = "../../../modules/substrate", default-features = false } -pallet-message-lane = { path = "../../../modules/message-lane", default-features = false } -pallet-shift-session-manager = { path = "../../../modules/shift-session-manager", default-features = false } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } -frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master" , 
default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[dev-dependencies] -libsecp256k1 = { version = "0.3.4", features = ["hmac"] } - -[build-dependencies] -wasm-builder-runner = { package = 
"substrate-wasm-builder-runner", version = "2.0.0" } - -[features] -default = ["std"] -std = [ - "bp-currency-exchange/std", - "bp-eth-poa/std", - "bp-header-chain/std", - "bp-message-dispatch/std", - "bp-message-lane/std", - "bp-millau/std", - "bp-rialto/std", - "bp-runtime/std", - "bridge-runtime-common/std", - "codec/std", - "frame-benchmarking/std", - "frame-executive/std", - "frame-support/std", - "frame-system-rpc-runtime-api/std", - "frame-system/std", - "pallet-aura/std", - "pallet-balances/std", - "pallet-bridge-eth-poa/std", - "pallet-bridge-call-dispatch/std", - "pallet-bridge-currency-exchange/std", - "pallet-finality-verifier/std", - "pallet-grandpa/std", - "pallet-message-lane/std", - "pallet-randomness-collective-flip/std", - "pallet-shift-session-manager/std", - "pallet-substrate-bridge/std", - "pallet-sudo/std", - "pallet-timestamp/std", - "pallet-transaction-payment/std", - "serde", - "sp-api/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-core/std", - "sp-finality-grandpa/std", - "sp-inherents/std", - "sp-io/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-std/std", - "sp-transaction-pool/std", - "sp-trie/std", - "sp-version/std", -] -runtime-benchmarks = [ - "bridge-runtime-common/runtime-benchmarks", - "frame-benchmarking", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "libsecp256k1", - "pallet-bridge-currency-exchange/runtime-benchmarks", - "pallet-bridge-eth-poa/runtime-benchmarks", - "pallet-message-lane/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] diff --git a/polkadot/bridges/bin/rialto/runtime/build.rs b/polkadot/bridges/bin/rialto/runtime/build.rs deleted file mode 100644 index 4fda040c9bd14fecac3ba095b50994fd9b9cf691..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/runtime/build.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use wasm_builder_runner::WasmBuilder; - -fn main() { - WasmBuilder::new() - .with_current_project() - .with_wasm_builder_from_crates("1.0.11") - .export_heap_base() - .import_memory() - .build() -} diff --git a/polkadot/bridges/bin/rialto/runtime/src/benches.rs b/polkadot/bridges/bin/rialto/runtime/src/benches.rs deleted file mode 100644 index 4ca476e5f3ca6a5f3278355c511988e039367424..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/runtime/src/benches.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! 
We want to use a different validator configuration for benchmarking than what's used in Kovan -//! or in our Rialto test network. However, we can't configure a new validator set on the fly which -//! means we need to wire the runtime together like this - -use pallet_bridge_eth_poa::{ValidatorsConfiguration, ValidatorsSource}; -use sp_std::vec; - -pub use crate::kovan::{ - genesis_header, genesis_validators, BridgeAuraConfiguration, FinalityVotesCachingInterval, PruningStrategy, -}; - -frame_support::parameter_types! { - pub BridgeValidatorsConfiguration: pallet_bridge_eth_poa::ValidatorsConfiguration = bench_validator_config(); -} - -fn bench_validator_config() -> ValidatorsConfiguration { - ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(vec![[1; 20].into()])), - (1, ValidatorsSource::Contract([3; 20].into(), vec![[1; 20].into()])), - ]) -} diff --git a/polkadot/bridges/bin/rialto/runtime/src/exchange.rs b/polkadot/bridges/bin/rialto/runtime/src/exchange.rs deleted file mode 100644 index 926d9595372a04689b838571e9791e42d8607dcb..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/runtime/src/exchange.rs +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! 
Support for PoA -> Substrate native tokens exchange. -//! -//! If you want to exchange native PoA tokens for native Substrate -//! chain tokens, you need to: -//! 1) send some PoA tokens to `LOCK_FUNDS_ADDRESS` address on PoA chain. Data field of -//! the transaction must be SCALE-encoded id of Substrate account that will receive -//! funds on Substrate chain; -//! 2) wait until the 'lock funds' transaction is mined on PoA chain; -//! 3) wait until the block containing the 'lock funds' transaction is finalized on PoA chain; -//! 4) wait until the required PoA header and its finality are provided -//! to the PoA -> Substrate bridge module (it can be provided by you); -//! 5) receive tokens by providing proof-of-inclusion of PoA transaction. - -use bp_currency_exchange::{ - Error as ExchangeError, LockFundsTransaction, MaybeLockFundsTransaction, Result as ExchangeResult, -}; -use bp_eth_poa::{transaction_decode_rlp, RawTransaction, RawTransactionReceipt}; -use codec::{Decode, Encode}; -use frame_support::RuntimeDebug; -use hex_literal::hex; -use sp_std::vec::Vec; - -/// Ethereum address where locked PoA funds must be sent to. -pub const LOCK_FUNDS_ADDRESS: [u8; 20] = hex!("DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF"); - -/// Ethereum transaction inclusion proof. -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] -pub struct EthereumTransactionInclusionProof { - /// Hash of the block with transaction. - pub block: sp_core::H256, - /// Index of the transaction within the block. - pub index: u64, - /// The proof itself (right now it is all RLP-encoded transactions of the block + - /// RLP-encoded receipts of all transactions of the block). - pub proof: Vec<(RawTransaction, RawTransactionReceipt)>, -} - -/// We uniquely identify transfer by the pair (sender, nonce). -/// -/// The assumption is that this pair will never appear more than once in -/// transactions included into finalized blocks. 
This is obviously true -/// for any existing eth-like chain (that keep current tx format), because -/// otherwise transaction can be replayed over and over. -#[derive(Encode, Decode, PartialEq, RuntimeDebug)] -pub struct EthereumTransactionTag { - /// Account that has locked funds. - pub account: [u8; 20], - /// Lock transaction nonce. - pub nonce: sp_core::U256, -} - -/// Eth transaction from runtime perspective. -pub struct EthTransaction; - -impl MaybeLockFundsTransaction for EthTransaction { - type Transaction = RawTransaction; - type Id = EthereumTransactionTag; - type Recipient = crate::AccountId; - type Amount = crate::Balance; - - fn parse( - raw_tx: &Self::Transaction, - ) -> ExchangeResult> { - let tx = transaction_decode_rlp(raw_tx).map_err(|_| ExchangeError::InvalidTransaction)?; - - // we only accept transactions sending funds directly to the pre-configured address - if tx.unsigned.to != Some(LOCK_FUNDS_ADDRESS.into()) { - frame_support::debug::trace!( - target: "runtime", - "Failed to parse fund locks transaction. Invalid peer recipient: {:?}", - tx.unsigned.to, - ); - - return Err(ExchangeError::InvalidTransaction); - } - - let mut recipient_raw = sp_core::H256::default(); - match tx.unsigned.payload.len() { - 32 => recipient_raw.as_fixed_bytes_mut().copy_from_slice(&tx.unsigned.payload), - len => { - frame_support::debug::trace!( - target: "runtime", - "Failed to parse fund locks transaction. Invalid recipient length: {}", - len, - ); - - return Err(ExchangeError::InvalidRecipient); - } - } - let amount = tx.unsigned.value.low_u128(); - - if tx.unsigned.value != amount.into() { - frame_support::debug::trace!( - target: "runtime", - "Failed to parse fund locks transaction. 
Invalid amount: {}", - tx.unsigned.value, - ); - - return Err(ExchangeError::InvalidAmount); - } - - Ok(LockFundsTransaction { - id: EthereumTransactionTag { - account: *tx.sender.as_fixed_bytes(), - nonce: tx.unsigned.nonce, - }, - recipient: crate::AccountId::from(*recipient_raw.as_fixed_bytes()), - amount, - }) - } -} - -/// Prepares everything required to bench claim of funds locked by given transaction. -#[cfg(feature = "runtime-benchmarks")] -pub(crate) fn prepare_environment_for_claim, I: frame_support::traits::Instance>( - transactions: &[(RawTransaction, RawTransactionReceipt)], -) -> bp_eth_poa::H256 { - use bp_eth_poa::compute_merkle_root; - use pallet_bridge_eth_poa::{ - test_utils::{insert_dummy_header, validator_utils::validator, HeaderBuilder}, - BridgeStorage, Storage, - }; - - let mut storage = BridgeStorage::::new(); - let header = HeaderBuilder::with_parent_number_on_runtime::(0) - .transactions_root(compute_merkle_root(transactions.iter().map(|(tx, _)| tx))) - .receipts_root(compute_merkle_root(transactions.iter().map(|(_, receipt)| receipt))) - .sign_by(&validator(0)); - let header_id = header.compute_id(); - insert_dummy_header(&mut storage, header); - storage.finalize_and_prune_headers(Some(header_id), 0); - - header_id.hash -} - -/// Prepare signed ethereum lock-funds transaction. 
-#[cfg(any(feature = "runtime-benchmarks", test))] -pub(crate) fn prepare_ethereum_transaction( - recipient: &crate::AccountId, - editor: impl Fn(&mut bp_eth_poa::UnsignedTransaction), -) -> (RawTransaction, RawTransactionReceipt) { - use bp_eth_poa::{signatures::SignTransaction, Receipt, TransactionOutcome}; - - // prepare tx for OpenEthereum private dev chain: - // chain id is 0x11 - // sender secret is 0x4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7 - let chain_id = 0x11; - let signer = secp256k1::SecretKey::parse(&hex!( - "4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7" - )) - .unwrap(); - let recipient_raw: &[u8; 32] = recipient.as_ref(); - let mut eth_tx = bp_eth_poa::UnsignedTransaction { - nonce: 0.into(), - to: Some(LOCK_FUNDS_ADDRESS.into()), - value: 100.into(), - gas: 100_000.into(), - gas_price: 100_000.into(), - payload: recipient_raw.to_vec(), - }; - editor(&mut eth_tx); - ( - eth_tx.sign_by(&signer, Some(chain_id)), - Receipt { - outcome: TransactionOutcome::StatusCode(1), - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp(), - ) -} - -#[cfg(test)] -mod tests { - use super::*; - use hex_literal::hex; - - fn ferdie() -> crate::AccountId { - hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c").into() - } - - #[test] - fn valid_transaction_accepted() { - assert_eq!( - EthTransaction::parse(&prepare_ethereum_transaction(&ferdie(), |_| {}).0), - Ok(LockFundsTransaction { - id: EthereumTransactionTag { - account: hex!("00a329c0648769a73afac7f9381e08fb43dbea72"), - nonce: 0.into(), - }, - recipient: ferdie(), - amount: 100, - }), - ); - } - - #[test] - fn invalid_transaction_rejected() { - assert_eq!( - EthTransaction::parse(&Vec::new()), - Err(ExchangeError::InvalidTransaction), - ); - } - - #[test] - fn transaction_with_invalid_peer_recipient_rejected() { - assert_eq!( - EthTransaction::parse( - &prepare_ethereum_transaction(&ferdie(), |tx| { - tx.to = 
None; - }) - .0 - ), - Err(ExchangeError::InvalidTransaction), - ); - } - - #[test] - fn transaction_with_invalid_recipient_rejected() { - assert_eq!( - EthTransaction::parse( - &prepare_ethereum_transaction(&ferdie(), |tx| { - tx.payload.clear(); - }) - .0 - ), - Err(ExchangeError::InvalidRecipient), - ); - } - - #[test] - fn transaction_with_invalid_amount_rejected() { - assert_eq!( - EthTransaction::parse( - &prepare_ethereum_transaction(&ferdie(), |tx| { - tx.value = sp_core::U256::from(u128::max_value()) + sp_core::U256::from(1); - }) - .0 - ), - Err(ExchangeError::InvalidAmount), - ); - } -} diff --git a/polkadot/bridges/bin/rialto/runtime/src/kovan.rs b/polkadot/bridges/bin/rialto/runtime/src/kovan.rs deleted file mode 100644 index fa76347db251e11724407cb1a2a51fa3c51fbfc6..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/runtime/src/kovan.rs +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::exchange::EthereumTransactionInclusionProof; - -use bp_eth_poa::{Address, AuraHeader, RawTransaction, U256}; -use bp_header_chain::InclusionProofVerifier; -use frame_support::RuntimeDebug; -use hex_literal::hex; -use pallet_bridge_eth_poa::{ - AuraConfiguration, ChainTime as TChainTime, PruningStrategy as BridgePruningStrategy, ValidatorsConfiguration, - ValidatorsSource, -}; -use sp_std::prelude::*; - -frame_support::parameter_types! { - pub const FinalityVotesCachingInterval: Option = Some(16); - pub BridgeAuraConfiguration: AuraConfiguration = - kovan_aura_configuration(); - pub BridgeValidatorsConfiguration: ValidatorsConfiguration = - kovan_validators_configuration(); -} - -/// Max number of finalized headers to keep. It is equivalent of ~24 hours of -/// finalized blocks on current Kovan chain. -const FINALIZED_HEADERS_TO_KEEP: u64 = 20_000; - -/// Aura engine configuration for Kovan chain. -pub fn kovan_aura_configuration() -> AuraConfiguration { - AuraConfiguration { - empty_steps_transition: u64::max_value(), - strict_empty_steps_transition: 0, - validate_step_transition: 0x16e360, - validate_score_transition: 0x41a3c4, - two_thirds_majority_transition: u64::max_value(), - min_gas_limit: 0x1388.into(), - max_gas_limit: U256::max_value(), - maximum_extra_data_size: 0x20, - } -} - -/// Validators configuration for Kovan chain. 
-pub fn kovan_validators_configuration() -> ValidatorsConfiguration { - ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(genesis_validators())), - ( - 10960440, - ValidatorsSource::List(vec![ - hex!("00D6Cc1BA9cf89BD2e58009741f4F7325BAdc0ED").into(), - hex!("0010f94b296a852aaac52ea6c5ac72e03afd032d").into(), - hex!("00a0a24b9f0e5ec7aa4c7389b8302fd0123194de").into(), - ]), - ), - ( - 10960500, - ValidatorsSource::Contract( - hex!("aE71807C1B0a093cB1547b682DC78316D945c9B8").into(), - vec![ - hex!("d05f7478c6aa10781258c5cc8b4f385fc8fa989c").into(), - hex!("03801efb0efe2a25ede5dd3a003ae880c0292e4d").into(), - hex!("a4df255ecf08bbf2c28055c65225c9a9847abd94").into(), - hex!("596e8221a30bfe6e7eff67fee664a01c73ba3c56").into(), - hex!("faadface3fbd81ce37b0e19c0b65ff4234148132").into(), - ], - ), - ), - ]) -} - -/// Genesis validators set of Kovan chain. -pub fn genesis_validators() -> Vec
{ - vec![ - hex!("00D6Cc1BA9cf89BD2e58009741f4F7325BAdc0ED").into(), - hex!("00427feae2419c15b89d1c21af10d1b6650a4d3d").into(), - hex!("4Ed9B08e6354C70fE6F8CB0411b0d3246b424d6c").into(), - hex!("0020ee4Be0e2027d76603cB751eE069519bA81A1").into(), - hex!("0010f94b296a852aaac52ea6c5ac72e03afd032d").into(), - hex!("007733a1FE69CF3f2CF989F81C7b4cAc1693387A").into(), - hex!("00E6d2b931F55a3f1701c7389d592a7778897879").into(), - hex!("00e4a10650e5a6D6001C38ff8E64F97016a1645c").into(), - hex!("00a0a24b9f0e5ec7aa4c7389b8302fd0123194de").into(), - ] -} - -/// Genesis header of the Kovan chain. -pub fn genesis_header() -> AuraHeader { - AuraHeader { - parent_hash: Default::default(), - timestamp: 0, - number: 0, - author: Default::default(), - transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), - uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").into(), - extra_data: vec![], - state_root: hex!("2480155b48a1cea17d67dbfdfaafe821c1d19cdd478c5358e8ec56dec24502b2").into(), - receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), - log_bloom: Default::default(), - gas_used: Default::default(), - gas_limit: 6000000.into(), - difficulty: 131072.into(), - seal: vec![ - vec![128], - vec![ - 184, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - ], - } -} - -/// Kovan headers pruning strategy. -/// -/// We do not prune unfinalized headers because exchange module only accepts -/// claims from finalized headers. And if we're pruning unfinalized headers, then -/// some claims may never be accepted. 
-#[derive(Default, RuntimeDebug)] -pub struct PruningStrategy; - -impl BridgePruningStrategy for PruningStrategy { - fn pruning_upper_bound(&mut self, _best_number: u64, best_finalized_number: u64) -> u64 { - best_finalized_number.saturating_sub(FINALIZED_HEADERS_TO_KEEP) - } -} - -/// PoA Header timestamp verification against `Timestamp` pallet. -#[derive(Default, RuntimeDebug)] -pub struct ChainTime; - -impl TChainTime for ChainTime { - fn is_timestamp_ahead(&self, timestamp: u64) -> bool { - let now = super::Timestamp::now(); - timestamp > now - } -} - -/// The Kovan Blockchain as seen by the runtime. -pub struct KovanBlockchain; - -impl InclusionProofVerifier for KovanBlockchain { - type Transaction = RawTransaction; - type TransactionInclusionProof = EthereumTransactionInclusionProof; - - fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option { - let is_transaction_finalized = - crate::BridgeKovan::verify_transaction_finalized(proof.block, proof.index, &proof.proof); - - if !is_transaction_finalized { - return None; - } - - proof.proof.get(proof.index as usize).map(|(tx, _)| tx.clone()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn pruning_strategy_keeps_enough_headers() { - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 10_000), - 0, - "10_000 <= 20_000 => nothing should be pruned yet", - ); - - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 20_000), - 0, - "20_000 <= 20_000 => nothing should be pruned yet", - ); - - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 30_000), - 10_000, - "20_000 <= 30_000 => we're ready to prune first 10_000 headers", - ); - } -} diff --git a/polkadot/bridges/bin/rialto/runtime/src/lib.rs b/polkadot/bridges/bin/rialto/runtime/src/lib.rs deleted file mode 100644 index 033e4f9f5961553bb00e06bc8227e2bffd6224a4..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/runtime/src/lib.rs +++ 
/dev/null @@ -1,1150 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The Rialto runtime. This can be compiled with `#[no_std]`, ready for Wasm. - -#![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. -#![recursion_limit = "256"] -// Runtime-generated enums -#![allow(clippy::large_enum_variant)] -// Runtime-generated DecodeLimit::decode_all_With_depth_limit -#![allow(clippy::unnecessary_mut_passed)] -// From construct_runtime macro -#![allow(clippy::from_over_into)] - -// Make the WASM binary available. 
-#[cfg(feature = "std")] -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); - -pub mod exchange; - -#[cfg(feature = "runtime-benchmarks")] -pub mod benches; -pub mod kovan; -pub mod millau_messages; -pub mod rialto_poa; - -use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge}; - -use bridge_runtime_common::messages::{source::estimate_message_dispatch_and_delivery_fee, MessageBridge}; -use codec::Decode; -use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList}; -use sp_api::impl_runtime_apis; -use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::traits::{Block as BlockT, IdentityLookup, NumberFor, OpaqueKeys}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, MultiSignature, MultiSigner, -}; -use sp_std::prelude::*; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - -// A few exports that help ease life for downstream crates. -pub use frame_support::{ - construct_runtime, parameter_types, - traits::{Currency, ExistenceRequirement, Imbalance, KeyOwnerProofSystem, Randomness}, - weights::{constants::WEIGHT_PER_SECOND, DispatchClass, IdentityFee, RuntimeDbWeight, Weight}, - StorageValue, -}; - -pub use frame_system::Call as SystemCall; -pub use pallet_balances::Call as BalancesCall; -pub use pallet_bridge_currency_exchange::Call as BridgeCurrencyExchangeCall; -pub use pallet_bridge_eth_poa::Call as BridgeEthPoACall; -pub use pallet_message_lane::Call as MessageLaneCall; -pub use pallet_substrate_bridge::Call as BridgeMillauCall; -pub use pallet_sudo::Call as SudoCall; -pub use pallet_timestamp::Call as TimestampCall; - -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; -pub use sp_runtime::{Perbill, Permill}; - -/// An index to a block. 
-pub type BlockNumber = bp_rialto::BlockNumber; - -/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. -pub type Signature = bp_rialto::Signature; - -/// Some way of identifying an account on the chain. We intentionally make it equivalent -/// to the public key of our transaction signing scheme. -pub type AccountId = bp_rialto::AccountId; - -/// The type for looking up accounts. We don't expect more than 4 billion of them, but you -/// never know... -pub type AccountIndex = u32; - -/// Balance of an account. -pub type Balance = bp_rialto::Balance; - -/// Index of a transaction in the chain. -pub type Index = u32; - -/// A hash of some data used by the chain. -pub type Hash = bp_rialto::Hash; - -/// Hashing algorithm used by the chain. -pub type Hashing = bp_rialto::Hasher; - -/// Digest item type. -pub type DigestItem = generic::DigestItem; - -/// Opaque types. These are used by the CLI to instantiate machinery that don't need to know -/// the specifics of the runtime. They can then be made to be agnostic over specific formats -/// of data like extrinsics, allowing for them to continue syncing the network through upgrades -/// to even the core data structures. -pub mod opaque { - use super::*; - - pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; - - /// Opaque block header type. - pub type Header = generic::Header; - /// Opaque block type. - pub type Block = generic::Block; - /// Opaque block identifier type. - pub type BlockId = generic::BlockId; -} - -impl_opaque_keys! { - pub struct SessionKeys { - pub aura: Aura, - pub grandpa: Grandpa, - } -} - -/// This runtime version. 
-pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("rialto-runtime"), - impl_name: create_runtime_str!("rialto-runtime"), - authoring_version: 1, - spec_version: 1, - impl_version: 1, - apis: RUNTIME_API_VERSIONS, - transaction_version: 1, -}; - -/// The version information used to identify this runtime when compiled natively. -#[cfg(feature = "std")] -pub fn native_version() -> NativeVersion { - NativeVersion { - runtime_version: VERSION, - can_author_with: Default::default(), - } -} - -parameter_types! { - pub const BlockHashCount: BlockNumber = 250; - pub const Version: RuntimeVersion = VERSION; - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 60_000_000, // ~0.06 ms = ~60 µs - write: 200_000_000, // ~0.2 ms = 200 µs - }; - pub const SS58Prefix: u8 = 48; -} - -impl frame_system::Config for Runtime { - /// The basic call filter to use in dispatchable. - type BaseCallFilter = (); - /// The identifier used to distinguish between accounts. - type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type Call = Call; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = IdentityLookup; - /// The index type for storing how many extrinsics an account has signed. - type Index = Index; - /// The index type for blocks. - type BlockNumber = BlockNumber; - /// The type for hashing blocks and tries. - type Hash = Hash; - /// The hashing algorithm used. - type Hashing = Hashing; - /// The header type. - type Header = generic::Header; - /// The ubiquitous event type. - type Event = Event; - /// The ubiquitous origin type. - type Origin = Origin; - /// Maximum number of block number to block hash mappings to keep (oldest pruned first). - type BlockHashCount = BlockHashCount; - /// Version of the runtime. - type Version = Version; - /// Provides information about the pallet setup in the runtime. 
- type PalletInfo = PalletInfo; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); - /// The data to be stored in an account. - type AccountData = pallet_balances::AccountData; - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - /// Weight information for the extrinsics of this pallet. - type SystemWeightInfo = (); - /// Block and extrinsics weights: base values and limits. - type BlockWeights = bp_rialto::BlockWeights; - /// The maximum length of a block (in bytes). - type BlockLength = bp_rialto::BlockLength; - /// The weight of database operations that the runtime can invoke. - type DbWeight = DbWeight; - /// The designated SS58 prefix of this chain. - type SS58Prefix = SS58Prefix; - type OnSetCode = (); -} - -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; -} - -type RialtoPoA = pallet_bridge_eth_poa::Instance1; -impl pallet_bridge_eth_poa::Config for Runtime { - type AuraConfiguration = rialto_poa::BridgeAuraConfiguration; - type FinalityVotesCachingInterval = rialto_poa::FinalityVotesCachingInterval; - type ValidatorsConfiguration = rialto_poa::BridgeValidatorsConfiguration; - type PruningStrategy = rialto_poa::PruningStrategy; - type ChainTime = rialto_poa::ChainTime; - type OnHeadersSubmitted = (); -} - -type Kovan = pallet_bridge_eth_poa::Instance2; -impl pallet_bridge_eth_poa::Config for Runtime { - type AuraConfiguration = kovan::BridgeAuraConfiguration; - type FinalityVotesCachingInterval = kovan::FinalityVotesCachingInterval; - type ValidatorsConfiguration = kovan::BridgeValidatorsConfiguration; - type PruningStrategy = kovan::PruningStrategy; - type ChainTime = kovan::ChainTime; - type OnHeadersSubmitted = (); -} - -type RialtoCurrencyExchange = pallet_bridge_currency_exchange::Instance1; -impl pallet_bridge_currency_exchange::Config for Runtime { - type OnTransactionSubmitted = (); - 
type PeerBlockchain = rialto_poa::RialtoBlockchain; - type PeerMaybeLockFundsTransaction = exchange::EthTransaction; - type RecipientsMap = bp_currency_exchange::IdentityRecipients; - type Amount = Balance; - type CurrencyConverter = bp_currency_exchange::IdentityCurrencyConverter; - type DepositInto = DepositInto; -} - -type KovanCurrencyExchange = pallet_bridge_currency_exchange::Instance2; -impl pallet_bridge_currency_exchange::Config for Runtime { - type OnTransactionSubmitted = (); - type PeerBlockchain = kovan::KovanBlockchain; - type PeerMaybeLockFundsTransaction = exchange::EthTransaction; - type RecipientsMap = bp_currency_exchange::IdentityRecipients; - type Amount = Balance; - type CurrencyConverter = bp_currency_exchange::IdentityCurrencyConverter; - type DepositInto = DepositInto; -} - -impl pallet_bridge_call_dispatch::Config for Runtime { - type Event = Event; - type MessageId = (bp_message_lane::LaneId, bp_message_lane::MessageNonce); - type Call = Call; - type CallFilter = (); - type EncodedCall = crate::millau_messages::FromMillauEncodedCall; - type SourceChainAccountId = bp_millau::AccountId; - type TargetChainAccountPublic = MultiSigner; - type TargetChainSignature = MultiSignature; - type AccountIdConverter = bp_rialto::AccountIdConverter; -} - -pub struct DepositInto; - -impl bp_currency_exchange::DepositInto for DepositInto { - type Recipient = AccountId; - type Amount = Balance; - - fn deposit_into(recipient: Self::Recipient, amount: Self::Amount) -> bp_currency_exchange::Result<()> { - // let balances module make all checks for us (it won't allow depositing lower than existential - // deposit, balance overflow, ...) 
- let deposited = as Currency>::deposit_creating(&recipient, amount); - - // I'm dropping deposited here explicitly to illustrate the fact that it'll update `TotalIssuance` - // on drop - let deposited_amount = deposited.peek(); - drop(deposited); - - // we have 3 cases here: - // - deposited == amount: success - // - deposited == 0: deposit has failed and no changes to storage were made - // - deposited != 0: (should never happen in practice) deposit has been partially completed - match deposited_amount { - _ if deposited_amount == amount => { - frame_support::debug::trace!( - target: "runtime", - "Deposited {} to {:?}", - amount, - recipient, - ); - - Ok(()) - } - _ if deposited_amount == 0 => { - frame_support::debug::error!( - target: "runtime", - "Deposit of {} to {:?} has failed", - amount, - recipient, - ); - - Err(bp_currency_exchange::Error::DepositFailed) - } - _ => { - frame_support::debug::error!( - target: "runtime", - "Deposit of {} to {:?} has partially competed. {} has been deposited", - amount, - recipient, - deposited_amount, - ); - - // we can't return DepositFailed error here, because storage changes were made - Err(bp_currency_exchange::Error::DepositPartiallyFailed) - } - } - } -} - -impl pallet_grandpa::Config for Runtime { - type Event = Event; - type Call = Call; - type KeyOwnerProofSystem = (); - type KeyOwnerProof = >::Proof; - type KeyOwnerIdentification = - >::IdentificationTuple; - type HandleEquivocation = (); - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - type WeightInfo = (); -} - -parameter_types! { - pub const MinimumPeriod: u64 = bp_rialto::SLOT_DURATION / 2; -} - -impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the unix epoch. - type Moment = u64; - type OnTimestampSet = Aura; - type MinimumPeriod = MinimumPeriod; - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - type WeightInfo = (); -} - -parameter_types! 
{ - pub const ExistentialDeposit: bp_rialto::Balance = 500; - // For weight estimation, we assume that the most locks on an individual account will be 50. - // This number may need to be adjusted in the future if this assumption no longer holds true. - pub const MaxLocks: u32 = 50; -} - -impl pallet_balances::Config for Runtime { - /// The type for recording an account's balance. - type Balance = Balance; - /// The ubiquitous event type. - type Event = Event; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - type WeightInfo = (); - type MaxLocks = MaxLocks; -} - -parameter_types! { - pub const TransactionBaseFee: Balance = 0; - pub const TransactionByteFee: Balance = 1; -} - -impl pallet_transaction_payment::Config for Runtime { - type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; - type TransactionByteFee = TransactionByteFee; - type WeightToFee = IdentityFee; - type FeeMultiplierUpdate = (); -} - -impl pallet_sudo::Config for Runtime { - type Event = Event; - type Call = Call; -} - -parameter_types! { - pub const Period: BlockNumber = bp_rialto::SESSION_LENGTH; - pub const Offset: BlockNumber = 0; -} - -impl pallet_session::Config for Runtime { - type Event = Event; - type ValidatorId = ::AccountId; - type ValidatorIdOf = (); - type ShouldEndSession = pallet_session::PeriodicSessions; - type NextSessionRotation = pallet_session::PeriodicSessions; - type SessionManager = pallet_shift_session_manager::Module; - type SessionHandler = ::KeyTypeIdProviders; - type Keys = SessionKeys; - type DisabledValidatorsThreshold = (); - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - type WeightInfo = (); -} - -impl pallet_substrate_bridge::Config for Runtime { - type BridgedChain = bp_millau::Millau; -} - -parameter_types! { - // This is a pretty unscientific cap. 
- // - // Note that once this is hit the pallet will essentially throttle incoming requests down to one - // call per block. - pub const MaxRequests: u32 = 50; -} - -impl pallet_finality_verifier::Config for Runtime { - type BridgedChain = bp_millau::Millau; - type HeaderChain = pallet_substrate_bridge::Module; - type AncestryProof = Vec; - type AncestryChecker = bp_header_chain::LinearAncestryChecker; - type MaxRequests = MaxRequests; -} - -impl pallet_shift_session_manager::Config for Runtime {} - -parameter_types! { - pub const MaxMessagesToPruneAtOnce: bp_message_lane::MessageNonce = 8; - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_message_lane::MessageNonce = - bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_message_lane::MessageNonce = - bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE; - // `IdentityFee` is used by Rialto => we may use weight directly - pub const GetDeliveryConfirmationTransactionFee: Balance = - bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT as _; - pub const RootAccountForPayments: Option = None; -} - -pub(crate) type WithMillauMessageLaneInstance = pallet_message_lane::DefaultInstance; -impl pallet_message_lane::Config for Runtime { - type Event = Event; - type WeightInfo = pallet_message_lane::weights::RialtoWeight; - type Parameter = millau_messages::RialtoToMillauMessageLaneParameter; - type MaxMessagesToPruneAtOnce = MaxMessagesToPruneAtOnce; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - - type OutboundPayload = crate::millau_messages::ToMillauMessagePayload; - type OutboundMessageFee = Balance; - - type InboundPayload = crate::millau_messages::FromMillauMessagePayload; - type InboundMessageFee = bp_millau::Balance; - type InboundRelayer = bp_millau::AccountId; - - type AccountIdConverter = 
bp_rialto::AccountIdConverter; - - type TargetHeaderChain = crate::millau_messages::Millau; - type LaneMessageVerifier = crate::millau_messages::ToMillauMessageVerifier; - type MessageDeliveryAndDispatchPayment = pallet_message_lane::instant_payments::InstantCurrencyPayments< - Runtime, - pallet_balances::Pallet, - GetDeliveryConfirmationTransactionFee, - RootAccountForPayments, - >; - - type SourceHeaderChain = crate::millau_messages::Millau; - type MessageDispatch = crate::millau_messages::FromMillauMessageDispatch; -} - -construct_runtime!( - pub enum Runtime where - Block = Block, - NodeBlock = opaque::Block, - UncheckedExtrinsic = UncheckedExtrinsic - { - BridgeRialtoPoA: pallet_bridge_eth_poa::::{Pallet, Call, Config, Storage, ValidateUnsigned}, - BridgeKovan: pallet_bridge_eth_poa::::{Pallet, Call, Config, Storage, ValidateUnsigned}, - BridgeRialtoCurrencyExchange: pallet_bridge_currency_exchange::::{Pallet, Call}, - BridgeKovanCurrencyExchange: pallet_bridge_currency_exchange::::{Pallet, Call}, - BridgeMillau: pallet_substrate_bridge::{Pallet, Call, Storage, Config}, - BridgeFinalityVerifier: pallet_finality_verifier::{Pallet, Call}, - BridgeCallDispatch: pallet_bridge_call_dispatch::{Pallet, Event}, - BridgeMillauMessageLane: pallet_message_lane::{Pallet, Call, Storage, Event}, - System: frame_system::{Pallet, Call, Config, Storage, Event}, - RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Call, Storage}, - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, - Aura: pallet_aura::{Pallet, Config}, - Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event}, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, - Sudo: pallet_sudo::{Pallet, Call, Config, Storage, Event}, - Session: pallet_session::{Pallet, Call, Storage, Event, Config}, - ShiftSessionManager: pallet_shift_session_manager::{Pallet}, - } -); - -/// The address format for 
describing accounts. -pub type Address = AccountId; -/// Block header type as expected by this runtime. -pub type Header = generic::Header; -/// Block type as expected by this runtime. -pub type Block = generic::Block; -/// A Block signed with a Justification -pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. -pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( - frame_system::CheckSpecVersion, - frame_system::CheckTxVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, -); -/// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; -/// Executive: handles dispatch to the various modules. -pub type Executive = - frame_executive::Executive, Runtime, AllPallets>; - -impl_runtime_apis! 
{ - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - Runtime::metadata().into() - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: sp_inherents::InherentData, - ) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - - fn random_seed() -> ::Hash { - RandomnessCollectiveFlip::random_seed().0 - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Index { - System::account_nonce(account) - } - } - - impl bp_eth_poa::RialtoPoAHeaderApi for Runtime { - fn best_block() -> (u64, bp_eth_poa::H256) { - let best_block = BridgeRialtoPoA::best_block(); - (best_block.number, best_block.hash) - } - - fn finalized_block() -> (u64, bp_eth_poa::H256) { - let finalized_block = BridgeRialtoPoA::finalized_block(); - (finalized_block.number, finalized_block.hash) - } - - fn is_import_requires_receipts(header: bp_eth_poa::AuraHeader) -> bool { - BridgeRialtoPoA::is_import_requires_receipts(header) - } - - fn is_known_block(hash: bp_eth_poa::H256) -> bool { - BridgeRialtoPoA::is_known_block(hash) - } - } - - impl bp_eth_poa::KovanHeaderApi for Runtime { - fn best_block() -> (u64, bp_eth_poa::H256) { - let best_block = BridgeKovan::best_block(); - (best_block.number, best_block.hash) - } - - fn finalized_block() -> (u64, bp_eth_poa::H256) { - let finalized_block = 
BridgeKovan::finalized_block(); - (finalized_block.number, finalized_block.hash) - } - - fn is_import_requires_receipts(header: bp_eth_poa::AuraHeader) -> bool { - BridgeKovan::is_import_requires_receipts(header) - } - - fn is_known_block(hash: bp_eth_poa::H256) -> bool { - BridgeKovan::is_known_block(hash) - } - } - - impl bp_millau::MillauHeaderApi for Runtime { - fn best_blocks() -> Vec<(bp_millau::BlockNumber, bp_millau::Hash)> { - BridgeMillau::best_headers() - } - - fn finalized_block() -> (bp_millau::BlockNumber, bp_millau::Hash) { - let header = BridgeMillau::best_finalized(); - (header.number, header.hash()) - } - - fn incomplete_headers() -> Vec<(bp_millau::BlockNumber, bp_millau::Hash)> { - BridgeMillau::require_justifications() - } - - fn is_known_block(hash: bp_millau::Hash) -> bool { - BridgeMillau::is_known_header(hash) - } - - fn is_finalized_block(hash: bp_millau::Hash) -> bool { - BridgeMillau::is_finalized_header(hash) - } - } - - impl bp_currency_exchange::RialtoCurrencyExchangeApi for Runtime { - fn filter_transaction_proof(proof: exchange::EthereumTransactionInclusionProof) -> bool { - BridgeRialtoCurrencyExchange::filter_transaction_proof(&proof) - } - } - - impl bp_currency_exchange::KovanCurrencyExchangeApi for Runtime { - fn filter_transaction_proof(proof: exchange::EthereumTransactionInclusionProof) -> bool { - BridgeKovanCurrencyExchange::filter_transaction_proof(&proof) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> u64 { - Aura::slot_duration() - } - - fn authorities() -> Vec { - Aura::authorities() - } - } - - impl 
sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, sp_core::crypto::KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl fg_primitives::GrandpaApi for Runtime { - fn grandpa_authorities() -> GrandpaAuthorityList { - Grandpa::grandpa_authorities() - } - - fn submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: fg_primitives::EquivocationProof< - ::Hash, - NumberFor, - >, - key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, - ) -> Option<()> { - let key_owner_proof = key_owner_proof.decode()?; - - Grandpa::submit_unsigned_equivocation_report( - equivocation_proof, - key_owner_proof, - ) - } - - fn generate_key_ownership_proof( - _set_id: fg_primitives::SetId, - _authority_id: GrandpaId, - ) -> Option { - // NOTE: this is the only implementation possible since we've - // defined our key owner proof type as a bottom type (i.e. a type - // with no values). - None - } - } - - impl bp_millau::ToMillauOutboundLaneApi for Runtime { - fn estimate_message_delivery_and_dispatch_fee( - _lane_id: bp_message_lane::LaneId, - payload: ToMillauMessagePayload, - ) -> Option { - estimate_message_dispatch_and_delivery_fee::( - &payload, - WithMillauMessageBridge::RELAYER_FEE_PERCENT, - ).ok() - } - - fn messages_dispatch_weight( - lane: bp_message_lane::LaneId, - begin: bp_message_lane::MessageNonce, - end: bp_message_lane::MessageNonce, - ) -> Vec<(bp_message_lane::MessageNonce, Weight, u32)> { - (begin..=end).filter_map(|nonce| { - let encoded_payload = BridgeMillauMessageLane::outbound_message_payload(lane, nonce)?; - let decoded_payload = millau_messages::ToMillauMessagePayload::decode( - &mut &encoded_payload[..] 
- ).ok()?; - Some((nonce, decoded_payload.weight, encoded_payload.len() as _)) - }) - .collect() - } - - fn latest_received_nonce(lane: bp_message_lane::LaneId) -> bp_message_lane::MessageNonce { - BridgeMillauMessageLane::outbound_latest_received_nonce(lane) - } - - fn latest_generated_nonce(lane: bp_message_lane::LaneId) -> bp_message_lane::MessageNonce { - BridgeMillauMessageLane::outbound_latest_generated_nonce(lane) - } - } - - impl bp_millau::FromMillauInboundLaneApi for Runtime { - fn latest_received_nonce(lane: bp_message_lane::LaneId) -> bp_message_lane::MessageNonce { - BridgeMillauMessageLane::inbound_latest_received_nonce(lane) - } - - fn latest_confirmed_nonce(lane: bp_message_lane::LaneId) -> bp_message_lane::MessageNonce { - BridgeMillauMessageLane::inbound_latest_confirmed_nonce(lane) - } - - fn unrewarded_relayers_state(lane: bp_message_lane::LaneId) -> bp_message_lane::UnrewardedRelayersState { - BridgeMillauMessageLane::inbound_unrewarded_relayers_state(lane) - } - } - - #[cfg(feature = "runtime-benchmarks")] - impl frame_benchmarking::Benchmark for Runtime { - fn dispatch_benchmark( - config: frame_benchmarking::BenchmarkConfig, - ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, TrackedStorageKey, add_benchmark}; - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - // Caller 0 Account - 
hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da946c154ffd9992e395af90b5b13cc6f295c77033fce8a9045824a6690bbf99c6db269502f0a8d1d2a008542d5690a0749").to_vec().into(), - ]; - - let mut batches = Vec::::new(); - let params = (&config, &whitelist); - - use pallet_bridge_currency_exchange::benchmarking::{ - Module as BridgeCurrencyExchangeBench, - Config as BridgeCurrencyExchangeConfig, - ProofParams as BridgeCurrencyExchangeProofParams, - }; - - impl BridgeCurrencyExchangeConfig for Runtime { - fn make_proof( - proof_params: BridgeCurrencyExchangeProofParams, - ) -> crate::exchange::EthereumTransactionInclusionProof { - use bp_currency_exchange::DepositInto; - - if proof_params.recipient_exists { - >::DepositInto::deposit_into( - proof_params.recipient.clone(), - ExistentialDeposit::get(), - ).unwrap(); - } - - let (transaction, receipt) = crate::exchange::prepare_ethereum_transaction( - &proof_params.recipient, - |tx| { - // our runtime only supports transactions where data is exactly 32 bytes long - // (receiver key) - // => we are ignoring `transaction_size_factor` here - tx.value = (ExistentialDeposit::get() * 10).into(); - }, - ); - let transactions = sp_std::iter::repeat((transaction, receipt)) - .take(1 + proof_params.proof_size_factor as usize) - .collect::>(); - let block_hash = crate::exchange::prepare_environment_for_claim::(&transactions); - crate::exchange::EthereumTransactionInclusionProof { - block: block_hash, - index: 0, - proof: transactions, - } - } - } - - use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge}; - use bridge_runtime_common::messages; - use pallet_message_lane::benchmarking::{ - Module as MessageLaneBench, - Config as MessageLaneConfig, - MessageDeliveryProofParams as MessageLaneMessageDeliveryProofParams, - MessageParams as MessageLaneMessageParams, - MessageProofParams as MessageLaneMessageProofParams, - ProofSize as MessageLaneProofSize, - }; - - impl MessageLaneConfig for 
Runtime { - fn maximal_message_size() -> u32 { - messages::source::maximal_message_size::() - } - - fn bridged_relayer_id() -> Self::InboundRelayer { - Default::default() - } - - fn account_balance(account: &Self::AccountId) -> Self::OutboundMessageFee { - pallet_balances::Pallet::::free_balance(account) - } - - fn endow_account(account: &Self::AccountId) { - pallet_balances::Pallet::::make_free_balance_be( - account, - Balance::MAX / 100, - ); - } - - fn prepare_outbound_message( - params: MessageLaneMessageParams, - ) -> (millau_messages::ToMillauMessagePayload, Balance) { - let message_payload = vec![0; params.size as usize]; - let dispatch_origin = pallet_bridge_call_dispatch::CallOrigin::SourceAccount( - params.sender_account, - ); - - let message = ToMillauMessagePayload { - spec_version: 0, - weight: params.size as _, - origin: dispatch_origin, - call: message_payload, - }; - (message, pallet_message_lane::benchmarking::MESSAGE_FEE.into()) - } - - fn prepare_message_proof( - params: MessageLaneMessageProofParams, - ) -> (millau_messages::FromMillauMessagesProof, Weight) { - use crate::millau_messages::{Millau, WithMillauMessageBridge}; - use bp_message_lane::MessageKey; - use bridge_runtime_common::{ - messages::ChainWithMessageLanes, - messages_benchmarking::{ed25519_sign, prepare_message_proof}, - }; - use codec::Encode; - use frame_support::weights::GetDispatchInfo; - use pallet_message_lane::storage_keys; - use sp_runtime::traits::Header; - - let remark = match params.size { - MessageLaneProofSize::Minimal(ref size) => vec![0u8; *size as _], - _ => vec![], - }; - let call = Call::System(SystemCall::remark(remark)); - let call_weight = call.get_dispatch_info().weight; - - let millau_account_id: bp_millau::AccountId = Default::default(); - let (rialto_raw_public, rialto_raw_signature) = ed25519_sign( - &call, - &millau_account_id, - ); - let rialto_public = MultiSigner::Ed25519(sp_core::ed25519::Public::from_raw(rialto_raw_public)); - let rialto_signature 
= MultiSignature::Ed25519(sp_core::ed25519::Signature::from_raw( - rialto_raw_signature, - )); - - let make_millau_message_key = |message_key: MessageKey| storage_keys::message_key::< - Runtime, - ::MessageLaneInstance, - >( - &message_key.lane_id, message_key.nonce, - ).0; - let make_millau_outbound_lane_data_key = |lane_id| storage_keys::outbound_lane_data_key::< - ::MessageLaneInstance, - >( - &lane_id, - ).0; - let make_millau_header = |state_root| bp_millau::Header::new( - 0, - Default::default(), - state_root, - Default::default(), - Default::default(), - ); - - prepare_message_proof::( - params, - make_millau_message_key, - make_millau_outbound_lane_data_key, - make_millau_header, - call_weight, - pallet_bridge_call_dispatch::MessagePayload { - spec_version: VERSION.spec_version, - weight: call_weight, - origin: pallet_bridge_call_dispatch::CallOrigin::< - bp_millau::AccountId, - MultiSigner, - Signature, - >::TargetAccount( - millau_account_id, - rialto_public, - rialto_signature, - ), - call: call.encode(), - }.encode(), - ) - } - - fn prepare_message_delivery_proof( - params: MessageLaneMessageDeliveryProofParams, - ) -> millau_messages::ToMillauMessagesDeliveryProof { - use crate::millau_messages::{Millau, WithMillauMessageBridge}; - use bridge_runtime_common::{ - messages::ChainWithMessageLanes, - messages_benchmarking::prepare_message_delivery_proof, - }; - use sp_runtime::traits::Header; - - prepare_message_delivery_proof::( - params, - |lane_id| pallet_message_lane::storage_keys::inbound_lane_data_key::< - Runtime, - ::MessageLaneInstance, - >( - &lane_id, - ).0, - |state_root| bp_millau::Header::new( - 0, - Default::default(), - state_root, - Default::default(), - Default::default(), - ), - ) - } - } - - add_benchmark!(params, batches, pallet_bridge_eth_poa, BridgeKovan); - add_benchmark!( - params, - batches, - pallet_bridge_currency_exchange, - BridgeCurrencyExchangeBench:: - ); - add_benchmark!( - params, - batches, - pallet_message_lane, - 
MessageLaneBench:: - ); - - if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } - Ok(batches) - } - } -} - -/// Millau account ownership digest from Rialto. -/// -/// The byte vector returned by this function should be signed with a Millau account private key. -/// This way, the owner of `rialto_account_id` on Rialto proves that the 'millau' account private key -/// is also under his control. -pub fn millau_account_ownership_digest( - millau_call: &Call, - rialto_account_id: AccountId, - millau_spec_version: SpecVersion, -) -> sp_std::vec::Vec -where - Call: codec::Encode, - AccountId: codec::Encode, - SpecVersion: codec::Encode, -{ - pallet_bridge_call_dispatch::account_ownership_digest( - millau_call, - rialto_account_id, - millau_spec_version, - bp_runtime::RIALTO_BRIDGE_INSTANCE, - ) -} - -#[cfg(test)] -mod tests { - use super::*; - use bp_currency_exchange::DepositInto; - use bridge_runtime_common::messages; - - fn run_deposit_into_test(test: impl Fn(AccountId) -> Balance) { - let mut ext: sp_io::TestExternalities = SystemConfig::default().build_storage::().unwrap().into(); - ext.execute_with(|| { - // initially issuance is zero - assert_eq!( - as Currency>::total_issuance(), - 0, - ); - - // create account - let account: AccountId = [1u8; 32].into(); - let initial_amount = ExistentialDeposit::get(); - let deposited = - as Currency>::deposit_creating(&account, initial_amount); - drop(deposited); - assert_eq!( - as Currency>::total_issuance(), - initial_amount, - ); - assert_eq!( - as Currency>::free_balance(&account), - initial_amount, - ); - - // run test - let total_issuance_change = test(account); - - // check that total issuance has changed by `run_deposit_into_test` - assert_eq!( - as Currency>::total_issuance(), - initial_amount + total_issuance_change, - ); - }); - } - - #[test] - fn ensure_rialto_message_lane_weights_are_correct() { - type Weights = pallet_message_lane::weights::RialtoWeight; - - 
pallet_message_lane::ensure_weights_are_correct::( - bp_rialto::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT, - bp_rialto::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT, - bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, - ); - - let max_incoming_message_proof_size = bp_millau::EXTRA_STORAGE_PROOF_SIZE.saturating_add( - messages::target::maximal_incoming_message_size(bp_rialto::max_extrinsic_size()), - ); - pallet_message_lane::ensure_able_to_receive_message::( - bp_rialto::max_extrinsic_size(), - bp_rialto::max_extrinsic_weight(), - max_incoming_message_proof_size, - bridge_runtime_common::messages::transaction_weight_without_multiplier( - bp_rialto::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - max_incoming_message_proof_size as _, - 0, - ), - messages::target::maximal_incoming_message_dispatch_weight(bp_rialto::max_extrinsic_weight()), - ); - - let max_incoming_inbound_lane_data_proof_size = bp_message_lane::InboundLaneData::<()>::encoded_size_hint( - bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, - bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _, - ) - .unwrap_or(u32::MAX); - pallet_message_lane::ensure_able_to_receive_confirmation::( - bp_rialto::max_extrinsic_size(), - bp_rialto::max_extrinsic_weight(), - max_incoming_inbound_lane_data_proof_size, - bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, - bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, - bridge_runtime_common::messages::transaction_weight_without_multiplier( - bp_rialto::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - max_incoming_inbound_lane_data_proof_size as _, - 0, - ), - ); - } - - #[test] - fn deposit_into_existing_account_works() { - run_deposit_into_test(|existing_account| { - let initial_amount = - as Currency>::free_balance(&existing_account); - let additional_amount = 10_000; - >::DepositInto::deposit_into( - existing_account.clone(), - additional_amount, - ) - .unwrap(); - assert_eq!( - as 
Currency>::free_balance(&existing_account), - initial_amount + additional_amount, - ); - additional_amount - }); - } - - #[test] - fn deposit_into_new_account_works() { - run_deposit_into_test(|_| { - let initial_amount = 0; - let additional_amount = ExistentialDeposit::get() + 10_000; - let new_account: AccountId = [42u8; 32].into(); - >::DepositInto::deposit_into( - new_account.clone(), - additional_amount, - ) - .unwrap(); - assert_eq!( - as Currency>::free_balance(&new_account), - initial_amount + additional_amount, - ); - additional_amount - }); - } -} diff --git a/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs b/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs deleted file mode 100644 index 9fb57ee861b6429e1f66a34c82fcabe8c7bb0649..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Everything required to serve Millau <-> Rialto message lanes. 
- -use crate::Runtime; - -use bp_message_lane::{ - source_chain::TargetHeaderChain, - target_chain::{ProvedMessages, SourceHeaderChain}, - InboundLaneData, LaneId, Message, MessageNonce, Parameter as MessageLaneParameter, -}; -use bp_runtime::{InstanceId, MILLAU_BRIDGE_INSTANCE}; -use bridge_runtime_common::messages::{self, ChainWithMessageLanes, MessageBridge}; -use codec::{Decode, Encode}; -use frame_support::{ - parameter_types, - weights::{DispatchClass, Weight, WeightToFeePolynomial}, - RuntimeDebug, -}; -use sp_core::storage::StorageKey; -use sp_runtime::{FixedPointNumber, FixedU128}; -use sp_std::{convert::TryFrom, ops::RangeInclusive}; - -parameter_types! { - /// Millau to Rialto conversion rate. Initially we treat both tokens as equal. - storage MillauToRialtoConversionRate: FixedU128 = 1.into(); -} - -/// Storage key of the Rialto -> Millau message in the runtime storage. -pub fn message_key(lane: &LaneId, nonce: MessageNonce) -> StorageKey { - pallet_message_lane::storage_keys::message_key::::MessageLaneInstance>( - lane, nonce, - ) -} - -/// Storage key of the Rialto -> Millau message lane state in the runtime storage. -pub fn outbound_lane_data_key(lane: &LaneId) -> StorageKey { - pallet_message_lane::storage_keys::outbound_lane_data_key::<::MessageLaneInstance>( - lane, - ) -} - -/// Storage key of the Millau -> Rialto message lane state in the runtime storage. -pub fn inbound_lane_data_key(lane: &LaneId) -> StorageKey { - pallet_message_lane::storage_keys::inbound_lane_data_key::< - Runtime, - ::MessageLaneInstance, - >(lane) -} - -/// Message payload for Rialto -> Millau messages. -pub type ToMillauMessagePayload = messages::source::FromThisChainMessagePayload; - -/// Message verifier for Rialto -> Millau messages. -pub type ToMillauMessageVerifier = messages::source::FromThisChainMessageVerifier; - -/// Message payload for Millau -> Rialto messages. 
-pub type FromMillauMessagePayload = messages::target::FromBridgedChainMessagePayload; - -/// Encoded Rialto Call as it comes from Millau. -pub type FromMillauEncodedCall = messages::target::FromBridgedChainEncodedMessageCall; - -/// Call-dispatch based message dispatch for Millau -> Rialto messages. -pub type FromMillauMessageDispatch = messages::target::FromBridgedChainMessageDispatch< - WithMillauMessageBridge, - crate::Runtime, - pallet_bridge_call_dispatch::DefaultInstance, ->; - -/// Messages proof for Millau -> Rialto messages. -pub type FromMillauMessagesProof = messages::target::FromBridgedChainMessagesProof; - -/// Messages delivery proof for Rialto -> Millau messages. -pub type ToMillauMessagesDeliveryProof = messages::source::FromBridgedChainMessagesDeliveryProof; - -/// Millau <-> Rialto message bridge. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct WithMillauMessageBridge; - -impl MessageBridge for WithMillauMessageBridge { - const INSTANCE: InstanceId = MILLAU_BRIDGE_INSTANCE; - - const RELAYER_FEE_PERCENT: u32 = 10; - - type ThisChain = Rialto; - type BridgedChain = Millau; - - fn maximal_extrinsic_size_on_target_chain() -> u32 { - bp_millau::max_extrinsic_size() - } - - fn weight_limits_of_message_on_bridged_chain(_message_payload: &[u8]) -> RangeInclusive { - // we don't want to relay too large messages + keep reserve for future upgrades - let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(bp_millau::max_extrinsic_weight()); - - // we're charging for payload bytes in `WithMillauMessageBridge::weight_of_delivery_transaction` function - // - // this bridge may be used to deliver all kind of messages, so we're not making any assumptions about - // minimal dispatch weight here - - 0..=upper_limit - } - - fn weight_of_delivery_transaction(message_payload: &[u8]) -> Weight { - let message_payload_len = u32::try_from(message_payload.len()) - .map(Into::into) - .unwrap_or(Weight::MAX); - let extra_bytes_in_payload = - 
message_payload_len.saturating_sub(pallet_message_lane::EXPECTED_DEFAULT_MESSAGE_LENGTH.into()); - messages::transaction_weight_without_multiplier( - bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - message_payload_len.saturating_add(bp_rialto::EXTRA_STORAGE_PROOF_SIZE as _), - extra_bytes_in_payload - .saturating_mul(bp_millau::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT) - .saturating_add(bp_millau::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT), - ) - } - - fn weight_of_delivery_confirmation_transaction_on_this_chain() -> Weight { - let inbounded_data_size: Weight = - InboundLaneData::::encoded_size_hint(bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, 1) - .map(Into::into) - .unwrap_or(Weight::MAX); - - messages::transaction_weight_without_multiplier( - bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - inbounded_data_size.saturating_add(bp_millau::EXTRA_STORAGE_PROOF_SIZE as _), - bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, - ) - } - - fn this_weight_to_this_balance(weight: Weight) -> bp_rialto::Balance { - ::WeightToFee::calc(&weight) - } - - fn bridged_weight_to_bridged_balance(weight: Weight) -> bp_millau::Balance { - // we're using the same weights in both chains now - ::WeightToFee::calc(&weight) as _ - } - - fn bridged_balance_to_this_balance(bridged_balance: bp_millau::Balance) -> bp_rialto::Balance { - bp_rialto::Balance::try_from(MillauToRialtoConversionRate::get().saturating_mul_int(bridged_balance)) - .unwrap_or(bp_rialto::Balance::MAX) - } -} - -/// Rialto chain from message lane point of view. 
-#[derive(RuntimeDebug, Clone, Copy)] -pub struct Rialto; - -impl messages::ChainWithMessageLanes for Rialto { - type Hash = bp_rialto::Hash; - type AccountId = bp_rialto::AccountId; - type Signer = bp_rialto::AccountSigner; - type Signature = bp_rialto::Signature; - type Call = crate::Call; - type Weight = Weight; - type Balance = bp_rialto::Balance; - - type MessageLaneInstance = crate::WithMillauMessageLaneInstance; -} - -impl messages::ThisChainWithMessageLanes for Rialto { - fn is_outbound_lane_enabled(lane: &LaneId) -> bool { - *lane == LaneId::default() - } - - fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { - MessageNonce::MAX - } -} - -/// Millau chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct Millau; - -impl messages::ChainWithMessageLanes for Millau { - type Hash = bp_millau::Hash; - type AccountId = bp_millau::AccountId; - type Signer = bp_millau::AccountSigner; - type Signature = bp_millau::Signature; - type Call = (); // unknown to us - type Weight = Weight; - type Balance = bp_millau::Balance; - - type MessageLaneInstance = pallet_message_lane::DefaultInstance; -} - -impl TargetHeaderChain for Millau { - type Error = &'static str; - // The proof is: - // - hash of the header this proof has been created with; - // - the storage proof of one or several keys; - // - id of the lane we prove state of. 
- type MessagesDeliveryProof = ToMillauMessagesDeliveryProof; - - fn verify_message(payload: &ToMillauMessagePayload) -> Result<(), Self::Error> { - messages::source::verify_chain_message::(payload) - } - - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), Self::Error> { - messages::source::verify_messages_delivery_proof::(proof) - } -} - -impl SourceHeaderChain for Millau { - type Error = &'static str; - // The proof is: - // - hash of the header this proof has been created with; - // - the storage proof of one or several keys; - // - id of the lane we prove messages for; - // - inclusive range of messages nonces that are proved. - type MessagesProof = FromMillauMessagesProof; - - fn verify_messages_proof( - proof: Self::MessagesProof, - messages_count: u32, - ) -> Result>, Self::Error> { - messages::target::verify_messages_proof::(proof, messages_count) - } -} - -/// Rialto -> Millau message lane pallet parameters. -#[derive(RuntimeDebug, Clone, Encode, Decode, PartialEq, Eq)] -pub enum RialtoToMillauMessageLaneParameter { - /// The conversion formula we use is: `RialtoTokens = MillauTokens * conversion_rate`. - MillauToRialtoConversionRate(FixedU128), -} - -impl MessageLaneParameter for RialtoToMillauMessageLaneParameter { - fn save(&self) { - match *self { - RialtoToMillauMessageLaneParameter::MillauToRialtoConversionRate(ref conversion_rate) => { - MillauToRialtoConversionRate::set(conversion_rate) - } - } - } -} diff --git a/polkadot/bridges/bin/rialto/runtime/src/rialto_poa.rs b/polkadot/bridges/bin/rialto/runtime/src/rialto_poa.rs deleted file mode 100644 index 54ac8e25713a018f9d8cdb53e1767302fc0e0558..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/rialto/runtime/src/rialto_poa.rs +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Configuration parameters for the Rialto PoA chain. - -use crate::exchange::EthereumTransactionInclusionProof; - -use bp_eth_poa::{Address, AuraHeader, RawTransaction, U256}; -use bp_header_chain::InclusionProofVerifier; -use frame_support::RuntimeDebug; -use hex_literal::hex; -use pallet_bridge_eth_poa::{ - AuraConfiguration, ChainTime as TChainTime, PruningStrategy as TPruningStrategy, ValidatorsConfiguration, - ValidatorsSource, -}; -use sp_std::prelude::*; - -frame_support::parameter_types! { - pub const FinalityVotesCachingInterval: Option = Some(8); - pub BridgeAuraConfiguration: AuraConfiguration = - aura_configuration(); - pub BridgeValidatorsConfiguration: ValidatorsConfiguration = - validators_configuration(); -} - -/// Max number of finalized headers to keep. -const FINALIZED_HEADERS_TO_KEEP: u64 = 5_000; - -/// Aura engine configuration for Rialto chain. -pub fn aura_configuration() -> AuraConfiguration { - AuraConfiguration { - empty_steps_transition: 0xfffffffff, - strict_empty_steps_transition: 0, - validate_step_transition: 0, - validate_score_transition: 0, - two_thirds_majority_transition: u64::max_value(), - min_gas_limit: 0x1388.into(), - max_gas_limit: U256::max_value(), - maximum_extra_data_size: 0x20, - } -} - -/// Validators configuration for Rialto PoA chain. 
-pub fn validators_configuration() -> ValidatorsConfiguration { - ValidatorsConfiguration::Single(ValidatorsSource::List(genesis_validators())) -} - -/// Genesis validators set of Rialto PoA chain. -pub fn genesis_validators() -> Vec
{ - vec![ - hex!("005e714f896a8b7cede9d38688c1a81de72a58e4").into(), - hex!("007594304039c2937a12220338aab821d819f5a4").into(), - hex!("004e7a39907f090e19b0b80a277e77b72b22e269").into(), - ] -} - -/// Genesis header of the Rialto PoA chain. -/// -/// To obtain genesis header from a running node, invoke: -/// ```bash -/// $ http localhost:8545 jsonrpc=2.0 id=1 method=eth_getBlockByNumber params:='["earliest", false]' -v -/// ``` -pub fn genesis_header() -> AuraHeader { - AuraHeader { - parent_hash: Default::default(), - timestamp: 0, - number: 0, - author: Default::default(), - transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), - uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").into(), - extra_data: vec![], - state_root: hex!("a992d04c791620ed7ed96555a80cf0568355bb4bee2656f46899a4372f25f248").into(), - receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), - log_bloom: Default::default(), - gas_used: Default::default(), - gas_limit: 0x222222.into(), - difficulty: 0x20000.into(), - seal: vec![vec![0x80], { - let mut vec = vec![0xb8, 0x41]; - vec.resize(67, 0); - vec - }], - } -} - -/// Rialto PoA headers pruning strategy. -/// -/// We do not prune unfinalized headers because exchange module only accepts -/// claims from finalized headers. And if we're pruning unfinalized headers, then -/// some claims may never be accepted. 
-#[derive(Default, RuntimeDebug)] -pub struct PruningStrategy; - -impl TPruningStrategy for PruningStrategy { - fn pruning_upper_bound(&mut self, _best_number: u64, best_finalized_number: u64) -> u64 { - best_finalized_number.saturating_sub(FINALIZED_HEADERS_TO_KEEP) - } -} - -/// ChainTime provider -#[derive(Default)] -pub struct ChainTime; - -impl TChainTime for ChainTime { - fn is_timestamp_ahead(&self, timestamp: u64) -> bool { - let now = super::Timestamp::now(); - timestamp > now - } -} - -/// The Rialto PoA Blockchain as seen by the runtime. -pub struct RialtoBlockchain; - -impl InclusionProofVerifier for RialtoBlockchain { - type Transaction = RawTransaction; - type TransactionInclusionProof = EthereumTransactionInclusionProof; - - fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option { - let is_transaction_finalized = - crate::BridgeRialtoPoA::verify_transaction_finalized(proof.block, proof.index, &proof.proof); - - if !is_transaction_finalized { - return None; - } - - proof.proof.get(proof.index as usize).map(|(tx, _)| tx.clone()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn genesis_hash_matches() { - assert_eq!( - genesis_header().compute_hash(), - hex!("1468e1a0fa20d30025a5a0f87e1cced4fdc393b84b7d2850b11ca5863db482cb").into(), - ); - } - - #[test] - fn pruning_strategy_keeps_enough_headers() { - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 1_000), - 0, - "1_000 <= 5_000 => nothing should be pruned yet", - ); - - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 5_000), - 0, - "5_000 <= 5_000 => nothing should be pruned yet", - ); - - assert_eq!( - PruningStrategy::default().pruning_upper_bound(100_000, 10_000), - 5_000, - "5_000 <= 10_000 => we're ready to prune first 5_000 headers", - ); - } -} diff --git a/polkadot/bridges/bin/runtime-common/Cargo.toml b/polkadot/bridges/bin/runtime-common/Cargo.toml deleted file mode 100644 index 
69b618e7daa5bd604b5348cc3392498a31de5031..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/runtime-common/Cargo.toml +++ /dev/null @@ -1,56 +0,0 @@ -[package] -name = "bridge-runtime-common" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/parity-bridges-common/" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -ed25519-dalek = { version = "1.0", default-features = false, optional = true } -hash-db = { version = "0.15.2", default-features = false } - -# Bridge dependencies - -bp-message-dispatch = { path = "../../primitives/message-dispatch", default-features = false } -bp-message-lane = { path = "../../primitives/message-lane", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -pallet-bridge-call-dispatch = { path = "../../modules/call-dispatch", default-features = false } -pallet-message-lane = { path = "../../modules/message-lane", default-features = false } -pallet-substrate-bridge = { path = "../../modules/substrate", default-features = false } - -# Substrate dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false 
} - -[features] -default = ["std"] -std = [ - "bp-message-dispatch/std", - "bp-message-lane/std", - "bp-runtime/std", - "codec/std", - "frame-support/std", - "hash-db/std", - "pallet-bridge-call-dispatch/std", - "pallet-message-lane/std", - "pallet-substrate-bridge/std", - "sp-core/std", - "sp-runtime/std", - "sp-state-machine/std", - "sp-std/std", - "sp-trie/std", -] -runtime-benchmarks = [ - "ed25519-dalek/u64_backend", - "pallet-message-lane/runtime-benchmarks", - "pallet-substrate-bridge/runtime-benchmarks", - "sp-state-machine", -] diff --git a/polkadot/bridges/bin/runtime-common/README.md b/polkadot/bridges/bin/runtime-common/README.md deleted file mode 100644 index 58fe92c9ca0dd7090cb8753187880c441ce3b037..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/runtime-common/README.md +++ /dev/null @@ -1,183 +0,0 @@ -# Helpers for Message Lane Module Integration - -The [`messages`](./src/messages.rs) module of this crate contains a bunch of helpers for integrating -message lane module into your runtime. Basic prerequisites of these helpers are: -- we're going to bridge Substrate-based chain with another Substrate-based chain; -- both chains have [message lane module](../../modules/message-lane/README.md), Substrate bridge - module and the [call dispatch module](../../modules/call-dispatch/README.md); -- all message lanes are identical and may be used to transfer the same messages; -- the messages sent over the bridge are dispatched using - [call dispatch module](../../modules/call-dispatch/README.md); -- the messages are `pallet_bridge_call_dispatch::MessagePayload` structures, where `call` field is - encoded `Call` of the target chain. This means that the `Call` is opaque to the - [message lane module](../../modules/message-lane/README.md) instance at the source chain. 
- It is pre-encoded by the message submitter; -- all proofs in the [message lane module](../../modules/message-lane/README.md) transactions are - based on the storage proofs from the bridged chain: storage proof of the outbound message (value - from the `pallet_message_lane::Store::MessagePayload` map), storage proof of the outbound lane - state (value from the `pallet_message_lane::Store::OutboundLanes` map) and storage proof of the - inbound lane state (value from the `pallet_message_lane::Store::InboundLanes` map); -- storage proofs are built at the finalized headers of the corresponding chain. So all message lane - transactions with proofs are verifying storage proofs against finalized chain headers from - Substrate bridge module. - -**IMPORTANT NOTE**: after reading this document, you may refer to our test runtimes -([rialto_messages.rs](../millau/runtime/src/rialto_messages.rs) and/or -[millau_messages.rs](../rialto/runtime/src/millau_messages.rs)) to see how to use these helpers. - -## Contents -- [`MessageBridge` Trait](#messagebridge-trait) -- [`ChainWithMessageLanes` Trait ](#chainwithmessagelanes-trait) -- [Helpers for the Source Chain](#helpers-for-the-source-chain) -- [Helpers for the Target Chain](#helpers-for-the-target-chain) - -## `MessageBridge` Trait - -The essence of your integration will be a struct that implements a `MessageBridge` trait. Let's -review every method and give some implementation hints here: - -- `MessageBridge::maximal_extrinsic_size_on_target_chain`: you will need to return the maximal - extrinsic size of the target chain from this function. This may be the constant that is updated - when your runtime is upgraded, or you may use the - [message lane parameters functionality](../../modules/message-lane/README.md#Non-Essential-Functionality) - to allow the pallet owner to update this value more frequently (you may also want to use this - functionality for all constants that are used in other methods described below). 
- -- `MessageBridge::weight_limits_of_message_on_bridged_chain`: you'll need to return a range of - dispatch weights that the outbound message may take at the target chain. Please keep in mind that - our helpers assume that the message is an encoded call of the target chain. But we never decode - this call at the source chain. So you can't simply get dispatch weight from pre-dispatch - information. Instead there are two options to prepare this range: if you know which calls are to - be sent over your bridge, then you may just return weight ranges for these particular calls. - Otherwise, if you're going to accept all kinds of calls, you may just return range `[0; maximal - incoming message dispatch weight]`. If you choose the latter, then you shall remember that the - delivery transaction itself has some weight, so you can't accept messages with weight equal to - maximal weight of extrinsic at the target chain. In our test chains, we reject all messages that - have declared dispatch weight larger than 50% of the maximal bridged extrinsic weight. - -- `MessageBridge::weight_of_delivery_transaction`: you will need to return the maximal weight of the - delivery transaction that delivers a given message to the target chain. There are three main - things to notice: - - 1. weight, returned from this function is then used to compute the fee that the - message sender needs to pay for the delivery transaction. So it shall not be a simple dispatch - weight of delivery call - it should be the "weight" of the transaction itself, including per-byte - "weight", "weight" of signed extras and etc. - 1. the delivery transaction brings storage proof of - the message, not the message itself. So your transaction will include extra bytes. We suggest - computing the size of single empty value storage proof at the source chain, increase this value a - bit and hardcode it in the source chain runtime code. 
This size then must be added to the size of - payload and included in the weight computation; - 1. before implementing this function, please take - a look at the - [weight formula of delivery transaction](../../modules/message-lane/README.md#Weight-of-receive_messages_proof-call). - It adds some extra weight for every additional byte of the proof (everything above - `pallet_message_lane::EXPECTED_DEFAULT_MESSAGE_LENGTH`), so it's not trivial. Even better, please - refer to [our implementation](../millau/runtime/src/rialto_messages.rs) for test chains for - details. - -- `MessageBridge::weight_of_delivery_confirmation_transaction_on_this_chain`: you'll need to return - the maximal weight of a single message delivery confirmation transaction on this chain. All points - from the previous paragraph are also relevant here. - -- `MessageBridge::this_weight_to_this_balance`: this function needs to convert weight units into fee - units on this chain. Most probably this can be done by calling - `pallet_transaction_payment::Config::WeightToFee::calc()` for passed weight. - -- `MessageBridge::bridged_weight_to_bridged_balance`: this function needs to convert weight units - into fee units on the target chain. The best case is when you have the same conversion formula on - both chains - then you may just call the same formula from the previous paragraph. Otherwise, - you'll need to hardcode this formula into your runtime. - -- `MessageBridge::bridged_balance_to_this_balance`: this may be the easiest method to implement and - the hardest to maintain at the same time. If you don't have any automatic methods to determine - conversion rate, then you'll probably need to maintain it by yourself (by updating conversion - rate, stored in runtime storage). This means that if you're too late with an update, then you risk - to accept messages with lower-than-expected fee. 
So it may be wise to have some reserve in this - conversion rate, even if that means larger delivery and dispatch fees. - -## `ChainWithMessageLanes` Trait - -Apart from its methods, `MessageBridge` also has two associated types that are implementing the -`ChainWithMessageLanes` trait. One is for this chain and the other is for the bridged chain. The -trait is quite simple and can easily be implemented - you just need to specify types used at the -corresponding chain. There are two exceptions, though. Both may be changed in the future. Here they -are: - -- `ChainWithMessageLanes::Call`: it isn't a good idea to reference bridged chain runtime from your - runtime (cyclic references + maintaining on upgrades). So you can't know the type of bridged chain - call in your runtime. This type isn't actually used at this chain, so you may use `()` instead. - -- `ChainWithMessageLanes::MessageLaneInstance`: this is used to compute runtime storage keys. There - may be several instances of message lane pallet, included in the Runtime. Every instance stores - messages and these messages stored under different keys. When we are verifying storage proofs from - the bridged chain, we should know which instance we're talking to. This is fine, but there's - significant inconvenience with that - this chain runtime must have the same message lane pallet - instance. This does not necessarily mean that we should use the same instance on both chains - - this instance may be used to bridge with another chain/instance, or may not be used at all. - -## Helpers for the Source Chain - -The helpers for the Source Chain reside in the `source` submodule of the -[`messages`](./src/messages.rs) module. The structs are: `FromThisChainMessagePayload`, -`FromBridgedChainMessagesDeliveryProof`, `FromThisChainMessageVerifier`. And the helper functions -are: `maximal_message_size`, `verify_chain_message`, `verify_messages_delivery_proof` and -`estimate_message_dispatch_and_delivery_fee`. 
- -`FromThisChainMessagePayload` is a message that the sender sends through our bridge. It is the -`pallet_bridge_call_dispatch::MessagePayload`, where `call` field is encoded target chain call. So -at this chain we don't see internals of this call - we just know its size. - -`FromThisChainMessageVerifier` is an implementation of `bp_message_lane::LaneMessageVerifier`. It -has following checks in its `verify_message` method: - -1. it'll verify that the used outbound lane is enabled in our runtime; - -1. it'll reject messages if there are too many undelivered outbound messages at this lane. The - sender need to wait while relayers will do their work before sending the message again; - -1. it'll reject a message if it has the wrong dispatch origin declared. Like if the submitter is not - the root of this chain, but it tries to dispatch the message at the target chain using - `pallet_bridge_call_dispatch::CallOrigin::SourceRoot` origin. Or he has provided wrong signature - in the `pallet_bridge_call_dispatch::CallOrigin::TargetAccount` origin; - -1. it'll reject a message if the delivery and dispatch fee that the submitter wants to pay is lesser - than the fee that is computed using the `estimate_message_dispatch_and_delivery_fee` function. - -`estimate_message_dispatch_and_delivery_fee` returns a minimal fee that the submitter needs to pay -for sending a given message. The fee includes: payment for the delivery transaction at the target -chain, payment for delivery confirmation transaction on this chain, payment for `Call` dispatch at -the target chain and relayer interest. - -`FromBridgedChainMessagesDeliveryProof` holds the lane identifier and the storage proof of this -inbound lane state at the bridged chain. This also holds the hash of the target chain header, that -was used to generate this storage proof. 
The proof is verified by the -`verify_messages_delivery_proof`, which simply checks that the target chain header is finalized -(using Substrate bridge module) and then reads the inbound lane state from the proof. - -`verify_chain_message` function checks that the message may be delivered to the bridged chain. There -are two main checks: - -1. that the message size is less than or equal to the `2/3` of maximal extrinsic size at the target - chain. We leave `1/3` for signed extras and for the storage proof overhead; - -1. that the message dispatch weight is less than or equal to the `1/2` of maximal normal extrinsic - weight at the target chain. We leave `1/2` for the delivery transaction overhead. - -## Helpers for the Target Chain - -The helpers for the target chain reside in the `target` submodule of the -[`messages`](./src/messages.rs) module. The structs are: `FromBridgedChainMessagePayload`, -`FromBridgedChainMessagesProof`, `FromBridgedChainMessagesProof`. And the helper functions are: -`maximal_incoming_message_dispatch_weight`, `maximal_incoming_message_size` and -`verify_messages_proof`. - -`FromBridgedChainMessagePayload` corresponds to the `FromThisChainMessagePayload` at the bridged -chain. We expect that messages with this payload are stored in the `OutboundMessages` storage map of -the [message lane module](../../modules/message-lane/README.md). This map is used to build -`FromBridgedChainMessagesProof`. The proof holds the lane id, range of message nonces included in -the proof, storage proof of `OutboundMessages` entries and the hash of bridged chain header that has -been used to build the proof. Additionally, there's storage proof may contain the proof of outbound -lane state. It may be required to prune `relayers` entries at this chain (see -[message lane module documentation](../../modules/message-lane/README.md#What-about-other-Constants-in-the-Message-Lane-Module-Configuration-Trait) -for details). 
This proof is verified by the `verify_messages_proof` function. diff --git a/polkadot/bridges/bin/runtime-common/src/lib.rs b/polkadot/bridges/bin/runtime-common/src/lib.rs deleted file mode 100644 index 2842e3b6592a4867c1f0703877e908a13a10175c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/runtime-common/src/lib.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Common types/functions that may be used by runtimes of all bridged chains. - -#![cfg_attr(not(feature = "std"), no_std)] - -pub mod messages; -pub mod messages_benchmarking; diff --git a/polkadot/bridges/bin/runtime-common/src/messages.rs b/polkadot/bridges/bin/runtime-common/src/messages.rs deleted file mode 100644 index 04b2317749b0af71d3a8c5e3a2550e3cacd600cd..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/runtime-common/src/messages.rs +++ /dev/null @@ -1,1374 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types that allow runtime to act as a source/target endpoint of message lanes. -//! -//! Messages are assumed to be encoded `Call`s of the target chain. Call-dispatch -//! pallet is used to dispatch incoming messages. Message identified by a tuple -//! of to elements - message lane id and message nonce. - -use bp_message_dispatch::MessageDispatch as _; -use bp_message_lane::{ - source_chain::{LaneMessageVerifier, Sender}, - target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages}, - InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, OutboundLaneData, -}; -use bp_runtime::{InstanceId, Size}; -use codec::{Decode, Encode}; -use frame_support::{traits::Instance, weights::Weight, RuntimeDebug}; -use hash_db::Hasher; -use pallet_substrate_bridge::StorageProofChecker; -use sp_runtime::traits::{CheckedAdd, CheckedDiv, CheckedMul}; -use sp_std::{cmp::PartialOrd, convert::TryFrom, fmt::Debug, marker::PhantomData, ops::RangeInclusive, vec::Vec}; -use sp_trie::StorageProof; - -/// Bidirectional message bridge. -pub trait MessageBridge { - /// Instance id of this bridge. - const INSTANCE: InstanceId; - - /// Relayer interest (in percents). - const RELAYER_FEE_PERCENT: u32; - - /// This chain in context of message bridge. - type ThisChain: ThisChainWithMessageLanes; - /// Bridged chain in context of message bridge. - type BridgedChain: ChainWithMessageLanes; - - /// Maximal extrinsic size on target chain. - fn maximal_extrinsic_size_on_target_chain() -> u32; - - /// Returns feasible weights range for given message payload on the target chain. 
- /// - /// If message is being sent with the weight that is out of this range, then it - /// should be rejected. - /// - /// Weights returned from this function shall not include transaction overhead - /// (like weight of signature and signed extensions verification), because they're - /// already accounted by the `weight_of_delivery_transaction`. So this function should - /// return pure call dispatch weights range. - fn weight_limits_of_message_on_bridged_chain( - message_payload: &[u8], - ) -> RangeInclusive>>; - - /// Maximal weight of single message delivery transaction on Bridged chain. - fn weight_of_delivery_transaction(message_payload: &[u8]) -> WeightOf>; - - /// Maximal weight of single message delivery confirmation transaction on This chain. - fn weight_of_delivery_confirmation_transaction_on_this_chain() -> WeightOf>; - - /// Convert weight of This chain to the fee (paid in Balance) of This chain. - fn this_weight_to_this_balance(weight: WeightOf>) -> BalanceOf>; - - /// Convert weight of the Bridged chain to the fee (paid in Balance) of the Bridged chain. - fn bridged_weight_to_bridged_balance(weight: WeightOf>) -> BalanceOf>; - - /// Convert Bridged chain Balance into This chain Balance. - fn bridged_balance_to_this_balance(bridged_balance: BalanceOf>) -> BalanceOf>; -} - -/// Chain that has `message-lane` and `call-dispatch` modules. -pub trait ChainWithMessageLanes { - /// Hash used in the chain. - type Hash: Decode; - /// Accound id on the chain. - type AccountId: Encode + Decode; - /// Public key of the chain account that may be used to verify signatures. - type Signer: Decode; - /// Signature type used on the chain. - type Signature: Decode; - /// Call type on the chain. - type Call: Encode + Decode; - /// Type of weight that is used on the chain. This would almost always be a regular - /// `frame_support::weight::Weight`. 
But since the meaning of weight on different chains - /// may be different, the `WeightOf<>` construct is used to avoid confusion between - /// different weights. - type Weight: From + PartialOrd; - /// Type of balances that is used on the chain. - type Balance: Encode + Decode + CheckedAdd + CheckedDiv + CheckedMul + PartialOrd + From + Copy; - - /// Instance of the message-lane pallet. - type MessageLaneInstance: Instance; -} - -/// This chain that has `message-lane` and `call-dispatch` modules. -pub trait ThisChainWithMessageLanes: ChainWithMessageLanes { - /// Are we accepting any messages to the given lane? - fn is_outbound_lane_enabled(lane: &LaneId) -> bool; - - /// Maximal number of pending (not yet delivered) messages at this chain. - /// - /// Any messages over this limit, will be rejected. - fn maximal_pending_messages_at_outbound_lane() -> MessageNonce; -} - -pub(crate) type ThisChain = ::ThisChain; -pub(crate) type BridgedChain = ::BridgedChain; -pub(crate) type HashOf = ::Hash; -pub(crate) type AccountIdOf = ::AccountId; -pub(crate) type SignerOf = ::Signer; -pub(crate) type SignatureOf = ::Signature; -pub(crate) type WeightOf = ::Weight; -pub(crate) type BalanceOf = ::Balance; -pub(crate) type CallOf = ::Call; -pub(crate) type MessageLaneInstanceOf = ::MessageLaneInstance; - -/// Raw storage proof type (just raw trie nodes). -type RawStorageProof = Vec>; - -/// Compute weight of transaction at runtime where: -/// -/// - transaction payment pallet is being used; -/// - fee multiplier is zero. 
-pub fn transaction_weight_without_multiplier( - base_weight: Weight, - payload_size: Weight, - dispatch_weight: Weight, -) -> Weight { - // non-adjustable per-byte weight is mapped 1:1 to tx weight - let per_byte_weight = payload_size; - - // we assume that adjustable per-byte weight is always zero - let adjusted_per_byte_weight = 0; - - // we assume that transaction tip we use is also zero - let transaction_tip_weight = 0; - - base_weight - .saturating_add(per_byte_weight) - .saturating_add(adjusted_per_byte_weight) - .saturating_add(transaction_tip_weight) - .saturating_add(dispatch_weight) -} - -/// Sub-module that is declaring types required for processing This -> Bridged chain messages. -pub mod source { - use super::*; - - /// Encoded Call of the Bridged chain. We never try to decode it on This chain. - pub type BridgedChainOpaqueCall = Vec; - - /// Message payload for This -> Bridged chain messages. - pub type FromThisChainMessagePayload = pallet_bridge_call_dispatch::MessagePayload< - AccountIdOf>, - SignerOf>, - SignatureOf>, - BridgedChainOpaqueCall, - >; - - /// Messages delivery proof from bridged chain: - /// - /// - hash of finalized header; - /// - storage proof of inbound lane state; - /// - lane id. - #[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug)] - pub struct FromBridgedChainMessagesDeliveryProof { - /// Hash of the bridge header the proof is for. - pub bridged_header_hash: BridgedHeaderHash, - /// Storage trie proof generated for [`Self::bridged_header_hash`]. - pub storage_proof: RawStorageProof, - /// Lane id of which messages were delivered and the proof is for. - pub lane: LaneId, - } - - impl Size for FromBridgedChainMessagesDeliveryProof { - fn size_hint(&self) -> u32 { - u32::try_from( - self.storage_proof - .iter() - .fold(0usize, |sum, node| sum.saturating_add(node.len())), - ) - .unwrap_or(u32::MAX) - } - } - - /// 'Parsed' message delivery proof - inbound lane id and its state. 
- pub type ParsedMessagesDeliveryProofFromBridgedChain = (LaneId, InboundLaneData>>); - - /// Message verifier that is doing all basic checks. - /// - /// This verifier assumes following: - /// - /// - all message lanes are equivalent, so all checks are the same; - /// - messages are being dispatched using `pallet-bridge-call-dispatch` pallet on the target chain. - /// - /// Following checks are made: - /// - /// - message is rejected if its lane is currently blocked; - /// - message is rejected if there are too many pending (undelivered) messages at the outbound lane; - /// - check that the sender has rights to dispatch the call on target chain using provided dispatch origin; - /// - check that the sender has paid enough funds for both message delivery and dispatch. - #[derive(RuntimeDebug)] - pub struct FromThisChainMessageVerifier(PhantomData); - - pub(crate) const OUTBOUND_LANE_DISABLED: &str = "The outbound message lane is disabled."; - pub(crate) const TOO_MANY_PENDING_MESSAGES: &str = "Too many pending messages at the lane."; - pub(crate) const BAD_ORIGIN: &str = "Unable to match the source origin to expected target origin."; - pub(crate) const TOO_LOW_FEE: &str = "Provided fee is below minimal threshold required by the lane."; - - impl LaneMessageVerifier>, FromThisChainMessagePayload, BalanceOf>> - for FromThisChainMessageVerifier - where - B: MessageBridge, - AccountIdOf>: PartialEq + Clone, - { - type Error = &'static str; - - fn verify_message( - submitter: &Sender>>, - delivery_and_dispatch_fee: &BalanceOf>, - lane: &LaneId, - lane_outbound_data: &OutboundLaneData, - payload: &FromThisChainMessagePayload, - ) -> Result<(), Self::Error> { - // reject message if lane is blocked - if !ThisChain::::is_outbound_lane_enabled(lane) { - return Err(OUTBOUND_LANE_DISABLED); - } - - // reject message if there are too many pending messages at this lane - let max_pending_messages = ThisChain::::maximal_pending_messages_at_outbound_lane(); - let pending_messages = 
lane_outbound_data - .latest_generated_nonce - .saturating_sub(lane_outbound_data.latest_received_nonce); - if pending_messages > max_pending_messages { - return Err(TOO_MANY_PENDING_MESSAGES); - } - - // Do the dispatch-specific check. We assume that the target chain uses - // `CallDispatch`, so we verify the message accordingly. - pallet_bridge_call_dispatch::verify_message_origin(submitter, payload).map_err(|_| BAD_ORIGIN)?; - - let minimal_fee_in_this_tokens = - estimate_message_dispatch_and_delivery_fee::(payload, B::RELAYER_FEE_PERCENT)?; - - // compare with actual fee paid - if *delivery_and_dispatch_fee < minimal_fee_in_this_tokens { - return Err(TOO_LOW_FEE); - } - - Ok(()) - } - } - - /// Return maximal message size of This -> Bridged chain message. - pub fn maximal_message_size() -> u32 { - super::target::maximal_incoming_message_size(B::maximal_extrinsic_size_on_target_chain()) - } - - /// Do basic Bridged-chain specific verification of This -> Bridged chain message. - /// - /// Ok result from this function means that the delivery transaction with this message - /// may be 'mined' by the target chain. But the lane may have its own checks (e.g. fee - /// check) that would reject message (see `FromThisChainMessageVerifier`). - pub fn verify_chain_message( - payload: &FromThisChainMessagePayload, - ) -> Result<(), &'static str> { - let weight_limits = B::weight_limits_of_message_on_bridged_chain(&payload.call); - if !weight_limits.contains(&payload.weight.into()) { - return Err("Incorrect message weight declared"); - } - - // The maximal size of extrinsic at Substrate-based chain depends on the - // `frame_system::Config::MaximumBlockLength` and `frame_system::Config::AvailableBlockRatio` - // constants. This check is here to be sure that the lane won't stuck because message is too - // large to fit into delivery transaction. - // - // **IMPORTANT NOTE**: the delivery transaction contains storage proof of the message, not - // the message itself. 
The proof is always larger than the message. But unless chain state - // is enormously large, it should be several dozens/hundreds of bytes. The delivery - // transaction also contains signatures and signed extensions. Because of this, we reserve - // 1/3 of the the maximal extrinsic weight for this data. - if payload.call.len() > maximal_message_size::() as usize { - return Err("The message is too large to be sent over the lane"); - } - - Ok(()) - } - - /// Estimate delivery and dispatch fee that must be paid for delivering a message to the Bridged chain. - /// - /// The fee is paid in This chain Balance, but we use Bridged chain balance to avoid additional conversions. - /// Returns `None` if overflow has happened. - pub fn estimate_message_dispatch_and_delivery_fee( - payload: &FromThisChainMessagePayload, - relayer_fee_percent: u32, - ) -> Result>, &'static str> { - // the fee (in Bridged tokens) of all transactions that are made on the Bridged chain - let delivery_fee = B::bridged_weight_to_bridged_balance(B::weight_of_delivery_transaction(&payload.call)); - let dispatch_fee = B::bridged_weight_to_bridged_balance(payload.weight.into()); - - // the fee (in This tokens) of all transactions that are made on This chain - let delivery_confirmation_fee = - B::this_weight_to_this_balance(B::weight_of_delivery_confirmation_transaction_on_this_chain()); - - // minimal fee (in This tokens) is a sum of all required fees - let minimal_fee = delivery_fee - .checked_add(&dispatch_fee) - .map(B::bridged_balance_to_this_balance) - .and_then(|fee| fee.checked_add(&delivery_confirmation_fee)); - - // before returning, add extra fee that is paid to the relayer (relayer interest) - minimal_fee - .and_then(|fee| - // having message with fee that is near the `Balance::MAX_VALUE` of the chain is - // unlikely and should be treated as an error - // => let's do multiplication first - fee - .checked_mul(&relayer_fee_percent.into()) - .and_then(|interest| 
interest.checked_div(&100u32.into())) - .and_then(|interest| fee.checked_add(&interest))) - .ok_or("Overflow when computing minimal required message delivery and dispatch fee") - } - - /// Verify proof of This -> Bridged chain messages delivery. - pub fn verify_messages_delivery_proof( - proof: FromBridgedChainMessagesDeliveryProof>>, - ) -> Result, &'static str> - where - ThisRuntime: pallet_substrate_bridge::Config, - ThisRuntime: pallet_message_lane::Config>>, - HashOf>: - Into::BridgedChain>>, - { - let FromBridgedChainMessagesDeliveryProof { - bridged_header_hash, - storage_proof, - lane, - } = proof; - pallet_substrate_bridge::Module::::parse_finalized_storage_proof( - bridged_header_hash.into(), - StorageProof::new(storage_proof), - |storage| { - // Messages delivery proof is just proof of single storage key read => any error - // is fatal. - let storage_inbound_lane_data_key = pallet_message_lane::storage_keys::inbound_lane_data_key::< - ThisRuntime, - MessageLaneInstanceOf>, - >(&lane); - let raw_inbound_lane_data = storage - .read_value(storage_inbound_lane_data_key.0.as_ref()) - .map_err(|_| "Failed to read inbound lane state from storage proof")? - .ok_or("Inbound lane state is missing from the messages proof")?; - let inbound_lane_data = InboundLaneData::decode(&mut &raw_inbound_lane_data[..]) - .map_err(|_| "Failed to decode inbound lane state from the proof")?; - - Ok((lane, inbound_lane_data)) - }, - ) - .map_err(<&'static str>::from)? - } -} - -/// Sub-module that is declaring types required for processing Bridged -> This chain messages. -pub mod target { - use super::*; - - /// Call origin for Bridged -> This chain messages. - pub type FromBridgedChainMessageCallOrigin = pallet_bridge_call_dispatch::CallOrigin< - AccountIdOf>, - SignerOf>, - SignatureOf>, - >; - - /// Decoded Bridged -> This message payload. 
- pub type FromBridgedChainMessagePayload = pallet_bridge_call_dispatch::MessagePayload< - AccountIdOf>, - SignerOf>, - SignatureOf>, - FromBridgedChainEncodedMessageCall, - >; - - /// Messages proof from bridged chain: - /// - /// - hash of finalized header; - /// - storage proof of messages and (optionally) outbound lane state; - /// - lane id; - /// - nonces (inclusive range) of messages which are included in this proof. - #[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug)] - pub struct FromBridgedChainMessagesProof { - /// Hash of the finalized bridged header the proof is for. - pub bridged_header_hash: BridgedHeaderHash, - /// A storage trie proof of messages being delivered. - pub storage_proof: RawStorageProof, - pub lane: LaneId, - /// Nonce of the first message being delivered. - pub nonces_start: MessageNonce, - /// Nonce of the last message being delivered. - pub nonces_end: MessageNonce, - } - - impl Size for FromBridgedChainMessagesProof { - fn size_hint(&self) -> u32 { - u32::try_from( - self.storage_proof - .iter() - .fold(0usize, |sum, node| sum.saturating_add(node.len())), - ) - .unwrap_or(u32::MAX) - } - } - - /// Encoded Call of This chain as it is transferred over bridge. - /// - /// Our Call is opaque (`Vec`) for Bridged chain. So it is encoded, prefixed with - /// vector length. Custom decode implementation here is exactly to deal with this. - #[derive(Decode, Encode, RuntimeDebug, PartialEq)] - pub struct FromBridgedChainEncodedMessageCall { - pub(crate) encoded_call: Vec, - pub(crate) _marker: PhantomData, - } - - impl From> for Result>, ()> { - fn from(encoded_call: FromBridgedChainEncodedMessageCall) -> Self { - CallOf::>::decode(&mut &encoded_call.encoded_call[..]).map_err(drop) - } - } - - /// Dispatching Bridged -> This chain messages. 
- #[derive(RuntimeDebug, Clone, Copy)] - pub struct FromBridgedChainMessageDispatch { - _marker: PhantomData<(B, ThisRuntime, ThisCallDispatchInstance)>, - } - - impl - MessageDispatch< as ChainWithMessageLanes>::Balance> - for FromBridgedChainMessageDispatch - where - ThisCallDispatchInstance: frame_support::traits::Instance, - ThisRuntime: pallet_bridge_call_dispatch::Config, - >::Event: - From>, - pallet_bridge_call_dispatch::Module: - bp_message_dispatch::MessageDispatch<(LaneId, MessageNonce), Message = FromBridgedChainMessagePayload>, - { - type DispatchPayload = FromBridgedChainMessagePayload; - - fn dispatch_weight( - message: &DispatchMessage>>, - ) -> frame_support::weights::Weight { - message.data.payload.as_ref().map(|payload| payload.weight).unwrap_or(0) - } - - fn dispatch(message: DispatchMessage>>) { - let message_id = (message.key.lane_id, message.key.nonce); - pallet_bridge_call_dispatch::Module::::dispatch( - B::INSTANCE, - message_id, - message.data.payload.map_err(drop), - ); - } - } - - /// Return maximal dispatch weight of the message we're able to receive. - pub fn maximal_incoming_message_dispatch_weight(maximal_extrinsic_weight: Weight) -> Weight { - maximal_extrinsic_weight / 2 - } - - /// Return maximal message size given maximal extrinsic size. - pub fn maximal_incoming_message_size(maximal_extrinsic_size: u32) -> u32 { - maximal_extrinsic_size / 3 * 2 - } - - /// Verify proof of Bridged -> This chain messages. - /// - /// The `messages_count` argument verification (sane limits) is supposed to be made - /// outside of this function. This function only verifies that the proof declares exactly - /// `messages_count` messages. 
- pub fn verify_messages_proof( - proof: FromBridgedChainMessagesProof>>, - messages_count: u32, - ) -> Result>>>, &'static str> - where - ThisRuntime: pallet_substrate_bridge::Config, - ThisRuntime: pallet_message_lane::Config>>, - HashOf>: - Into::BridgedChain>>, - { - verify_messages_proof_with_parser::( - proof, - messages_count, - |bridged_header_hash, bridged_storage_proof| { - pallet_substrate_bridge::Module::::parse_finalized_storage_proof( - bridged_header_hash.into(), - StorageProof::new(bridged_storage_proof), - |storage_adapter| storage_adapter, - ) - .map(|storage| StorageProofCheckerAdapter::<_, B, ThisRuntime> { - storage, - _dummy: Default::default(), - }) - .map_err(|err| MessageProofError::Custom(err.into())) - }, - ) - .map_err(Into::into) - } - - #[derive(Debug, PartialEq)] - pub(crate) enum MessageProofError { - Empty, - MessagesCountMismatch, - MissingRequiredMessage, - FailedToDecodeMessage, - FailedToDecodeOutboundLaneState, - Custom(&'static str), - } - - impl From for &'static str { - fn from(err: MessageProofError) -> &'static str { - match err { - MessageProofError::Empty => "Messages proof is empty", - MessageProofError::MessagesCountMismatch => "Declared messages count doesn't match actual value", - MessageProofError::MissingRequiredMessage => "Message is missing from the proof", - MessageProofError::FailedToDecodeMessage => "Failed to decode message from the proof", - MessageProofError::FailedToDecodeOutboundLaneState => { - "Failed to decode outbound lane data from the proof" - } - MessageProofError::Custom(err) => err, - } - } - } - - pub(crate) trait MessageProofParser { - fn read_raw_outbound_lane_data(&self, lane_id: &LaneId) -> Option>; - fn read_raw_message(&self, message_key: &MessageKey) -> Option>; - } - - struct StorageProofCheckerAdapter { - storage: StorageProofChecker, - _dummy: sp_std::marker::PhantomData<(B, ThisRuntime)>, - } - - impl MessageProofParser for StorageProofCheckerAdapter - where - H: Hasher, - B: 
MessageBridge, - ThisRuntime: pallet_message_lane::Config>>, - { - fn read_raw_outbound_lane_data(&self, lane_id: &LaneId) -> Option> { - let storage_outbound_lane_data_key = pallet_message_lane::storage_keys::outbound_lane_data_key::< - MessageLaneInstanceOf>, - >(lane_id); - self.storage - .read_value(storage_outbound_lane_data_key.0.as_ref()) - .ok()? - } - - fn read_raw_message(&self, message_key: &MessageKey) -> Option> { - let storage_message_key = pallet_message_lane::storage_keys::message_key::< - ThisRuntime, - MessageLaneInstanceOf>, - >(&message_key.lane_id, message_key.nonce); - self.storage.read_value(storage_message_key.0.as_ref()).ok()? - } - } - - /// Verify proof of Bridged -> This chain messages using given message proof parser. - pub(crate) fn verify_messages_proof_with_parser( - proof: FromBridgedChainMessagesProof>>, - messages_count: u32, - build_parser: BuildParser, - ) -> Result>>>, MessageProofError> - where - BuildParser: FnOnce(HashOf>, RawStorageProof) -> Result, - Parser: MessageProofParser, - { - let FromBridgedChainMessagesProof { - bridged_header_hash, - storage_proof, - lane, - nonces_start, - nonces_end, - } = proof; - - // receiving proofs where end < begin is ok (if proof includes outbound lane state) - let messages_in_the_proof = if let Some(nonces_difference) = nonces_end.checked_sub(nonces_start) { - // let's check that the user (relayer) has passed correct `messages_count` - // (this bounds maximal capacity of messages vec below) - let messages_in_the_proof = nonces_difference.saturating_add(1); - if messages_in_the_proof != MessageNonce::from(messages_count) { - return Err(MessageProofError::MessagesCountMismatch); - } - - messages_in_the_proof - } else { - 0 - }; - - let parser = build_parser(bridged_header_hash, storage_proof)?; - - // Read messages first. All messages that are claimed to be in the proof must - // be in the proof. So any error in `read_value`, or even missing value is fatal. 
- // - // Mind that we allow proofs with no messages if outbound lane state is proved. - let mut messages = Vec::with_capacity(messages_in_the_proof as _); - for nonce in nonces_start..=nonces_end { - let message_key = MessageKey { lane_id: lane, nonce }; - let raw_message_data = parser - .read_raw_message(&message_key) - .ok_or(MessageProofError::MissingRequiredMessage)?; - let message_data = MessageData::>>::decode(&mut &raw_message_data[..]) - .map_err(|_| MessageProofError::FailedToDecodeMessage)?; - messages.push(Message { - key: message_key, - data: message_data, - }); - } - - // Now let's check if proof contains outbound lane state proof. It is optional, so we - // simply ignore `read_value` errors and missing value. - let mut proved_lane_messages = ProvedLaneMessages { - lane_state: None, - messages, - }; - let raw_outbound_lane_data = parser.read_raw_outbound_lane_data(&lane); - if let Some(raw_outbound_lane_data) = raw_outbound_lane_data { - proved_lane_messages.lane_state = Some( - OutboundLaneData::decode(&mut &raw_outbound_lane_data[..]) - .map_err(|_| MessageProofError::FailedToDecodeOutboundLaneState)?, - ); - } - - // Now we may actually check if the proof is empty or not. 
- if proved_lane_messages.lane_state.is_none() && proved_lane_messages.messages.is_empty() { - return Err(MessageProofError::Empty); - } - - // We only support single lane messages in this schema - let mut proved_messages = ProvedMessages::new(); - proved_messages.insert(lane, proved_lane_messages); - - Ok(proved_messages) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use codec::{Decode, Encode}; - use frame_support::weights::Weight; - use std::ops::RangeInclusive; - - const DELIVERY_TRANSACTION_WEIGHT: Weight = 100; - const DELIVERY_CONFIRMATION_TRANSACTION_WEIGHT: Weight = 100; - const THIS_CHAIN_WEIGHT_TO_BALANCE_RATE: Weight = 2; - const BRIDGED_CHAIN_WEIGHT_TO_BALANCE_RATE: Weight = 4; - const BRIDGED_CHAIN_TO_THIS_CHAIN_BALANCE_RATE: u32 = 6; - const BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT: Weight = 2048; - const BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE: u32 = 1024; - - /// Bridge that is deployed on ThisChain and allows sending/receiving messages to/from BridgedChain; - #[derive(Debug, PartialEq, Eq)] - struct OnThisChainBridge; - - impl MessageBridge for OnThisChainBridge { - const INSTANCE: InstanceId = *b"this"; - const RELAYER_FEE_PERCENT: u32 = 10; - - type ThisChain = ThisChain; - type BridgedChain = BridgedChain; - - fn maximal_extrinsic_size_on_target_chain() -> u32 { - BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE - } - - fn weight_limits_of_message_on_bridged_chain(message_payload: &[u8]) -> RangeInclusive { - let begin = std::cmp::min(BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, message_payload.len() as Weight); - begin..=BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT - } - - fn weight_of_delivery_transaction(_message_payload: &[u8]) -> Weight { - DELIVERY_TRANSACTION_WEIGHT - } - - fn weight_of_delivery_confirmation_transaction_on_this_chain() -> Weight { - DELIVERY_CONFIRMATION_TRANSACTION_WEIGHT - } - - fn this_weight_to_this_balance(weight: Weight) -> ThisChainBalance { - ThisChainBalance(weight as u32 * THIS_CHAIN_WEIGHT_TO_BALANCE_RATE as u32) - } - - fn 
bridged_weight_to_bridged_balance(weight: Weight) -> BridgedChainBalance { - BridgedChainBalance(weight as u32 * BRIDGED_CHAIN_WEIGHT_TO_BALANCE_RATE as u32) - } - - fn bridged_balance_to_this_balance(bridged_balance: BridgedChainBalance) -> ThisChainBalance { - ThisChainBalance(bridged_balance.0 * BRIDGED_CHAIN_TO_THIS_CHAIN_BALANCE_RATE as u32) - } - } - - /// Bridge that is deployed on BridgedChain and allows sending/receiving messages to/from ThisChain; - #[derive(Debug, PartialEq, Eq)] - struct OnBridgedChainBridge; - - impl MessageBridge for OnBridgedChainBridge { - const INSTANCE: InstanceId = *b"brdg"; - const RELAYER_FEE_PERCENT: u32 = 20; - - type ThisChain = BridgedChain; - type BridgedChain = ThisChain; - - fn maximal_extrinsic_size_on_target_chain() -> u32 { - unreachable!() - } - - fn weight_limits_of_message_on_bridged_chain(_message_payload: &[u8]) -> RangeInclusive { - unreachable!() - } - - fn weight_of_delivery_transaction(_message_payload: &[u8]) -> Weight { - unreachable!() - } - - fn weight_of_delivery_confirmation_transaction_on_this_chain() -> Weight { - unreachable!() - } - - fn this_weight_to_this_balance(_weight: Weight) -> BridgedChainBalance { - unreachable!() - } - - fn bridged_weight_to_bridged_balance(_weight: Weight) -> ThisChainBalance { - unreachable!() - } - - fn bridged_balance_to_this_balance(_this_balance: ThisChainBalance) -> BridgedChainBalance { - unreachable!() - } - } - - #[derive(Debug, PartialEq, Decode, Encode, Clone)] - struct ThisChainAccountId(u32); - #[derive(Debug, PartialEq, Decode, Encode)] - struct ThisChainSigner(u32); - #[derive(Debug, PartialEq, Decode, Encode)] - struct ThisChainSignature(u32); - #[derive(Debug, PartialEq, Decode, Encode)] - enum ThisChainCall { - #[codec(index = 42)] - Transfer, - #[codec(index = 84)] - Mint, - } - - #[derive(Debug, PartialEq, Decode, Encode)] - struct BridgedChainAccountId(u32); - #[derive(Debug, PartialEq, Decode, Encode)] - struct BridgedChainSigner(u32); - 
#[derive(Debug, PartialEq, Decode, Encode)] - struct BridgedChainSignature(u32); - #[derive(Debug, PartialEq, Decode, Encode)] - enum BridgedChainCall {} - - macro_rules! impl_wrapped_balance { - ($name:ident) => { - #[derive(Debug, PartialEq, Decode, Encode, Clone, Copy)] - struct $name(u32); - - impl From for $name { - fn from(balance: u32) -> Self { - Self(balance) - } - } - - impl sp_std::ops::Add for $name { - type Output = $name; - - fn add(self, other: Self) -> Self { - Self(self.0 + other.0) - } - } - - impl sp_std::ops::Div for $name { - type Output = $name; - - fn div(self, other: Self) -> Self { - Self(self.0 / other.0) - } - } - - impl sp_std::ops::Mul for $name { - type Output = $name; - - fn mul(self, other: Self) -> Self { - Self(self.0 * other.0) - } - } - - impl sp_std::cmp::PartialOrd for $name { - fn partial_cmp(&self, other: &Self) -> Option { - self.0.partial_cmp(&other.0) - } - } - - impl CheckedAdd for $name { - fn checked_add(&self, other: &Self) -> Option { - self.0.checked_add(other.0).map(Self) - } - } - - impl CheckedDiv for $name { - fn checked_div(&self, other: &Self) -> Option { - self.0.checked_div(other.0).map(Self) - } - } - - impl CheckedMul for $name { - fn checked_mul(&self, other: &Self) -> Option { - self.0.checked_mul(other.0).map(Self) - } - } - }; - } - - impl_wrapped_balance!(ThisChainBalance); - impl_wrapped_balance!(BridgedChainBalance); - - struct ThisChain; - - impl ChainWithMessageLanes for ThisChain { - type Hash = (); - type AccountId = ThisChainAccountId; - type Signer = ThisChainSigner; - type Signature = ThisChainSignature; - type Call = ThisChainCall; - type Weight = frame_support::weights::Weight; - type Balance = ThisChainBalance; - - type MessageLaneInstance = pallet_message_lane::DefaultInstance; - } - - impl ThisChainWithMessageLanes for ThisChain { - fn is_outbound_lane_enabled(lane: &LaneId) -> bool { - lane == TEST_LANE_ID - } - - fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { - 
MAXIMAL_PENDING_MESSAGES_AT_TEST_LANE - } - } - - struct BridgedChain; - - impl ChainWithMessageLanes for BridgedChain { - type Hash = (); - type AccountId = BridgedChainAccountId; - type Signer = BridgedChainSigner; - type Signature = BridgedChainSignature; - type Call = BridgedChainCall; - type Weight = frame_support::weights::Weight; - type Balance = BridgedChainBalance; - - type MessageLaneInstance = pallet_message_lane::DefaultInstance; - } - - impl ThisChainWithMessageLanes for BridgedChain { - fn is_outbound_lane_enabled(_lane: &LaneId) -> bool { - unreachable!() - } - - fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { - unreachable!() - } - } - - fn test_lane_outbound_data() -> OutboundLaneData { - OutboundLaneData::default() - } - - #[test] - fn message_from_bridged_chain_is_decoded() { - // the message is encoded on the bridged chain - let message_on_bridged_chain = source::FromThisChainMessagePayload:: { - spec_version: 1, - weight: 100, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: ThisChainCall::Transfer.encode(), - } - .encode(); - - // and sent to this chain where it is decoded - let message_on_this_chain = - target::FromBridgedChainMessagePayload::::decode(&mut &message_on_bridged_chain[..]) - .unwrap(); - assert_eq!( - message_on_this_chain, - target::FromBridgedChainMessagePayload:: { - spec_version: 1, - weight: 100, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: target::FromBridgedChainEncodedMessageCall:: { - encoded_call: ThisChainCall::Transfer.encode(), - _marker: PhantomData::default(), - }, - } - ); - assert_eq!(Ok(ThisChainCall::Transfer), message_on_this_chain.call.into()); - } - - const TEST_LANE_ID: &LaneId = b"test"; - const MAXIMAL_PENDING_MESSAGES_AT_TEST_LANE: MessageNonce = 32; - - fn regular_outbound_message_payload() -> source::FromThisChainMessagePayload { - source::FromThisChainMessagePayload:: { - spec_version: 1, - weight: 100, - origin: 
pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: vec![42], - } - } - - #[test] - fn message_fee_is_checked_by_verifier() { - const EXPECTED_MINIMAL_FEE: u32 = 5500; - - // payload of the This -> Bridged chain message - let payload = regular_outbound_message_payload(); - - // let's check if estimation matching hardcoded value - assert_eq!( - source::estimate_message_dispatch_and_delivery_fee::( - &payload, - OnThisChainBridge::RELAYER_FEE_PERCENT, - ), - Ok(ThisChainBalance(EXPECTED_MINIMAL_FEE)), - ); - - // and now check that the verifier checks the fee - assert_eq!( - source::FromThisChainMessageVerifier::::verify_message( - &Sender::Root, - &ThisChainBalance(1), - &TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ), - Err(source::TOO_LOW_FEE) - ); - assert!( - source::FromThisChainMessageVerifier::::verify_message( - &Sender::Root, - &ThisChainBalance(1_000_000), - &TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ) - .is_ok(), - ); - } - - #[test] - fn should_disallow_root_calls_from_regular_accounts() { - // payload of the This -> Bridged chain message - let payload = source::FromThisChainMessagePayload:: { - spec_version: 1, - weight: 100, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: vec![42], - }; - - // and now check that the verifier checks the fee - assert_eq!( - source::FromThisChainMessageVerifier::::verify_message( - &Sender::Signed(ThisChainAccountId(0)), - &ThisChainBalance(1_000_000), - &TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ), - Err(source::BAD_ORIGIN) - ); - assert_eq!( - source::FromThisChainMessageVerifier::::verify_message( - &Sender::None, - &ThisChainBalance(1_000_000), - &TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ), - Err(source::BAD_ORIGIN) - ); - assert!( - source::FromThisChainMessageVerifier::::verify_message( - &Sender::Root, - &ThisChainBalance(1_000_000), - &TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ) - .is_ok(), - ); - } - - 
#[test] - fn should_verify_source_and_target_origin_matching() { - // payload of the This -> Bridged chain message - let payload = source::FromThisChainMessagePayload:: { - spec_version: 1, - weight: 100, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceAccount(ThisChainAccountId(1)), - call: vec![42], - }; - - // and now check that the verifier checks the fee - assert_eq!( - source::FromThisChainMessageVerifier::::verify_message( - &Sender::Signed(ThisChainAccountId(0)), - &ThisChainBalance(1_000_000), - &TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ), - Err(source::BAD_ORIGIN) - ); - assert!( - source::FromThisChainMessageVerifier::::verify_message( - &Sender::Signed(ThisChainAccountId(1)), - &ThisChainBalance(1_000_000), - &TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ) - .is_ok(), - ); - } - - #[test] - fn message_is_rejected_when_sent_using_disabled_lane() { - assert_eq!( - source::FromThisChainMessageVerifier::::verify_message( - &Sender::Root, - &ThisChainBalance(1_000_000), - b"dsbl", - &test_lane_outbound_data(), - ®ular_outbound_message_payload(), - ), - Err(source::OUTBOUND_LANE_DISABLED) - ); - } - - #[test] - fn message_is_rejected_when_there_are_too_many_pending_messages_at_outbound_lane() { - assert_eq!( - source::FromThisChainMessageVerifier::::verify_message( - &Sender::Root, - &ThisChainBalance(1_000_000), - &TEST_LANE_ID, - &OutboundLaneData { - latest_received_nonce: 100, - latest_generated_nonce: 100 + MAXIMAL_PENDING_MESSAGES_AT_TEST_LANE + 1, - ..Default::default() - }, - ®ular_outbound_message_payload(), - ), - Err(source::TOO_MANY_PENDING_MESSAGES) - ); - } - - #[test] - fn verify_chain_message_rejects_message_with_too_small_declared_weight() { - assert!( - source::verify_chain_message::(&source::FromThisChainMessagePayload::< - OnThisChainBridge, - > { - spec_version: 1, - weight: 5, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: vec![1, 2, 3, 4, 5, 6], - },) - .is_err() - ); - } - 
- #[test] - fn verify_chain_message_rejects_message_with_too_large_declared_weight() { - assert!( - source::verify_chain_message::(&source::FromThisChainMessagePayload::< - OnThisChainBridge, - > { - spec_version: 1, - weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT + 1, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: vec![1, 2, 3, 4, 5, 6], - },) - .is_err() - ); - } - - #[test] - fn verify_chain_message_rejects_message_too_large_message() { - assert!( - source::verify_chain_message::(&source::FromThisChainMessagePayload::< - OnThisChainBridge, - > { - spec_version: 1, - weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: vec![0; source::maximal_message_size::() as usize + 1], - },) - .is_err() - ); - } - - #[test] - fn verify_chain_message_accepts_maximal_message() { - assert_eq!( - source::verify_chain_message::(&source::FromThisChainMessagePayload::< - OnThisChainBridge, - > { - spec_version: 1, - weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: vec![0; source::maximal_message_size::() as _], - },), - Ok(()), - ); - } - - #[derive(Debug)] - struct TestMessageProofParser { - failing: bool, - messages: RangeInclusive, - outbound_lane_data: Option, - } - - impl target::MessageProofParser for TestMessageProofParser { - fn read_raw_outbound_lane_data(&self, _lane_id: &LaneId) -> Option> { - if self.failing { - Some(vec![]) - } else { - self.outbound_lane_data.clone().map(|data| data.encode()) - } - } - - fn read_raw_message(&self, message_key: &MessageKey) -> Option> { - if self.failing { - Some(vec![]) - } else if self.messages.contains(&message_key.nonce) { - Some( - MessageData:: { - payload: message_key.nonce.encode(), - fee: BridgedChainBalance(0), - } - .encode(), - ) - } else { - None - } - } - } - - #[allow(clippy::reversed_empty_ranges)] - fn no_messages_range() -> RangeInclusive { - 1..=0 - } - - fn 
messages_proof(nonces_end: MessageNonce) -> target::FromBridgedChainMessagesProof<()> { - target::FromBridgedChainMessagesProof { - bridged_header_hash: (), - storage_proof: vec![], - lane: Default::default(), - nonces_start: 1, - nonces_end, - } - } - - #[test] - fn messages_proof_is_rejected_if_declared_less_than_actual_number_of_messages() { - assert_eq!( - target::verify_messages_proof_with_parser::( - messages_proof(10), - 5, - |_, _| unreachable!(), - ), - Err(target::MessageProofError::MessagesCountMismatch), - ); - } - - #[test] - fn messages_proof_is_rejected_if_declared_more_than_actual_number_of_messages() { - assert_eq!( - target::verify_messages_proof_with_parser::( - messages_proof(10), - 15, - |_, _| unreachable!(), - ), - Err(target::MessageProofError::MessagesCountMismatch), - ); - } - - #[test] - fn message_proof_is_rejected_if_build_parser_fails() { - assert_eq!( - target::verify_messages_proof_with_parser::( - messages_proof(10), - 10, - |_, _| Err(target::MessageProofError::Custom("test")), - ), - Err(target::MessageProofError::Custom("test")), - ); - } - - #[test] - fn message_proof_is_rejected_if_required_message_is_missing() { - assert_eq!( - target::verify_messages_proof_with_parser::(messages_proof(10), 10, |_, _| Ok( - TestMessageProofParser { - failing: false, - messages: 1..=5, - outbound_lane_data: None, - } - ),), - Err(target::MessageProofError::MissingRequiredMessage), - ); - } - - #[test] - fn message_proof_is_rejected_if_message_decode_fails() { - assert_eq!( - target::verify_messages_proof_with_parser::(messages_proof(10), 10, |_, _| Ok( - TestMessageProofParser { - failing: true, - messages: 1..=10, - outbound_lane_data: None, - } - ),), - Err(target::MessageProofError::FailedToDecodeMessage), - ); - } - - #[test] - fn message_proof_is_rejected_if_outbound_lane_state_decode_fails() { - assert_eq!( - target::verify_messages_proof_with_parser::(messages_proof(0), 0, |_, _| Ok( - TestMessageProofParser { - failing: true, - 
messages: no_messages_range(), - outbound_lane_data: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - } - ),), - Err(target::MessageProofError::FailedToDecodeOutboundLaneState), - ); - } - - #[test] - fn message_proof_is_rejected_if_it_is_empty() { - assert_eq!( - target::verify_messages_proof_with_parser::(messages_proof(0), 0, |_, _| Ok( - TestMessageProofParser { - failing: false, - messages: no_messages_range(), - outbound_lane_data: None, - } - ),), - Err(target::MessageProofError::Empty), - ); - } - - #[test] - fn non_empty_message_proof_without_messages_is_accepted() { - assert_eq!( - target::verify_messages_proof_with_parser::(messages_proof(0), 0, |_, _| Ok( - TestMessageProofParser { - failing: false, - messages: no_messages_range(), - outbound_lane_data: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - } - ),), - Ok(vec![( - Default::default(), - ProvedLaneMessages { - lane_state: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - messages: Vec::new(), - }, - )] - .into_iter() - .collect()), - ); - } - - #[test] - fn non_empty_message_proof_is_accepted() { - assert_eq!( - target::verify_messages_proof_with_parser::(messages_proof(1), 1, |_, _| Ok( - TestMessageProofParser { - failing: false, - messages: 1..=1, - outbound_lane_data: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - } - ),), - Ok(vec![( - Default::default(), - ProvedLaneMessages { - lane_state: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - messages: vec![Message { - key: MessageKey { - lane_id: Default::default(), - nonce: 1 - }, - data: MessageData { - payload: 1u64.encode(), - fee: BridgedChainBalance(0) - }, - }], - }, - )] - .into_iter() - .collect()), 
- ); - } - - #[test] - fn verify_messages_proof_with_parser_does_not_panic_if_messages_count_mismatches() { - assert_eq!( - target::verify_messages_proof_with_parser::( - messages_proof(u64::MAX), - 0, - |_, _| Ok(TestMessageProofParser { - failing: false, - messages: 0..=u64::MAX, - outbound_lane_data: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - }), - ), - Err(target::MessageProofError::MessagesCountMismatch), - ); - } -} diff --git a/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs b/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs deleted file mode 100644 index 4aa2abbd6b471f1e89609cd784837dd0d90544ab..0000000000000000000000000000000000000000 --- a/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Everything required to run benchmarks of message-lanes, based on -//! `bridge_runtime_common::messages` implementation. 
- -#![cfg(feature = "runtime-benchmarks")] - -use crate::messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, AccountIdOf, BalanceOf, - BridgedChain, HashOf, MessageBridge, ThisChain, -}; - -use bp_message_lane::{LaneId, MessageData, MessageKey, MessagePayload}; -use codec::Encode; -use ed25519_dalek::{PublicKey, SecretKey, Signer, KEYPAIR_LENGTH, SECRET_KEY_LENGTH}; -use frame_support::weights::Weight; -use pallet_message_lane::benchmarking::{MessageDeliveryProofParams, MessageProofParams, ProofSize}; -use sp_core::Hasher; -use sp_runtime::traits::Header; -use sp_std::prelude::*; -use sp_trie::{record_all_keys, trie_types::TrieDBMut, Layout, MemoryDB, Recorder, TrieMut}; - -/// Generate ed25519 signature to be used in `pallet_brdige_call_dispatch::CallOrigin::TargetAccount`. -/// -/// Returns public key of the signer and the signature itself. -pub fn ed25519_sign(target_call: &impl Encode, source_account_id: &impl Encode) -> ([u8; 32], [u8; 64]) { - // key from the repo example (https://docs.rs/ed25519-dalek/1.0.1/ed25519_dalek/struct.SecretKey.html) - let target_secret = SecretKey::from_bytes(&[ - 157, 097, 177, 157, 239, 253, 090, 096, 186, 132, 074, 244, 146, 236, 044, 196, 068, 073, 197, 105, 123, 050, - 105, 025, 112, 059, 172, 003, 028, 174, 127, 096, - ]) - .expect("harcoded key is valid"); - let target_public: PublicKey = (&target_secret).into(); - - let mut target_pair_bytes = [0u8; KEYPAIR_LENGTH]; - target_pair_bytes[..SECRET_KEY_LENGTH].copy_from_slice(&target_secret.to_bytes()); - target_pair_bytes[SECRET_KEY_LENGTH..].copy_from_slice(&target_public.to_bytes()); - let target_pair = ed25519_dalek::Keypair::from_bytes(&target_pair_bytes).expect("hardcoded pair is valid"); - - let mut signature_message = Vec::new(); - target_call.encode_to(&mut signature_message); - source_account_id.encode_to(&mut signature_message); - let target_origin_signature = target_pair - .try_sign(&signature_message) - 
.expect("Ed25519 try_sign should not fail in benchmarks"); - - (target_public.to_bytes(), target_origin_signature.to_bytes()) -} - -/// Prepare proof of messages for the `receive_messages_proof` call. -pub fn prepare_message_proof( - params: MessageProofParams, - make_bridged_message_storage_key: MM, - make_bridged_outbound_lane_data_key: ML, - make_bridged_header: MH, - message_dispatch_weight: Weight, - message_payload: MessagePayload, -) -> (FromBridgedChainMessagesProof>>, Weight) -where - B: MessageBridge, - H: Hasher, - R: pallet_substrate_bridge::Config, - ::Hash: Into>>, - MM: Fn(MessageKey) -> Vec, - ML: Fn(LaneId) -> Vec, - MH: Fn(H::Out) -> ::Header, -{ - // prepare Bridged chain storage with messages and (optionally) outbound lane state - let message_count = params - .message_nonces - .end() - .saturating_sub(*params.message_nonces.start()) - + 1; - let mut storage_keys = Vec::with_capacity(message_count as usize + 1); - let mut root = Default::default(); - let mut mdb = MemoryDB::default(); - { - let mut trie = TrieDBMut::::new(&mut mdb, &mut root); - - // insert messages - for nonce in params.message_nonces.clone() { - let message_key = MessageKey { - lane_id: params.lane, - nonce, - }; - let message_data = MessageData { - fee: BalanceOf::>::from(0), - payload: message_payload.clone(), - }; - let storage_key = make_bridged_message_storage_key(message_key); - trie.insert(&storage_key, &message_data.encode()) - .map_err(|_| "TrieMut::insert has failed") - .expect("TrieMut::insert should not fail in benchmarks"); - storage_keys.push(storage_key); - } - - // insert outbound lane state - if let Some(outbound_lane_data) = params.outbound_lane_data { - let storage_key = make_bridged_outbound_lane_data_key(params.lane); - trie.insert(&storage_key, &outbound_lane_data.encode()) - .map_err(|_| "TrieMut::insert has failed") - .expect("TrieMut::insert should not fail in benchmarks"); - storage_keys.push(storage_key); - } - } - root = grow_trie(root, &mut mdb, 
params.size); - - // generate storage proof to be delivered to This chain - let mut proof_recorder = Recorder::::new(); - record_all_keys::, _>(&mdb, &root, &mut proof_recorder) - .map_err(|_| "record_all_keys has failed") - .expect("record_all_keys should not fail in benchmarks"); - let storage_proof = proof_recorder.drain().into_iter().map(|n| n.data.to_vec()).collect(); - - // prepare Bridged chain header and insert it into the Substrate pallet - let bridged_header = make_bridged_header(root); - let bridged_header_hash = bridged_header.hash(); - pallet_substrate_bridge::initialize_for_benchmarks::(bridged_header); - - ( - FromBridgedChainMessagesProof { - bridged_header_hash: bridged_header_hash.into(), - storage_proof, - lane: params.lane, - nonces_start: *params.message_nonces.start(), - nonces_end: *params.message_nonces.end(), - }, - message_dispatch_weight - .checked_mul(message_count) - .expect("too many messages requested by benchmark"), - ) -} - -/// Prepare proof of messages delivery for the `receive_messages_delivery_proof` call. 
-pub fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams>>, - make_bridged_inbound_lane_data_key: ML, - make_bridged_header: MH, -) -> FromBridgedChainMessagesDeliveryProof>> -where - B: MessageBridge, - H: Hasher, - R: pallet_substrate_bridge::Config, - ::Hash: Into>>, - ML: Fn(LaneId) -> Vec, - MH: Fn(H::Out) -> ::Header, -{ - // prepare Bridged chain storage with inbound lane state - let storage_key = make_bridged_inbound_lane_data_key(params.lane); - let mut root = Default::default(); - let mut mdb = MemoryDB::default(); - { - let mut trie = TrieDBMut::::new(&mut mdb, &mut root); - trie.insert(&storage_key, ¶ms.inbound_lane_data.encode()) - .map_err(|_| "TrieMut::insert has failed") - .expect("TrieMut::insert should not fail in benchmarks"); - } - root = grow_trie(root, &mut mdb, params.size); - - // generate storage proof to be delivered to This chain - let mut proof_recorder = Recorder::::new(); - record_all_keys::, _>(&mdb, &root, &mut proof_recorder) - .map_err(|_| "record_all_keys has failed") - .expect("record_all_keys should not fail in benchmarks"); - let storage_proof = proof_recorder.drain().into_iter().map(|n| n.data.to_vec()).collect(); - - // prepare Bridged chain header and insert it into the Substrate pallet - let bridged_header = make_bridged_header(root); - let bridged_header_hash = bridged_header.hash(); - pallet_substrate_bridge::initialize_for_benchmarks::(bridged_header); - - FromBridgedChainMessagesDeliveryProof { - bridged_header_hash: bridged_header_hash.into(), - storage_proof, - lane: params.lane, - } -} - -/// Populate trie with dummy keys+values until trie has at least given size. 
-fn grow_trie(mut root: H::Out, mdb: &mut MemoryDB, trie_size: ProofSize) -> H::Out { - let (iterations, leaf_size, minimal_trie_size) = match trie_size { - ProofSize::Minimal(_) => return root, - ProofSize::HasLargeLeaf(size) => (1, size, size), - ProofSize::HasExtraNodes(size) => (8, 1, size), - }; - - let mut key_index = 0; - loop { - // generate storage proof to be delivered to This chain - let mut proof_recorder = Recorder::::new(); - record_all_keys::, _>(mdb, &root, &mut proof_recorder) - .map_err(|_| "record_all_keys has failed") - .expect("record_all_keys should not fail in benchmarks"); - let size: usize = proof_recorder.drain().into_iter().map(|n| n.data.len()).sum(); - if size > minimal_trie_size as _ { - return root; - } - - let mut trie = TrieDBMut::::from_existing(mdb, &mut root) - .map_err(|_| "TrieDBMut::from_existing has failed") - .expect("TrieDBMut::from_existing should not fail in benchmarks"); - for _ in 0..iterations { - trie.insert(&key_index.encode(), &vec![42u8; leaf_size as _]) - .map_err(|_| "TrieMut::insert has failed") - .expect("TrieMut::insert should not fail in benchmarks"); - key_index += 1; - } - trie.commit(); - } -} diff --git a/polkadot/bridges/deny.toml b/polkadot/bridges/deny.toml deleted file mode 100644 index 2e384622f5ee5fffc96e893c5f6cea296b030d93..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deny.toml +++ /dev/null @@ -1,201 +0,0 @@ -# This template contains all of the possible sections and their default values - -# Note that all fields that take a lint level have these possible values: -# * deny - An error will be produced and the check will fail -# * warn - A warning will be produced, but the check will not fail -# * allow - No warning or error will be produced, though in some cases a note -# will be - -# The values provided in this template are the default values that will be used -# when any section or field is not specified in your own configuration - -# If 1 or more target triples (and 
optionally, target_features) are specified, -# only the specified targets will be checked when running `cargo deny check`. -# This means, if a particular package is only ever used as a target specific -# dependency, such as, for example, the `nix` crate only being used via the -# `target_family = "unix"` configuration, that only having windows targets in -# this list would mean the nix crate, as well as any of its exclusive -# dependencies not shared by any other crates, would be ignored, as the target -# list here is effectively saying which targets you are building for. -targets = [ - # The triple can be any string, but only the target triples built in to - # rustc (as of 1.40) can be checked against actual config expressions - #{ triple = "x86_64-unknown-linux-musl" }, - # You can also specify which target_features you promise are enabled for a - # particular target. target_features are currently not validated against - # the actual valid features supported by the target architecture. - #{ triple = "wasm32-unknown-unknown", features = ["atomics"] }, -] - -# This section is considered when running `cargo deny check advisories` -# More documentation for the advisories section can be found here: -# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html -[advisories] -# The path where the advisory database is cloned/fetched into -db-path = "~/.cargo/advisory-db" -# The url of the advisory database to use -db-urls = ["https://github.com/rustsec/advisory-db"] -# The lint level for security vulnerabilities -vulnerability = "deny" -# The lint level for unmaintained crates -unmaintained = "warn" -# The lint level for crates that have been yanked from their source registry -yanked = "warn" -# The lint level for crates with security notices. Note that as of -# 2019-12-17 there are no security notice advisories in -# https://github.com/rustsec/advisory-db -notice = "warn" -# A list of advisory IDs to ignore. 
Note that ignored advisories will still -# output a note when they are encountered. -ignore = [ - # yaml-rust < clap. Not feasible to upgrade and also not possible to trigger in practice. - "RUSTSEC-2018-0006", - # We need to wait until Substrate updates their `wasmtime` dependency to fix this. - # TODO: See issue #676: https://github.com/paritytech/parity-bridges-common/issues/676 - "RUSTSEC-2021-0013", - # We need to wait until Substrate updates their `libp2p` dependency to fix this. - # TODO: See issue #681: https://github.com/paritytech/parity-bridges-common/issues/681 - "RUSTSEC-2020-0123", - # We need to wait until Substrate updates their `hyper` dependency to fix this. - # TODO: See issue #710: https://github.com/paritytech/parity-bridges-common/issues/681 - "RUSTSEC-2021-0020", -] -# Threshold for security vulnerabilities, any vulnerability with a CVSS score -# lower than the range specified will be ignored. Note that ignored advisories -# will still output a note when they are encountered. -# * None - CVSS Score 0.0 -# * Low - CVSS Score 0.1 - 3.9 -# * Medium - CVSS Score 4.0 - 6.9 -# * High - CVSS Score 7.0 - 8.9 -# * Critical - CVSS Score 9.0 - 10.0 -#severity-threshold = - -# This section is considered when running `cargo deny check licenses` -# More documentation for the licenses section can be found here: -# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html -[licenses] -# The lint level for crates which do not have a detectable license -unlicensed = "deny" -# List of explictly allowed licenses -# See https://spdx.org/licenses/ for list of possible licenses -# [possible values: any SPDX 3.7 short identifier (+ optional exception)]. -allow = [ - "BlueOak-1.0.0" -] -# List of explictly disallowed licenses -# See https://spdx.org/licenses/ for list of possible licenses -# [possible values: any SPDX 3.7 short identifier (+ optional exception)]. 
-deny = [ - #"Nokia", -] -# Lint level for licenses considered copyleft -copyleft = "allow" -# Blanket approval or denial for OSI-approved or FSF Free/Libre licenses -# * both - The license will be approved if it is both OSI-approved *AND* FSF -# * either - The license will be approved if it is either OSI-approved *OR* FSF -# * osi-only - The license will be approved if is OSI-approved *AND NOT* FSF -# * fsf-only - The license will be approved if is FSF *AND NOT* OSI-approved -# * neither - This predicate is ignored and the default lint level is used -allow-osi-fsf-free = "either" -# Lint level used when no other predicates are matched -# 1. License isn't in the allow or deny lists -# 2. License isn't copyleft -# 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither" -default = "deny" -# The confidence threshold for detecting a license from license text. -# The higher the value, the more closely the license text must be to the -# canonical license text of a valid SPDX license file. -# [possible values: any between 0.0 and 1.0]. -confidence-threshold = 0.9 -# Allow 1 or more licenses on a per-crate basis, so that particular licenses -# aren't accepted for every possible crate as with the normal allow list -exceptions = [ - # Each entry is the crate and version constraint, and its specific allow - # list - #{ allow = ["Zlib"], name = "adler32", version = "*" }, -] - -# Some crates don't have (easily) machine readable licensing information, -# adding a clarification entry for it allows you to manually specify the -# licensing information -[[licenses.clarify]] -# The name of the crate the clarification applies to -name = "ring" -# THe optional version constraint for the crate -#version = "*" -# The SPDX expression for the license requirements of the crate -expression = "OpenSSL" -# One or more files in the crate's source used as the "source of truth" for -# the license expression. 
If the contents match, the clarification will be used -# when running the license check, otherwise the clarification will be ignored -# and the crate will be checked normally, which may produce warnings or errors -# depending on the rest of your configuration -license-files = [ - # Each entry is a crate relative path, and the (opaque) hash of its contents - { path = "LICENSE", hash = 0xbd0eed23 } -] -[[licenses.clarify]] -name = "webpki" -expression = "ISC" -license-files = [{ path = "LICENSE", hash = 0x001c7e6c }] - -[licenses.private] -# If true, ignores workspace crates that aren't published, or are only -# published to private registries -ignore = false -# One or more private registries that you might publish crates to, if a crate -# is only published to private registries, and ignore is true, the crate will -# not have its license(s) checked -registries = [ - #"https://sekretz.com/registry -] - -# This section is considered when running `cargo deny check bans`. -# More documentation about the 'bans' section can be found here: -# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html -[bans] -# Lint level for when multiple versions of the same crate are detected -multiple-versions = "warn" -# The graph highlighting used when creating dotgraphs for crates -# with multiple versions -# * lowest-version - The path to the lowest versioned duplicate is highlighted -# * simplest-path - The path to the version with the fewest edges is highlighted -# * all - Both lowest-version and simplest-path are used -highlight = "lowest-version" -# List of crates that are allowed. Use with care! -allow = [ - #{ name = "ansi_term", version = "=0.11.0" }, -] -# List of crates to deny -deny = [ - { name = "parity-util-mem", version = "<0.6" } - # Each entry the name of a crate and a version range. If version is - # not specified, all versions will be matched. -] -# Certain crates/versions that will be skipped when doing duplicate detection. 
-skip = [ - #{ name = "ansi_term", version = "=0.11.0" }, -] -# Similarly to `skip` allows you to skip certain crates during duplicate -# detection. Unlike skip, it also includes the entire tree of transitive -# dependencies starting at the specified crate, up to a certain depth, which is -# by default infinite -skip-tree = [ - #{ name = "ansi_term", version = "=0.11.0", depth = 20 }, -] - -# This section is considered when running `cargo deny check sources`. -# More documentation about the 'sources' section can be found here: -# https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html -[sources] -# Lint level for what to happen when a crate from a crate registry that is not -# in the allow list is encountered -unknown-registry = "deny" -# Lint level for what to happen when a crate from a git repository that is not -# in the allow list is encountered -unknown-git = "allow" -# List of URLs for allowed crate registries. Defaults to the crates.io index -# if not specified. If it is specified but empty, no registries are allowed. -allow-registry = ["https://github.com/rust-lang/crates.io-index"] -# List of URLs for allowed Git repositories -allow-git = [] diff --git a/polkadot/bridges/deployments/BridgeDeps.Dockerfile b/polkadot/bridges/deployments/BridgeDeps.Dockerfile deleted file mode 100644 index af0f7816e6eee7b5f48cd9abc93c0d38a92e9bcc..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/BridgeDeps.Dockerfile +++ /dev/null @@ -1,32 +0,0 @@ -# Image with dependencies required to build projects from the bridge repo. -# -# This image is meant to be used as a building block when building images for -# the various components in the bridge repo, such as nodes and relayers. 
-FROM ubuntu:xenial - -ENV LAST_DEPS_UPDATE 2020-12-21 - -RUN set -eux; \ - apt-get update && \ - apt-get install -y curl ca-certificates && \ - apt-get install -y cmake pkg-config libssl-dev git clang libclang-dev - -ENV LAST_CERTS_UPDATE 2020-12-21 - -RUN update-ca-certificates && \ - curl https://sh.rustup.rs -sSf | sh -s -- -y - -ENV PATH="/root/.cargo/bin:${PATH}" -ENV LAST_RUST_UPDATE 2020-12-21 - -RUN rustup update stable && \ - rustup install nightly && \ - rustup target add wasm32-unknown-unknown --toolchain nightly - -RUN rustc -vV && \ - cargo -V && \ - gcc -v && \ - g++ -v && \ - cmake --version - -ENV RUST_BACKTRACE 1 diff --git a/polkadot/bridges/deployments/README.md b/polkadot/bridges/deployments/README.md deleted file mode 100644 index 857c8c28e074df6ace5e7962132c196518197ea6..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/README.md +++ /dev/null @@ -1,250 +0,0 @@ -# Bridge Deployments - -## Requirements -Make sure to install `docker` and `docker-compose` to be able to run and test bridge deployments. If -for whatever reason you can't or don't want to use Docker, you can find some scripts for running the -bridge [here](https://github.com/svyatonik/parity-bridges-common.test). - -## Networks -One of the building blocks we use for our deployments are _networks_. A network is a collection of -homogenous blockchain nodes. We have Docker Compose files for each network that we want to bridge. -Each of the compose files found in the `./networks` folder is able to independently spin up a -network like so: - -```bash -docker-compose -f ./networks/rialto.yml up -``` - -After running this command we would have a network of several nodes producing blocks. - -## Bridges -A _bridge_ is a way for several _networks_ to connect to one another. Bridge deployments have their -own Docker Compose files which can be found in the `./bridges` folder. 
These Compose files typically -contain bridge relayers, which are services external to blockchain nodes, and other components such -as testing infrastructure, or user interfaces. - -Unlike the network Compose files, these *cannot* be deployed on their own. They must be combined -with different networks. - -In general, we can deploy the bridge using `docker-compose up` in the following way: - -```bash -docker-compose -f .yml \ - -f .yml \ - -f .yml \ - -f .yml up -``` - -If you want to see how the Compose commands are actually run, check out the source code of the -[`./run.sh`](./run.sh). - -One thing worth noting is that we have a _monitoring_ Compose file. This adds support for Prometheus -and Grafana. We cover these in more details in the [Monitoring](#monitoring) section. At the moment -the monitoring Compose file is _not_ optional, and must be included for bridge deployments. - -### Running and Updating Deployments -We currently support two bridge deployments -1. Ethereum PoA to Rialto Substrate -2. Rialto Substrate to Millau Substrate - -These bridges can be deployed using our [`./run.sh`](./run.sh) script. - -The first argument it takes is the name of the bridge you want to run. Right now we only support two -bridges: `poa-rialto` and `rialto-millau`. - -```bash -./run.sh poa-rialto -``` - -If you add a second `update` argument to the script it will pull the latest images from Docker Hub -and restart the deployment. - -```bash -./run.sh rialto-millau update -``` - -You can also bring down a deployment using the script with the `stop` argument. - -```bash -./run.sh poa-rialto stop -``` - -### Adding Deployments -We need two main things when adding a new deployment. First, the new network which we want to -bridge. A compose file for the network should be added in the `/networks/` folder. Secondly we'll -need a new bridge Compose file in `./bridges/`. 
This should configure the bridge relayer nodes -correctly for the two networks, and add any additional components needed for the deployment. If you -want you can also add support in the `./run` script for the new deployment. While recommended it's -not strictly required. - -## General Notes - -Rialto authorities are named: `Alice`, `Bob`, `Charlie`, `Dave`, `Eve`. -Rialto-PoA authorities are named: `Arthur`, `Bertha`, `Carlos`. -Millau authorities are named: `Alice`, `Bob`, `Charlie`, `Dave`, `Eve`. - -Both authorities and following accounts have enough funds (for test purposes) on corresponding Substrate chains: - -- on Rialto: `Ferdie`, `George`, `Harry`. -- on Millau: `Ferdie`, `George`, `Harry`. - -Names of accounts on Substrate (Rialto and Millau) chains may be prefixed with `//` and used as -seeds for the `sr25519` keys. This seed may also be used in the signer argument in Substrate -and PoA relays. Example: - -```bash -./substrate-relay relay-headers rialto-to-millau \ - --rialto-host rialto-node-alice \ - --rialto-port 9944 \ - --millau-host millau-node-alice \ - --millau-port 9944 \ - --rialto-signer //Harry \ - --prometheus-host=0.0.0.0 -``` - -Some accounts are used by bridge components. Using these accounts to sign other transactions -is not recommended, because this may lead to nonces conflict. - -Following accounts are used when `poa-rialto` bridge is running: - -- Rialto's `Alice` signs relay transactions with new Rialto-PoA headers; -- Rialto's `Bob` signs relay transactions with Rialto-PoA -> Rialto currency exchange proofs. -- Rialto-PoA's `Arthur`: signs relay transactions with new Rialto headers; -- Rialto-PoA's `Bertha`: signs currency exchange transactions. 
- -Following accounts are used when `rialto-millau` bridge is running: - -- Millau's `Charlie` signs relay transactions with new Rialto headers; -- Rialto's `Charlie` signs relay transactions with new Millau headers; -- Millau's `Dave` signs Millau transactions which contain messages for Rialto; -- Rialto's `Dave` signs Rialto transactions which contain messages for Millau; -- Millau's `Eve` signs relay transactions with message delivery confirmations from Rialto to Millau; -- Rialto's `Eve` signs relay transactions with messages from Millau to Rialto; -- Millau's `Ferdie` signs relay transactions with messages from Rialto to Millau; -- Rialto's `Ferdie` signs relay transactions with message delivery confirmations from Millau to Rialto. - -### Docker Usage -When the network is running you can query logs from individual nodes using: - -```bash -docker logs rialto_poa-node-bertha_1 -f -``` - -To kill all left over containers and start the network from scratch next time: -```bash -docker ps -a --format "{{.ID}}" | xargs docker rm # This removes all containers! -``` - -### Docker Compose Usage -If you're not familiar with how to use `docker-compose` here are some useful commands you'll need -when interacting with the bridge deployments: - -```bash -docker-compose pull # Get the latest images from the Docker Hub -docker-compose build # This is going to build images -docker-compose up # Start all the nodes -docker-compose up -d # Start the nodes in detached mode. -docker-compose down # Stop the network. -``` - -Note that for the you'll need to add the appropriate `-f` arguments that were mentioned in the -[Bridges](#bridges) section. You can read more about using multiple Compose files -[here](https://docs.docker.com/compose/extends/#multiple-compose-files). One thing worth noting is -that the _order_ the compose files are specified in matters. A different order will result in a -different configuration. 
- -You can sanity check the final config like so: - -```bash -docker-compose -f docker-compose.yml -f docker-compose.override.yml config > docker-compose.merged.yml -``` - -## Docker and Git Deployment -It is also possible to avoid using images from the Docker Hub and instead build -containers from Git. There are two ways to build the images this way. - -### Git Repo -If you have cloned the bridges repo you can build local Docker images by running the following -command at the top level of the repo: - -```bash -docker build . -t local/ --build-arg=PROJECT= -``` - -This will build a local image of a particular component with a tag of -`local/`. This tag can be used in Docker Compose files. - -You can configure the build using using Docker -[build arguments](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg). -Here are the arguments currently supported: - - `BRIDGE_REPO`: Git repository of the bridge node and relay code - - `BRIDGE_HASH`: Commit hash within that repo (can also be a branch or tag) - - `ETHEREUM_REPO`: Git repository of the OpenEthereum client - - `ETHEREUM_HASH`: Commit hash within that repo (can also be a branch or tag) - - `PROJECT`: Project to build withing bridges repo. Can be one of: - - `rialto-bridge-node` - - `millau-bridge-node` - - `ethereum-poa-relay` - - `substrate-relay` - -### GitHub Actions -We have a nightly job which runs and publishes Docker images for the different nodes and relayers to -the [ParityTech Docker Hub](https://hub.docker.com/u/paritytech) organization. These images are used -for our ephemeral (temporary) test networks. Additionally, any time a tag in the form of `v*` is -pushed to GitHub the publishing job is run. This will build all the components (nodes, relayers) and -publish them. 
- -With images built using either method, all you have to do to use them in a deployment is change the -`image` field in the existing Docker Compose files to point to the tag of the image you want to use. - -### Monitoring -[Prometheus](https://prometheus.io/) is used by the bridge relay to monitor information such as system -resource use, and block data (e.g the best blocks it knows about). In order to visualize this data -a [Grafana](https://grafana.com/) dashboard can be used. - -As part of the Rialto `docker-compose` setup we spin up a Prometheus server and Grafana dashboard. The -Prometheus server connects to the Prometheus data endpoint exposed by the bridge relay. The Grafana -dashboard uses the Prometheus server as its data source. - -The default port for the bridge relay's Prometheus data is `9616`. The host and port can be -configured though the `--prometheus-host` and `--prometheus-port` flags. The Prometheus server's -dashboard can be accessed at `http://localhost:9090`. The Grafana dashboard can be accessed at -`http://localhost:3000`. Note that the default log-in credentials for Grafana are `admin:admin`. - -### Environment Variables -Here is an example `.env` file which is used for production deployments and network updates. For -security reasons it is not kept as part of version control. When deploying a network this -file should be correctly populated and kept in the appropriate [`bridges`](`./bridges`) deployment -folder. - -The `UI_SUBSTRATE_PROVIDER` variable lets you define the url of the Substrate node that the user -interface will connect to. `UI_ETHEREUM_PROVIDER` is used only as a guidance for users to connect -Metamask to the right Ethereum network. `UI_EXPECTED_ETHEREUM_NETWORK_ID` is used by -the user interface as a fail safe to prevent users from connecting their Metamask extension to an -unexpected network. 
- -```bash -GRAFANA_ADMIN_PASS=admin_pass -GRAFANA_SERVER_ROOT_URL=%(protocol)s://%(domain)s:%(http_port)s/ -GRAFANA_SERVER_DOMAIN=server.domain.io -MATRIX_ACCESS_TOKEN="access-token" -WITH_PROXY=1 # Optional -UI_SUBSTRATE_PROVIDER=ws://localhost:9944 -UI_ETHEREUM_PROVIDER=http://localhost:8545 -UI_EXPECTED_ETHEREUM_NETWORK_ID=105 -``` - -### UI - -Use [wss://rialto.bridges.test-installations.parity.io/](https://polkadot.js.org/apps/) -as a custom endpoint for [https://polkadot.js.org/apps/](https://polkadot.js.org/apps/). - -### Polkadot.js UI - -To teach the UI decode our custom types used in the pallet, go to: `Settings -> Developer` -and import the [`./types.json`](./types.json) - -## Scripts - -The are some bash scripts in `scripts` folder that allow testing `Relay` -without running the entire network within docker. Use if needed for development. diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/Front-end.Dockerfile b/polkadot/bridges/deployments/bridges/poa-rialto/Front-end.Dockerfile deleted file mode 100644 index 427f0504e57d17e0ae7439de6658cd1ff6b4f54f..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/Front-end.Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -FROM node:12 as build-deps - -# install tools and dependencies -RUN set -eux; \ - apt-get install -y git - -# clone UI repo -RUN cd /usr/src/ && git clone https://github.com/paritytech/bridge-ui.git -WORKDIR /usr/src/bridge-ui -RUN yarn -ARG SUBSTRATE_PROVIDER -ARG ETHEREUM_PROVIDER -ARG EXPECTED_ETHEREUM_NETWORK_ID - -ENV SUBSTRATE_PROVIDER $SUBSTRATE_PROVIDER -ENV ETHEREUM_PROVIDER $ETHEREUM_PROVIDER -ENV EXPECTED_ETHEREUM_NETWORK_ID $EXPECTED_ETHEREUM_NETWORK_ID - -RUN yarn build:docker - -# Stage 2 - the production environment -FROM nginx:1.12 -COPY --from=build-deps /usr/src/bridge-ui/nginx/*.conf /etc/nginx/conf.d/ -COPY --from=build-deps /usr/src/bridge-ui/dist /usr/share/nginx/html -EXPOSE 80 -CMD ["nginx", "-g", "daemon off;"] diff --git 
a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/grafana-dashboard.yaml b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/grafana-dashboard.yaml deleted file mode 100644 index d671bfb2242d78fa8246a79b254dac9a54c34131..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/grafana-dashboard.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- name: 'default' - orgId: 1 - folder: '' - type: file - options: - path: '/etc/grafana/provisioning/dashboards' \ No newline at end of file diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-poa-to-rialto-exchange-dashboard.json b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-poa-to-rialto-exchange-dashboard.json deleted file mode 100644 index 7e197bb882f8cc924ba200a611fb8feb10b2ab30..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-poa-to-rialto-exchange-dashboard.json +++ /dev/null @@ -1,474 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 7, - "x": 0, - "y": 0 - }, - "id": 2, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": 
"auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_best_block_numbers", - "instant": true, - "interval": "", - "legendFormat": "Best {{type}} block", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Best finalized blocks", - "type": "stat" - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 5, - "x": 7, - "y": 0 - }, - "id": 12, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_processed_blocks", - "instant": true, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Number of processed blocks since last restart", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 6, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - 
"spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_system_average_load", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": null - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Average System Load", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 6, - "x": 18, - "y": 0 - }, - "id": 8, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_process_cpu_usage_percentage", - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay Process CPU Usage", - "type": "gauge" - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - 
"value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 7 - }, - "id": 14, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_processed_transactions", - "instant": true, - "interval": "", - "legendFormat": "{{type}} transactions", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Number of processed transactions since last restart", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 9 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Ethereum_to_Substrate_Exchange_process_memory_usage_bytes / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory Usage for Relay Process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - 
"yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Ethereum PoA to Rialto Exchange Dashboard", - "uid": "relay-poa-to-rialto-exchange", - "version": 1 -} diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-poa-to-rialto-headers-dashboard.json b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-poa-to-rialto-headers-dashboard.json deleted file mode 100644 index 36c2ab94692a5adcea9d6de4b96c5ce6a2d27999..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-poa-to-rialto-headers-dashboard.json +++ /dev/null @@ -1,694 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "5m", - "handler": 1, - "message": 
"", - "name": "Synced Header Difference is Over 5 (Ethereum PoA to Rialto)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "Shows how many headers behind the target chain is from the source chain.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"source\"}) - max(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"target\"})", - "format": "table", - "instant": false, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Difference Between Source and Target Headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - 
"conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "2m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "3m", - "frequency": "5m", - "handler": 1, - "name": "No New Headers (Ethereum PoA to Rialto)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "How many headers has the relay synced from the source node in the last 2 mins?", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 16, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max_over_time(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"source\"}[2m])", - "interval": "", - "legendFormat": "Number of Ethereum PoA Headers Synced on Rialto", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Headers Synced on Rialto (Last 2 Mins)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - 
"values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": { - "align": null - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 8 - }, - "id": 2, - "interval": "5s", - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Sync_best_block_numbers", - "format": "time_series", - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Best Known Header on {{node}} Node", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Best Blocks according to Relay", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 6, - "x": 12, - "y": 8 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 
10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Ethereum_to_Substrate_Sync_system_average_load", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": null - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Average System Load", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 18, - "y": 8 - }, - "id": 12, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "avg_over_time(Ethereum_to_Substrate_Sync_process_cpu_usage_percentage[1m])", - "instant": true, - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay Process CPU Usage ", - "type": "gauge" - }, - { - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - 
"color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 14 - }, - "id": 4, - "options": { - "displayMode": "gradient", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showUnfilled": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Ethereum_to_Substrate_Sync_blocks_in_state", - "instant": true, - "interval": "", - "legendFormat": "{{state}}", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Queued Headers in Relay", - "type": "bargauge" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 16 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Ethereum_to_Substrate_Sync_process_memory_usage_bytes / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory Usage for Relay Process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - 
"max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Ethereum PoA to Rialto Header Sync Dashboard", - "uid": "relay-poa-to-rialto-headers", - "version": 1 -} diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-rialto-to-poa-headers-dashboard.json b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-rialto-to-poa-headers-dashboard.json deleted file mode 100644 index cac19b3fde549eb9e64937ce5c6f9305042f88c7..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/dashboards/relay-rialto-to-poa-headers-dashboard.json +++ /dev/null @@ -1,694 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "5m", - "handler": 1, - "message": "", - "name": "Synced Header Difference is Over 5 (Rialto to 
Ethereum PoA)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "Shows how many headers behind the target chain is from the source chain.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"source\"}) - max(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"target\"})", - "format": "table", - "instant": false, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Difference Between Source and Target Headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - 
"type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "2m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "3m", - "frequency": "5m", - "handler": 1, - "name": "No New Headers (Rialto to Ethereum PoA)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "How many headers has the relay synced from the source node in the last 2 mins?", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 16, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max_over_time(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"source\"}[2m])", - "interval": "", - "legendFormat": "Number of Rialto Headers Synced on Ethereum PoA", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Headers Synced on Ethereum PoA (Last 2 Mins)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - 
"label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": { - "align": null - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 8 - }, - "id": 2, - "interval": "5s", - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Substrate_to_Ethereum_Sync_best_block_numbers", - "format": "time_series", - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Best Known Header on {{node}} Node", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Best Blocks according to Relay", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 6, - "x": 12, - "y": 8 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - 
"targets": [ - { - "expr": "Substrate_to_Ethereum_Sync_system_average_load", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": null - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Average System Load", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 18, - "y": 8 - }, - "id": 12, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "avg_over_time(Substrate_to_Ethereum_Sync_process_cpu_usage_percentage[1m])", - "instant": true, - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay Process CPU Usage ", - "type": "gauge" - }, - { - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - 
"color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 14 - }, - "id": 4, - "options": { - "displayMode": "gradient", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showUnfilled": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Substrate_to_Ethereum_Sync_blocks_in_state", - "instant": true, - "interval": "", - "legendFormat": "{{state}}", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Queued Headers in Relay", - "type": "bargauge" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 16 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Substrate_to_Ethereum_Sync_process_memory_usage_bytes / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory Usage for Relay Process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - 
}, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Rialto to Ethereum PoA Header Sync Dashboard", - "uid": "relay-rialto-to-poa-headers", - "version": 1 -} diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/datasources/grafana-datasource.yaml b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/datasources/grafana-datasource.yaml deleted file mode 100644 index b85cf06e2bd53a1b9b69f51a64c80b3ba5454611..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/datasources/grafana-datasource.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# list of datasources to insert/update depending -# whats available in the database -datasources: - # name of the datasource. Required -- name: Prometheus - # datasource type. Required - type: prometheus - # access mode. direct or proxy. Required - access: proxy - # org id. will default to orgId 1 if not specified - orgId: 1 - # url - url: http://prometheus-metrics:9090 - # mark as default datasource. 
Max one per org - isDefault: true - version: 1 diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/notifiers/grafana-notifier.yaml b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/notifiers/grafana-notifier.yaml deleted file mode 100644 index 4eb6ea3863e976a436cb2884f54fb9bba5c80e50..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/grafana/provisioning/notifiers/grafana-notifier.yaml +++ /dev/null @@ -1,15 +0,0 @@ -notifiers: - - name: Matrix - type: webhook - uid: notifier1 - is_default: true - send_reminder: true - frequency: 1h - disable_resolve_message: false - settings: - url: http://grafana-matrix-notifier:4567/hook?rule=bridge - http_method: POST - -delete_notifiers: - - name: Matrix - uid: notifier1 diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/prometheus/prometheus.yml b/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/prometheus/prometheus.yml deleted file mode 100644 index 8d8e3ae877e29cd676ab495e852765cbbda60705..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/dashboard/prometheus/prometheus.yml +++ /dev/null @@ -1,24 +0,0 @@ -scrape_configs: - # The job name is added as a label `job=` to any timeseries scraped from this config. - - job_name: 'poa_to_rialto_relay_node' - - # Override the global default and scrape targets from this job every 15 seconds. - scrape_interval: 15s - static_configs: - - targets: ['relay-headers-poa-to-rialto:9616'] - - # The job name is added as a label `job=` to any timeseries scraped from this config. - - job_name: 'poa_exchange_rialto_relay_node' - - # Override the global default and scrape targets from this job every 15 seconds. - scrape_interval: 15s - static_configs: - - targets: ['relay-poa-exchange-rialto:9616'] - - # The job name is added as a label `job=` to any timeseries scraped from this config. 
- - job_name: 'rialto_to_poa_relay_node' - - # Override the global default and scrape targets from this job every 15 seconds. - scrape_interval: 15s - static_configs: - - targets: ['relay-headers-rialto-to-poa:9616'] diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/docker-compose.yml b/polkadot/bridges/deployments/bridges/poa-rialto/docker-compose.yml deleted file mode 100644 index 13faa0983925a4575b1de23c43ea558d9a197ac9..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/docker-compose.yml +++ /dev/null @@ -1,92 +0,0 @@ -# This Compose file should be built using the Rialto and Eth-PoA node -# compose files. Otherwise it won't work. - -version: '3.5' -services: - # We override these nodes to make sure we have the correct chain config for this network. - poa-node-arthur: &poa-node - volumes: - - ./bridges/poa-rialto/poa-config:/config - poa-node-bertha: - <<: *poa-node - poa-node-carlos: - <<: *poa-node - - # We provide an override for this particular node since this is a public facing - # node which we use to connect from things like Polkadot JS Apps. 
- rialto-node-charlie: - environment: - VIRTUAL_HOST: rialto.bridges.test-installations.parity.io,wss.rialto.brucke.link - VIRTUAL_PORT: 9944 - LETSENCRYPT_HOST: rialto.bridges.test-installations.parity.io,wss.rialto.brucke.link - LETSENCRYPT_EMAIL: admin@parity.io - - relay-headers-poa-to-rialto: ð-poa-relay - image: paritytech/ethereum-poa-relay - entrypoint: /entrypoints/relay-headers-poa-to-rialto-entrypoint.sh - volumes: - - ./bridges/poa-rialto/entrypoints:/entrypoints - environment: - RUST_LOG: rpc=trace,bridge=trace - ports: - - "9616:9616" - depends_on: &all-nodes - - poa-node-arthur - - poa-node-bertha - - poa-node-carlos - - rialto-node-alice - - rialto-node-bob - - rialto-node-charlie - - rialto-node-dave - - rialto-node-eve - - relay-poa-exchange-rialto: - <<: *eth-poa-relay - entrypoint: /entrypoints/relay-poa-exchange-rialto-entrypoint.sh - ports: - - "9716:9616" - - relay-headers-rialto-to-poa: - <<: *eth-poa-relay - entrypoint: /entrypoints/relay-headers-rialto-to-poa-entrypoint.sh - ports: - - "9816:9616" - - poa-exchange-tx-generator: - <<: *eth-poa-relay - entrypoint: /entrypoints/poa-exchange-tx-generator-entrypoint.sh - environment: - EXCHANGE_GEN_MIN_AMOUNT_FINNEY: ${EXCHANGE_GEN_MIN_AMOUNT_FINNEY:-1} - EXCHANGE_GEN_MAX_AMOUNT_FINNEY: ${EXCHANGE_GEN_MAX_AMOUNT_FINNEY:-100000} - EXCHANGE_GEN_MAX_SUBMIT_DELAY_S: ${EXCHANGE_GEN_MAX_SUBMIT_DELAY_S:-60} - ports: - - "9916:9616" - depends_on: - - relay-headers-poa-to-rialto - - relay-headers-rialto-to-poa - - front-end: - build: - context: . - dockerfile: ./bridges/poa-rialto/Front-end.Dockerfile - args: - SUBSTRATE_PROVIDER: ${UI_SUBSTRATE_PROVIDER:-ws://localhost:9944} - ETHEREUM_PROVIDER: ${UI_ETHEREUM_PROVIDER:-http://localhost:8545} - EXPECTED_ETHEREUM_NETWORK_ID: ${UI_EXPECTED_ETHEREUM_NETWORK_ID:-105} - ports: - - "8080:80" - - # Note: These are being overridden from the top level `monitoring` compose file. 
- prometheus-metrics: - volumes: - - ./bridges/poa-rialto/dashboard/prometheus/:/etc/prometheus/ - depends_on: *all-nodes - - grafana-dashboard: - volumes: - - ./bridges/poa-rialto/dashboard/grafana/provisioning/:/etc/grafana/provisioning/ - environment: - VIRTUAL_HOST: dashboard.rialto.bridges.test-installations.parity.io,grafana.rialto.brucke.link - VIRTUAL_PORT: 3000 - LETSENCRYPT_HOST: dashboard.rialto.bridges.test-installations.parity.io,grafana.rialto.brucke.link - LETSENCRYPT_EMAIL: admin@parity.io diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh b/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh deleted file mode 100755 index 04c9292b21df2e29657025eac80a5d829a69da69..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh +++ /dev/null @@ -1,99 +0,0 @@ -#!/bin/bash - -# THIS SCRIPT IS NOT INTENDED FOR USE IN PRODUCTION ENVIRONMENT -# -# This scripts periodically calls relay binary to generate PoA -> Substrate -# exchange transaction from hardcoded PoA senders (assuming they have -# enough funds) to hardcoded Substrate recipients. - -set -eu - -# Path to relay binary -RELAY_BINARY_PATH=${RELAY_BINARY_PATH:-./ethereum-poa-relay} -# Ethereum node host -ETH_HOST=${ETH_HOST:-poa-node-arthur} -# Ethereum node port -ETH_PORT=${ETH_PORT:-8545} -# Ethereum chain id -ETH_CHAIN_ID=${ETH_CHAIN_ID:-105} - -# All possible Substrate recipients (hex-encoded public keys) -SUB_RECIPIENTS=( - # Alice (5GrwvaEF...) - "d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d"\ - # Bob (5FHneW46...) - "8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48"\ - # Charlie (5FLSigC9...) - "90b5ab205c6974c9ea841be688864633dc9ca8a357843eeacf2314649965fe22"\ - # Dave (5DAAnrj7...) 
- "306721211d5404bd9da88e0204360a1a9ab8b87c66c1bc2fcdd37f3c2222cc20"\ - # Eve (5HGjWAeF...) - "e659a7a1628cdd93febc04a4e0646ea20e9f5f0ce097d9a05290d4a9e054df4e"\ - # Ferdie (5CiPPseX...) - "1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c" -) -# All possible Ethereum signers (hex-encoded private keys) -# (note that we're tracking nonce here => sender must not send concurrent transactions) -ETH_SIGNERS=( - # Bertha account (0x007594304039c2937a12220338aab821d819f5a4) and its current nonce (unknown by default) - "bc10e0f21e33456ade82182dd1ebdbdd89bca923d4e4adbd90fb5b44d7098cbe" "" -) -# Minimal exchange amount (in finney) -MIN_EXCHANGE_AMOUNT_FINNEY=${EXCHANGE_GEN_MIN_AMOUNT_FINNEY:-1} # 0.1 ETH -# Maximal exchange amount (in finney) -MAX_EXCHANGE_AMOUNT_FINNEY=${EXCHANGE_GEN_MAX_AMOUNT_FINNEY:-100000} # 100 ETH -# Max delay before submitting transactions (s) -MAX_SUBMIT_DELAY_S=${EXCHANGE_GEN_MAX_SUBMIT_DELAY_S:-60} - -while true -do - # sleep some time - SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1` - echo "Sleeping $SUBMIT_DELAY_S seconds..." 
- sleep $SUBMIT_DELAY_S - - # select recipient - SUB_RECIPIENTS_MAX_INDEX=$((${#SUB_RECIPIENTS[@]} - 1)) - SUB_RECIPIENT_INDEX=`shuf -i 0-$SUB_RECIPIENTS_MAX_INDEX -n 1` - SUB_RECIPIENT=${SUB_RECIPIENTS[$SUB_RECIPIENT_INDEX]} - - # select signer - ETH_SIGNERS_MAX_INDEX=$(((${#ETH_SIGNERS[@]} - 1) / 2)) - ETH_SIGNERS_INDEX=`shuf -i 0-$ETH_SIGNERS_MAX_INDEX -n 1` - ETH_SIGNER_INDEX=$(($ETH_SIGNERS_INDEX * 2)) - ETH_SIGNER_NONCE_INDEX=$(($ETH_SIGNER_INDEX + 1)) - ETH_SIGNER=${ETH_SIGNERS[$ETH_SIGNER_INDEX]} - ETH_SIGNER_NONCE=${ETH_SIGNERS[$ETH_SIGNER_NONCE_INDEX]} - if [ -z $ETH_SIGNER_NONCE ]; then - ETH_SIGNER_NONCE_ARG= - else - ETH_SIGNER_NONCE_ARG=`printf -- "--eth-nonce=%s" $ETH_SIGNER_NONCE` - fi - - # select amount - EXCHANGE_AMOUNT_FINNEY=`shuf -i $MIN_EXCHANGE_AMOUNT_FINNEY-$MAX_EXCHANGE_AMOUNT_FINNEY -n 1` - EXCHANGE_AMOUNT_ETH=`printf "%s000" $EXCHANGE_AMOUNT_FINNEY` - - # submit transaction - echo "Sending $EXCHANGE_AMOUNT_ETH from PoA:$ETH_SIGNER to Substrate:$SUB_RECIPIENT. 
Nonce: $ETH_SIGNER_NONCE" - set -x - SUBMIT_OUTPUT=`$RELAY_BINARY_PATH 2>&1 eth-submit-exchange-tx \ - --sub-recipient=$SUB_RECIPIENT \ - --eth-host=$ETH_HOST \ - --eth-port=$ETH_PORT \ - --eth-chain-id=$ETH_CHAIN_ID \ - --eth-signer=$ETH_SIGNER \ - --eth-amount=$EXCHANGE_AMOUNT_ETH \ - $ETH_SIGNER_NONCE_ARG` - set +x - - # update sender nonce - SUBMIT_OUTPUT_RE='nonce: ([0-9]+)' - if [[ $SUBMIT_OUTPUT =~ $SUBMIT_OUTPUT_RE ]]; then - ETH_SIGNER_NONCE=${BASH_REMATCH[1]} - ETH_SIGNERS[$ETH_SIGNER_NONCE_INDEX]=$(($ETH_SIGNER_NONCE + 1)) - else - echo "Missing nonce in relay response: $SUBMIT_OUTPUT" - exit 1 - fi -done diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh b/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh deleted file mode 100755 index 2f051d40d5c82aa4e53f9b09f2b749a021ff095e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 3 -curl -v http://poa-node-arthur:8545/api/health -curl -v http://poa-node-bertha:8545/api/health -curl -v http://poa-node-carlos:8545/api/health -curl -v http://rialto-node-alice:9933/health -curl -v http://rialto-node-bob:9933/health -curl -v http://rialto-node-charlie:9933/health - -/home/user/ethereum-poa-relay eth-to-sub \ - --sub-host rialto-node-alice \ - --eth-host poa-node-arthur \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh b/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh deleted file mode 100755 index 1e51d2d32d1a1ff9dfd6fa6543b2ea9894f308a5..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh +++ 
/dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 10 - -curl -v http://rialto-node-bob:9933/health -curl -v http://poa-node-bertha:8545/api/health - -# Try to deploy contracts first -# networkID = 0x69 -# Arthur's key. -/home/user/ethereum-poa-relay eth-deploy-contract \ - --eth-chain-id 105 \ - --eth-signer 0399dbd15cf6ee8250895a1f3873eb1e10e23ca18e8ed0726c63c4aea356e87d \ - --sub-host rialto-node-bob \ - --eth-host poa-node-bertha || echo "Failed to deploy contracts." - -sleep 10 -echo "Starting SUB -> ETH relay" -/home/user/ethereum-poa-relay sub-to-eth \ - --eth-contract c9a61fb29e971d1dabfd98657969882ef5d0beee \ - --eth-chain-id 105 \ - --eth-signer 0399dbd15cf6ee8250895a1f3873eb1e10e23ca18e8ed0726c63c4aea356e87d \ - --sub-host rialto-node-bob \ - --eth-host poa-node-bertha \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh b/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh deleted file mode 100755 index 7be12000b9153ab2b610cf4d0aeddb67978c610d..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 3 -curl -v http://poa-node-arthur:8545/api/health -curl -v http://poa-node-bertha:8545/api/health -curl -v http://poa-node-carlos:8545/api/health -curl -v http://rialto-node-alice:9933/health -curl -v http://rialto-node-bob:9933/health -curl -v http://rialto-node-charlie:9933/health - -/home/user/ethereum-poa-relay eth-exchange-sub \ - --sub-host rialto-node-alice \ - --sub-signer //Bob \ - --eth-host poa-node-arthur \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/address_book.json b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/address_book.json deleted file mode 
100644 index 9e26dfeeb6e641a33dae4961196235bdb965b21b..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/address_book.json +++ /dev/null @@ -1 +0,0 @@ -{} \ No newline at end of file diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/arthur.json b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/arthur.json deleted file mode 100644 index fa59a46480c27c74d5d675908885b70e37c68330..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/arthur.json +++ /dev/null @@ -1 +0,0 @@ -{"id":"dd04f316-bc9d-2deb-4a34-51014cd5f34f","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"aa91e6f0e6cf48208be4a1bcf15c6f30"},"ciphertext":"6e057599b13a87e8181bb39a40e14848fdc97958d493ddfa6bb1260350f69328","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"79dd8c09c5c066b830179a2558a51efca6d97c0db2c4128090a01835786823c5"},"mac":"8f8b8e2c9de29ec8eefc54a60055e30ae7ff4dd4a367eaf38880edb887da771e"},"address":"005e714f896a8b7cede9d38688c1a81de72a58e4","name":"","meta":"{}"} \ No newline at end of file diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/bertha.json b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/bertha.json deleted file mode 100644 index 7168ec4f71f73133dadc461a4c8dac0fe029bc8e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/bertha.json +++ /dev/null @@ -1 +0,0 @@ 
-{"id":"6d1e690f-0b52-35f7-989b-46100e7c65ed","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"a5b4d0466834e75c9fd29c6cbbac57ad"},"ciphertext":"102ac328cbe66d8cb8515c42e3268776a9be4419a5cb7b79852860b1e691c15b","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"e8daf2e70086b0cacf925d368fd3f60cada1285e39a42c4cc73c135368cfdbef"},"mac":"1bc3b750900a1143c64ba9e677d69e1093aab47cb003ba09f3cd595a3b422db5"},"address":"007594304039c2937a12220338aab821d819f5a4","name":"","meta":"{}"} \ No newline at end of file diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/carlos.json b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/carlos.json deleted file mode 100644 index 2f9759f7bdfe36634675b9a0123a4e6f16da2258..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/carlos.json +++ /dev/null @@ -1 +0,0 @@ -{"id":"ffaebba1-f1b9-8758-7034-0314040b1396","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"97f124bc8a7bf55d00eb2755c2b50364"},"ciphertext":"b87827816f33d2bef2dc3102a8a7744b86912f8ace10e45cb282a13487769ed2","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"3114c67a05bff7831d112083f566b176bfc874aea160eebadbe5564e406ee85c"},"mac":"e9bfe8fd6f612bc036bb57659297fc03db022264f5086a1b5726972d3ab6f64a"},"address":"004e7a39907f090e19b0b80a277e77b72b22e269","name":"","meta":"{}"} \ No newline at end of file diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/diego.json b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/diego.json deleted file mode 100644 index f1df56b841364039d3a325418bd9195cb87b5f91..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/diego.json +++ /dev/null @@ -1 +0,0 @@ 
-{"id":"ef9eb431-dc73-cf31-357e-736f64febe68","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"7077f1c4170d9fc2e05c5956be32fb51"},"ciphertext":"a053be448768d984257aeb8f9c7913e3f54c6e6e741accad9f09dd70c2d9828c","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"12580aa4624040970301e7474d3f9b2a93552bfe9ea2517f7119ccf8e91ebd0d"},"mac":"796dbb48adcfc09041fe39121632801d9f950d3c73dd47105180d8097d4f4491"},"address":"00eed42bf93b498f28acd21d207427a14074defe","name":"","meta":"{}"} \ No newline at end of file diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/pass b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/pass deleted file mode 100644 index f3097ab13082b70f67202aab7dd9d1b35b7ceac2..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/pass +++ /dev/null @@ -1 +0,0 @@ -password diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/poa-node-config b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/poa-node-config deleted file mode 100644 index 2b3c56453d7b3d8535cb2c8853e883e34e48e088..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/poa-node-config +++ /dev/null @@ -1,20 +0,0 @@ -[parity] -chain = "/config/poa.json" -keys_path = "/config/keys" -no_persistent_txqueue = true - -[account] -password = ["/config/pass"] - -[network] -reserved_peers = "/config/reserved" - -[rpc] -apis = ["all"] -cors = ["moz-extension://*", "chrome-extension://*"] - -[mining] -force_sealing = true - -[misc] -unsafe_expose = true diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/poa.json b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/poa.json deleted file mode 100644 index 12a8a58f263bb08c81f0e0994dfb3d21865db46d..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/poa.json +++ /dev/null @@ 
-1,184 +0,0 @@ -{ - "name": "BridgePoa", - "engine": { - "authorityRound": { - "params": { - "stepDuration": 10, - "validators": { - "list": [ - "0x005e714f896a8b7cede9d38688c1a81de72a58e4", - "0x007594304039c2937a12220338aab821d819f5a4", - "0x004e7a39907f090e19b0b80a277e77b72b22e269" - ] - }, - "validateScoreTransition": 0, - "validateStepTransition": 0, - "maximumUncleCountTransition": 0, - "maximumUncleCount": 0, - "emptyStepsTransition": "0xfffffffff", - "maximumEmptySteps": 1 - } - } - }, - "params": { - "accountStartNonce": "0x0", - "eip1014Transition": "0x0", - "eip1052Transition": "0x0", - "eip140Transition": "0x0", - "eip145Transition": "0x0", - "eip150Transition": "0x0", - "eip155Transition": "0x0", - "eip160Transition": "0x0", - "eip161abcTransition": "0x0", - "eip161dTransition": "0x0", - "eip211Transition": "0x0", - "eip214Transition": "0x0", - "eip658Transition": "0x0", - "eip98Transition": "0x7fffffffffffff", - "gasLimitBoundDivisor": "0x0400", - "maxCodeSize": 24576, - "maxCodeSizeTransition": "0x0", - "maximumExtraDataSize": "0x20", - "minGasLimit": "0x1388", - "networkID" : "0x69", - "validateChainIdTransition": "0x0", - "validateReceiptsTransition": "0x0" - }, - "genesis": { - "seal": { - "authorityRound": { - "step": "0x0", - "signature": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" - } - }, - "difficulty": "0x20000", - "author": "0x0000000000000000000000000000000000000000", - "timestamp": "0x00", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "extraData": "0x", - "gasLimit": "0x222222" - }, - "accounts": { - "0000000000000000000000000000000000000001": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, - "0000000000000000000000000000000000000002": { "balance": "1", "nonce": "1048576", "builtin": { "name": "sha256", "pricing": { "linear": { 
"base": 60, "word": 12 } } } }, - "0000000000000000000000000000000000000003": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, - "0000000000000000000000000000000000000004": { "balance": "1", "nonce": "1048576", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, - "0000000000000000000000000000000000000005": { "balance": "1", "builtin": { "name": "modexp", "activate_at": 0, "pricing": { "modexp": { "divisor": 20 } } } }, - "0000000000000000000000000000000000000006": { - "balance": "1", - "builtin": { - "name": "alt_bn128_add", - "pricing": { - "0": { - "price": { "alt_bn128_const_operations": { "price": 500 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_const_operations": { "price": 150 }} - } - } - } - }, - "0000000000000000000000000000000000000007": { - "balance": "1", - "builtin": { - "name": "alt_bn128_mul", - "pricing": { - "0": { - "price": { "alt_bn128_const_operations": { "price": 40000 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_const_operations": { "price": 6000 }} - } - } - } - }, - "0000000000000000000000000000000000000008": { - "balance": "1", - "builtin": { - "name": "alt_bn128_pairing", - "pricing": { - "0": { - "price": { "alt_bn128_pairing": { "base": 100000, "pair": 80000 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_pairing": { "base": 45000, "pair": 34000 }} - } - } - } - }, - "0x0000000000000000000000000000000000000009": { - "builtin": { - "name": "blake2_f", - "activate_at": "0xd751a5", - "pricing": { - "blake2_f": { - "gas_per_round": 1 - } - } - } - }, - "0x0000000000000000000000000000000000000010": { - "builtin": { - "name": "parse_substrate_header", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000011": { - "builtin": { - "name": 
"get_substrate_header_signal", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000012": { - "builtin": { - "name": "verify_substrate_finality_proof", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000013": { - "builtin": { - "name": "my_test", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x005e714f896a8b7cede9d38688c1a81de72a58e4": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - }, - "0x007594304039c2937a12220338aab821d819f5a4": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - }, - "0x004e7a39907f090e19b0b80a277e77b72b22e269": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - }, - "0x00eed42bf93b498f28acd21d207427a14074defe": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - } - } -} diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/reserved b/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/reserved deleted file mode 100644 index 209d71b7fb30f9e49a635192ffac5775a8188e58..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/poa-rialto/poa-config/reserved +++ /dev/null @@ -1,3 +0,0 @@ -enode://543d0874df46dff238d62547160f9d11e3d21897d7041bbbe46a04d2ee56d9eaf108f2133c0403159624f7647198e224d0755d23ad0e1a50c0912973af6e8a8a@poa-node-arthur:30303 -enode://710de70733e88a24032e53054985f7239e37351f5f3335a468a1a78a3026e9f090356973b00262c346a6608403df2c7107fc4def2cfe4995ea18a41292b9384f@poa-node-bertha:30303 -enode://943525f415b9482f1c49bd39eb979e4e2b406f4137450b0553bffa5cba2928e25ff89ef70f7325aad8a75dbb5955eaecc1aee7ac55d66bcaaa07c8ea58adb23a@poa-node-carlos:30303 diff --git 
a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/grafana-dashboard.yaml b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/grafana-dashboard.yaml deleted file mode 100644 index d671bfb2242d78fa8246a79b254dac9a54c34131..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/grafana-dashboard.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- name: 'default' - orgId: 1 - folder: '' - type: file - options: - path: '/etc/grafana/provisioning/dashboards' \ No newline at end of file diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-millau-to-rialto-headers-dashboard.json b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-millau-to-rialto-headers-dashboard.json deleted file mode 100644 index 2dc4f8a4182921dec8b3ad9607bb05667dc501d7..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-millau-to-rialto-headers-dashboard.json +++ /dev/null @@ -1,694 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "5m", - "handler": 1, - "message": "", - "name": "Synced Header Difference is Over 5 
(Millau to Rialto)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "Shows how many headers behind the target chain is from the source chain.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max(Millau_to_Rialto_Sync_best_block_numbers{node=\"source\"}) - max(Millau_to_Rialto_Sync_best_block_numbers{node=\"target\"})", - "format": "table", - "instant": false, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Difference Between Source and Target Headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": 
"lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "2m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "3m", - "frequency": "5m", - "handler": 1, - "name": "No New Headers (Millau to Rialto)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "How many headers has the relay synced from the source node in the last 2 mins?", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 16, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max_over_time(Millau_to_Rialto_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Millau_to_Rialto_Sync_best_block_numbers{node=\"source\"}[2m])", - "interval": "", - "legendFormat": "Number of Millau Headers Synced on Rialto", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Headers Synced on Rialto (Last 2 Mins)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": 
null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": { - "align": null - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 8 - }, - "id": 2, - "interval": "5s", - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Millau_to_Rialto_Sync_best_block_numbers", - "format": "time_series", - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Best Known Header on {{node}} Node", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Best Blocks according to Relay", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 6, - "x": 12, - "y": 8 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"Millau_to_Rialto_Sync_system_average_load", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": null - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Average System Load", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 18, - "y": 8 - }, - "id": 12, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "avg_over_time(Millau_to_Rialto_Sync_process_cpu_usage_percentage[1m])", - "instant": true, - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay Process CPU Usage ", - "type": "gauge" - }, - { - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - 
}, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 14 - }, - "id": 4, - "options": { - "displayMode": "gradient", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showUnfilled": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Millau_to_Rialto_Sync_blocks_in_state", - "instant": true, - "interval": "", - "legendFormat": "{{state}}", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Queued Headers in Relay", - "type": "bargauge" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 16 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Millau_to_Rialto_Sync_process_memory_usage_bytes / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory Usage for Relay Process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - 
"logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Millau to Rialto Header Sync Dashboard", - "uid": "relay-millau-to-rialto-headers", - "version": 1 -} diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-millau-to-rialto-messages-dashboard.json b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-millau-to-rialto-messages-dashboard.json deleted file mode 100644 index 69c07f8715f8bd5a7844e5a8b05905996655b992..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-millau-to-rialto-messages-dashboard.json +++ /dev/null @@ -1,1137 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - 
"dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_best_block_numbers{type=~\"target|target_at_source\"}, \"type\", \"At Rialto\", \"type\", \"target\"), \"type\", \"At Millau\", \"type\", \"target_at_source\")", - "instant": false, - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Best finalized Rialto headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_best_block_numbers{type=~\"source|source_at_target\"}, \"type\", \"At Millau\", \"type\", \"source\"), \"type\", \"At Rialto\", \"type\", \"source_at_target\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Best finalized Millau headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "max" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Messages generated at Millau are not detected by relay", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 9 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - 
"percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Rialto\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - }, - { - "expr": "max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m]) - min_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m])", - "hide": true, - "interval": "", - "legendFormat": "Messages generated in last 5 minutes", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Delivery race", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 9 - }, - "hiddenSeries": false, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false 
- }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Rialto to Millau\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Confirmations race", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "1m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "sum" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Messages from Millau to Rialto are not being delivered", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - 
"overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 20 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", - "format": "time_series", - "instant": false, - "interval": "", - "legendFormat": "Undelivered messages at Rialto", - "refId": "A" - }, - { - "expr": "increase(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[1m])", - "interval": "", - "legendFormat": "Messages delivered to Rialto in last 1m", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 1 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Delivery race lags", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 10 - ], 
- "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Too many unconfirmed messages", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 20 - }, - "hiddenSeries": false, - "id": 12, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed messages at Millau", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 10 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Confirmations race lags", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": 
true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 10 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Rewards are not being confirmed", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 20 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed rewards at Rialto", - "refId": "A" - }, - { - "expr": "(scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - 
scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))) * (max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]) > bool min_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed rewards at Rialto (zero if messages are not being delivered to Rialto)", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 10 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Reward lags", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 0, - "y": 27 - }, - "id": 16, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.0.6", - "targets": [ - { - "expr": "avg_over_time(Millau_to_Rialto_MessageLane_00000000_process_cpu_usage_percentage[1m])", - "instant": true, - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - 
"timeShift": null, - "title": "Relay process CPU usage (1 CPU = 100)", - "type": "gauge" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 8, - "x": 8, - "y": 27 - }, - "hiddenSeries": false, - "id": 18, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Millau_to_Rialto_MessageLane_00000000_system_average_load", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "System load average", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 8, - "x": 16, - "y": 27 - }, - "hiddenSeries": false, - "id": 20, - "legend": { - "avg": false, - "current": false, - 
"max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Millau_to_Rialto_MessageLane_00000000_process_memory_usage_bytes / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory used by relay process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 25, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Millau to Rialto Message Sync Dashboard", - "uid": "relay-millau-to-rialto-messages", - "version": 1 -} diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-rialto-to-millau-headers-dashboard.json b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-rialto-to-millau-headers-dashboard.json deleted file mode 100644 index 
1f9176ddba1bc0935632473ff4fa8b0a5f8c7ee9..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-rialto-to-millau-headers-dashboard.json +++ /dev/null @@ -1,694 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "5m", - "handler": 1, - "message": "", - "name": "Synced Header Difference is Over 5 (Rialto to Millau)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "Shows how many headers behind the target chain is from the source chain.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"max(Rialto_to_Millau_Sync_best_block_numbers{node=\"source\"}) - max(Rialto_to_Millau_Sync_best_block_numbers{node=\"target\"})", - "format": "table", - "instant": false, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Difference Between Source and Target Headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 5 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "2m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "3m", - "frequency": "5m", - "handler": 1, - "name": "No New Headers (Rialto to Millau)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "How many headers has the relay synced from the source node in the last 2 mins?", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 16, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - 
"values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max_over_time(Rialto_to_Millau_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Rialto_to_Millau_Sync_best_block_numbers{node=\"source\"}[2m])", - "interval": "", - "legendFormat": "Number of Rialto Headers Synced on Millau", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Headers Synced on Millau (Last 2 Mins)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": { - "align": null - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 8 - }, - "id": 2, - "interval": "5s", - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": 
"Rialto_to_Millau_Sync_best_block_numbers", - "format": "time_series", - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Best Known Header on {{node}} Node", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Best Blocks according to Relay", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 6, - "x": 12, - "y": 8 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Rialto_to_Millau_Sync_system_average_load", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": null - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Average System Load", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - 
"defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 18, - "y": 8 - }, - "id": 12, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "avg_over_time(Rialto_to_Millau_Sync_process_cpu_usage_percentage[1m])", - "instant": true, - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay Process CPU Usage ", - "type": "gauge" - }, - { - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 14 - }, - "id": 4, - "options": { - "displayMode": "gradient", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showUnfilled": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Rialto_to_Millau_Sync_blocks_in_state", - "instant": true, - "interval": "", - "legendFormat": "{{state}}", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Queued Headers in Relay", - "type": "bargauge" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 16 - }, - "hiddenSeries": false, - 
"id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Rialto_to_Millau_Sync_process_memory_usage_bytes / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory Usage for Relay Process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Rialto to Millau Header Sync Dashboard", - "uid": "relay-rialto-to-millau-headers", - "version": 1 -} diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-rialto-to-millau-messages-dashboard.json b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-rialto-to-millau-messages-dashboard.json deleted file mode 100644 index 
138d1f7f2c030b54d6b853c9cba3483a35db68e2..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/dashboards/relay-rialto-to-millau-messages-dashboard.json +++ /dev/null @@ -1,1137 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_best_block_numbers{type=~\"target|target_at_source\"}, \"type\", \"At Millau\", \"type\", \"target\"), \"type\", \"At Rialto\", \"type\", \"target_at_source\")", - "instant": false, - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Best finalized Millau headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ 
- { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_best_block_numbers{type=~\"source|source_at_target\"}, \"type\", \"At Rialto\", \"type\", \"source\"), \"type\", \"At Millau\", \"type\", \"source_at_target\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Best finalized Rialto headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": 
{}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "max" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Messages generated at Rialto are not detected by relay", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 9 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Millau\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Rialto\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - }, - { - "expr": "max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m]) - min_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m])", - "hide": true, - "interval": "", - "legendFormat": "Messages generated in last 5 minutes", - "refId": "B" - } - ], - "thresholds": [], - 
"timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Delivery race", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 9 - }, - "hiddenSeries": false, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Millau to Rialto\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Rialto\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Confirmations race", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - 
"mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "1m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "sum" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Messages from Rialto to Millau are not being delivered", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 20 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", - "format": "time_series", - "instant": false, - "interval": "", - "legendFormat": "Undelivered messages at Millau", - "refId": "A" - 
}, - { - "expr": "increase(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[1m])", - "interval": "", - "legendFormat": "Messages delivered to Millau in last 1m", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 1 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Delivery race lags", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 10 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Too many unconfirmed messages", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 20 - }, - "hiddenSeries": false, - "id": 12, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - 
"pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed messages at Rialto", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 10 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Confirmations race lags", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 10 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Rewards are not being confirmed", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 20 - }, - 
"hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed rewards at Millau", - "refId": "A" - }, - { - "expr": "(scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))) * (max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]) > bool min_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed rewards at Millau (zero if messages are not being delivered to Millau)", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 10 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Reward lags", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": 
null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 0, - "y": 27 - }, - "id": 16, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.0.6", - "targets": [ - { - "expr": "avg_over_time(Rialto_to_Millau_MessageLane_00000000_process_cpu_usage_percentage[1m])", - "instant": true, - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay process CPU usage (1 CPU = 100)", - "type": "gauge" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 8, - "x": 8, - "y": 27 - }, - "hiddenSeries": false, - "id": 18, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Rialto_to_Millau_MessageLane_00000000_system_average_load", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - 
"timeRegions": [], - "timeShift": null, - "title": "System load average", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 8, - "x": 16, - "y": 27 - }, - "hiddenSeries": false, - "id": 20, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Rialto_to_Millau_MessageLane_00000000_process_memory_usage_bytes / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory used by relay process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, 
- "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 25, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Rialto to Millau Message Sync Dashboard", - "uid": "relay-rialto-to-millau-messages", - "version": 1 -} diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/datasources/grafana-datasource.yaml b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/datasources/grafana-datasource.yaml deleted file mode 100644 index b85cf06e2bd53a1b9b69f51a64c80b3ba5454611..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/datasources/grafana-datasource.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# list of datasources to insert/update depending -# whats available in the database -datasources: - # name of the datasource. Required -- name: Prometheus - # datasource type. Required - type: prometheus - # access mode. direct or proxy. Required - access: proxy - # org id. will default to orgId 1 if not specified - orgId: 1 - # url - url: http://prometheus-metrics:9090 - # mark as default datasource. 
Max one per org - isDefault: true - version: 1 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/notifiers/grafana-notifier.yaml b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/notifiers/grafana-notifier.yaml deleted file mode 100644 index 4eb6ea3863e976a436cb2884f54fb9bba5c80e50..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/provisioning/notifiers/grafana-notifier.yaml +++ /dev/null @@ -1,15 +0,0 @@ -notifiers: - - name: Matrix - type: webhook - uid: notifier1 - is_default: true - send_reminder: true - frequency: 1h - disable_resolve_message: false - settings: - url: http://grafana-matrix-notifier:4567/hook?rule=bridge - http_method: POST - -delete_notifiers: - - name: Matrix - uid: notifier1 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/prometheus/prometheus.yml b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/prometheus/prometheus.yml deleted file mode 100644 index 763eaf35b3fbe4eb76213261469d8f20ce04b460..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/prometheus/prometheus.yml +++ /dev/null @@ -1,23 +0,0 @@ -scrape_configs: - # The job name is added as a label `job=` to any timeseries scraped from this config. - - job_name: 'millau_to_rialto_headers_relay_node' - - # Override the global default and scrape targets from this job every 15 seconds. 
- scrape_interval: 15s - static_configs: - - targets: ['relay-headers-millau-to-rialto:9616'] - - - job_name: 'rialto_to_millau_headers_relay_node' - scrape_interval: 15s - static_configs: - - targets: ['relay-headers-rialto-to-millau:9616'] - - - job_name: 'millau_to_rialto_messages_relay_node' - scrape_interval: 15s - static_configs: - - targets: ['relay-messages-millau-to-rialto:9616'] - - - job_name: 'rialto_to_millau_messages_relay_node' - scrape_interval: 15s - static_configs: - - targets: ['relay-messages-rialto-to-millau:9616'] diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml b/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml deleted file mode 100644 index d7a360a2b65d9750afe8cb5ccd2c115ba57aa0a3..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml +++ /dev/null @@ -1,97 +0,0 @@ -version: '3.5' -services: - # We provide overrides for these particular nodes since they are public facing - # nodes which we use to connect from things like Polkadot JS Apps. 
- rialto-node-charlie: - environment: - VIRTUAL_HOST: wss.rialto.brucke.link - VIRTUAL_PORT: 9944 - LETSENCRYPT_HOST: wss.rialto.brucke.link - LETSENCRYPT_EMAIL: admin@parity.io - - millau-node-charlie: - environment: - VIRTUAL_HOST: wss.millau.brucke.link - VIRTUAL_PORT: 9944 - LETSENCRYPT_HOST: wss.millau.brucke.link - LETSENCRYPT_EMAIL: admin@parity.io - - relay-headers-millau-to-rialto: &sub-bridge-relay - image: paritytech/substrate-relay - entrypoint: /entrypoints/relay-headers-millau-to-rialto-entrypoint.sh - volumes: - - ./bridges/rialto-millau/entrypoints:/entrypoints - environment: - RUST_LOG: rpc=trace,bridge=trace - ports: - - "9616:9616" - depends_on: &all-nodes - - millau-node-alice - - millau-node-bob - - millau-node-charlie - - millau-node-dave - - millau-node-eve - - rialto-node-alice - - rialto-node-bob - - rialto-node-charlie - - rialto-node-dave - - rialto-node-eve - - relay-headers-rialto-to-millau: - <<: *sub-bridge-relay - entrypoint: /entrypoints/relay-headers-rialto-to-millau-entrypoint.sh - ports: - - "9716:9616" - - relay-messages-millau-to-rialto: - <<: *sub-bridge-relay - entrypoint: /entrypoints/relay-messages-millau-to-rialto-entrypoint.sh - environment: - RUST_LOG: rpc=trace,bridge=trace,jsonrpsee=trace,soketto=trace - ports: - - "9816:9616" - depends_on: - - relay-headers-millau-to-rialto - - relay-headers-rialto-to-millau - - relay-messages-millau-to-rialto-generator: - <<: *sub-bridge-relay - entrypoint: /entrypoints/relay-messages-to-rialto-generator-entrypoint.sh - environment: - RUST_LOG: rpc=trace,bridge=trace,jsonrpsee=trace,soketto=trace - ports: - - "9916:9616" - depends_on: - - relay-messages-millau-to-rialto - - relay-messages-rialto-to-millau: - <<: *sub-bridge-relay - entrypoint: /entrypoints/relay-messages-rialto-to-millau-entrypoint.sh - ports: - - "10016:9616" - depends_on: - - relay-headers-millau-to-rialto - - relay-headers-rialto-to-millau - - relay-messages-rialto-to-millau-generator: - <<: *sub-bridge-relay - 
entrypoint: /entrypoints/relay-messages-to-millau-generator-entrypoint.sh - ports: - - "10116:9616" - depends_on: - - relay-messages-rialto-to-millau - - # Note: These are being overridden from the top level `monitoring` compose file. - grafana-dashboard: - environment: - VIRTUAL_HOST: grafana.millau.brucke.link,grafana.rialto.brucke.link - VIRTUAL_PORT: 3000 - LETSENCRYPT_HOST: grafana.millau.brucke.link,grafana.rialto.brucke.link - LETSENCRYPT_EMAIL: admin@parity.io - volumes: - - ./bridges/rialto-millau/dashboard/grafana/provisioning/:/etc/grafana/provisioning/ - - prometheus-metrics: - volumes: - - ./bridges/rialto-millau/dashboard/prometheus/:/etc/prometheus/ - depends_on: *all-nodes diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-headers-millau-to-rialto-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-headers-millau-to-rialto-entrypoint.sh deleted file mode 100755 index e7b073d967efc7409c8c30f1ca009067c3a83db4..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-headers-millau-to-rialto-entrypoint.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 3 -curl -v http://millau-node-alice:9933/health -curl -v http://rialto-node-alice:9933/health - -/home/user/substrate-relay init-bridge millau-to-rialto \ - --millau-host millau-node-alice \ - --millau-port 9944 \ - --rialto-host rialto-node-alice \ - --rialto-port 9944 \ - --rialto-signer //Alice - -# Give chain a little bit of time to process initialization transaction -sleep 6 -/home/user/substrate-relay relay-headers millau-to-rialto \ - --millau-host millau-node-alice \ - --millau-port 9944 \ - --rialto-host rialto-node-alice \ - --rialto-port 9944 \ - --rialto-signer //Charlie \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-headers-rialto-to-millau-entrypoint.sh 
b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-headers-rialto-to-millau-entrypoint.sh deleted file mode 100755 index f3fa7597b28329f13fe0636257ea75ea442845e5..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-headers-rialto-to-millau-entrypoint.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 3 -curl -v http://millau-node-alice:9933/health -curl -v http://rialto-node-alice:9933/health - -/home/user/substrate-relay init-bridge rialto-to-millau \ - --millau-host millau-node-alice \ - --millau-port 9944 \ - --rialto-host rialto-node-alice \ - --rialto-port 9944 \ - --millau-signer //Alice - -# Give chain a little bit of time to process initialization transaction -sleep 6 -/home/user/substrate-relay relay-headers rialto-to-millau \ - --millau-host millau-node-alice \ - --millau-port 9944 \ - --rialto-host rialto-node-alice \ - --rialto-port 9944 \ - --millau-signer //Charlie \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh deleted file mode 100755 index 5b92a9b0135cef9743abb2536854f0d5f4d66708..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 3 -curl -v http://millau-node-bob:9933/health -curl -v http://rialto-node-bob:9933/health - -MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} - -/home/user/substrate-relay relay-messages millau-to-rialto \ - --lane $MESSAGE_LANE \ - --millau-host millau-node-bob \ - --millau-port 9944 \ - --millau-signer //Eve \ - --rialto-host rialto-node-bob \ - --rialto-port 9944 \ - --rialto-signer //Eve \ - --prometheus-host=0.0.0.0 diff --git 
a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh deleted file mode 100755 index 6d23b8d236d3a4a1278bf8aa742b8d26ff1acfff..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 3 -curl -v http://millau-node-bob:9933/health -curl -v http://rialto-node-bob:9933/health - -MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} - -/home/user/substrate-relay relay-messages rialto-to-millau \ - --lane $MESSAGE_LANE \ - --rialto-host rialto-node-bob \ - --rialto-port 9944 \ - --rialto-signer //Ferdie \ - --millau-host millau-node-bob \ - --millau-port 9944 \ - --millau-signer //Ferdie \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh deleted file mode 100755 index 606a3f4e51482c0bb2c94775f2a4b43c30430226..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash - -# THIS SCRIPT IS NOT INTENDED FOR USE IN PRODUCTION ENVIRONMENT -# -# This scripts periodically calls the Substrate relay binary to generate messages. These messages -# are sent from the Rialto network to the Millau network. 
- -set -eu - -# Max delay before submitting transactions (s) -MAX_SUBMIT_DELAY_S=${MSG_EXCHANGE_GEN_MAX_SUBMIT_DELAY_S:-30} -MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} -FERDIE_ADDR=5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL - -SHARED_CMD="/home/user/substrate-relay send-message rialto-to-millau" -SHARED_HOST="--rialto-host rialto-node-bob --rialto-port 9944" -DAVE_SIGNER="--rialto-signer //Dave --millau-signer //Dave" - -SEND_MESSAGE="$SHARED_CMD $SHARED_HOST $DAVE_SIGNER" - -# Sleep a bit between messages -rand_sleep() { - SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1` - echo "Sleeping $SUBMIT_DELAY_S seconds..." - sleep $SUBMIT_DELAY_S -} - -while true -do - rand_sleep - echo "Sending Remark from Rialto to Millau using Target Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --origin Target \ - remark - - rand_sleep - echo "Sending Transfer from Rialto to Millau using Target Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --origin Target \ - transfer \ - --amount 1000000000 \ - --recipient $FERDIE_ADDR - - rand_sleep - echo "Sending Remark from Rialto to Millau using Source Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --origin Source \ - remark - - rand_sleep - echo "Sending Transfer from Rialto to Millau using Source Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --origin Source \ - transfer \ - --amount 1000000000 \ - --recipient $FERDIE_ADDR -done diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh deleted file mode 100755 index 66d8e2a0766abfe4d5403593289cbca927af2a74..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash - -# THIS SCRIPT IS NOT INTENDED FOR USE IN PRODUCTION ENVIRONMENT -# 
-# This scripts periodically calls the Substrate relay binary to generate messages. These messages -# are sent from the Millau network to the Rialto network. - -set -eu - -# Max delay before submitting transactions (s) -MAX_SUBMIT_DELAY_S=${MSG_EXCHANGE_GEN_MAX_SUBMIT_DELAY_S:-30} -MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} -FERDIE_ADDR=5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL - -SHARED_CMD=" /home/user/substrate-relay send-message millau-to-rialto" -SHARED_HOST="--millau-host millau-node-bob --millau-port 9944" -DAVE_SIGNER="--rialto-signer //Dave --millau-signer //Dave" - -SEND_MESSAGE="$SHARED_CMD $SHARED_HOST $DAVE_SIGNER" - -# Sleep a bit between messages -rand_sleep() { - SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1` - echo "Sleeping $SUBMIT_DELAY_S seconds..." - sleep $SUBMIT_DELAY_S -} - -while true -do - rand_sleep - echo "Sending Remark from Millau to Rialto using Target Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --origin Target \ - remark - - rand_sleep - echo "Sending Transfer from Millau to Rialto using Target Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --origin Target \ - transfer \ - --amount 1000000000 \ - --recipient $FERDIE_ADDR - - rand_sleep - echo "Sending Remark from Millau to Rialto using Source Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --origin Source \ - remark - - rand_sleep - echo "Sending Transfer from Millau to Rialto using Source Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --origin Source \ - transfer \ - --amount 1000000000 \ - --recipient $FERDIE_ADDR -done diff --git a/polkadot/bridges/deployments/dev/poa-config/keys/BridgePoa/address_book.json b/polkadot/bridges/deployments/dev/poa-config/keys/BridgePoa/address_book.json deleted file mode 100644 index 9e26dfeeb6e641a33dae4961196235bdb965b21b..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/dev/poa-config/keys/BridgePoa/address_book.json +++ /dev/null @@ -1 +0,0 @@ -{} \ No newline at end of file 
diff --git a/polkadot/bridges/deployments/dev/poa-config/keys/BridgePoa/arthur.json b/polkadot/bridges/deployments/dev/poa-config/keys/BridgePoa/arthur.json deleted file mode 100644 index fa59a46480c27c74d5d675908885b70e37c68330..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/dev/poa-config/keys/BridgePoa/arthur.json +++ /dev/null @@ -1 +0,0 @@ -{"id":"dd04f316-bc9d-2deb-4a34-51014cd5f34f","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"aa91e6f0e6cf48208be4a1bcf15c6f30"},"ciphertext":"6e057599b13a87e8181bb39a40e14848fdc97958d493ddfa6bb1260350f69328","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"79dd8c09c5c066b830179a2558a51efca6d97c0db2c4128090a01835786823c5"},"mac":"8f8b8e2c9de29ec8eefc54a60055e30ae7ff4dd4a367eaf38880edb887da771e"},"address":"005e714f896a8b7cede9d38688c1a81de72a58e4","name":"","meta":"{}"} \ No newline at end of file diff --git a/polkadot/bridges/deployments/dev/poa-config/pass b/polkadot/bridges/deployments/dev/poa-config/pass deleted file mode 100644 index f3097ab13082b70f67202aab7dd9d1b35b7ceac2..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/dev/poa-config/pass +++ /dev/null @@ -1 +0,0 @@ -password diff --git a/polkadot/bridges/deployments/dev/poa-config/poa-node-config b/polkadot/bridges/deployments/dev/poa-config/poa-node-config deleted file mode 100644 index 146bbac17cf9e12bff3f7ecb12515cb2642bc5b9..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/dev/poa-config/poa-node-config +++ /dev/null @@ -1,17 +0,0 @@ -[parity] -chain = "./deployments/dev/poa-config/poa.json" -keys_path = "./deployments/dev/poa-config/keys" -no_persistent_txqueue = true - -[account] -password = ["./deployments/dev/poa-config/pass"] - -[rpc] -apis = ["all"] -cors = ["moz-extension://*", "chrome-extension://*"] - -[mining] -force_sealing = true - -[misc] -unsafe_expose = true diff --git 
a/polkadot/bridges/deployments/dev/poa-config/poa.json b/polkadot/bridges/deployments/dev/poa-config/poa.json deleted file mode 100644 index ecc21766b035907ac5cfcc61bcf70752d93c4ee6..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/dev/poa-config/poa.json +++ /dev/null @@ -1,178 +0,0 @@ -{ - "name": "BridgePoa", - "engine": { - "authorityRound": { - "params": { - "stepDuration": 10, - "validators": { - "list": [ - "0x005e714f896a8b7cede9d38688c1a81de72a58e4" - ] - }, - "validateScoreTransition": 0, - "validateStepTransition": 0, - "maximumUncleCountTransition": 0, - "maximumUncleCount": 0, - "emptyStepsTransition": "0xfffffffff", - "maximumEmptySteps": 1 - } - } - }, - "params": { - "accountStartNonce": "0x0", - "eip1014Transition": "0x0", - "eip1052Transition": "0x0", - "eip140Transition": "0x0", - "eip145Transition": "0x0", - "eip150Transition": "0x0", - "eip155Transition": "0x0", - "eip160Transition": "0x0", - "eip161abcTransition": "0x0", - "eip161dTransition": "0x0", - "eip211Transition": "0x0", - "eip214Transition": "0x0", - "eip658Transition": "0x0", - "eip98Transition": "0x7fffffffffffff", - "gasLimitBoundDivisor": "0x0400", - "maxCodeSize": 24576, - "maxCodeSizeTransition": "0x0", - "maximumExtraDataSize": "0x20", - "minGasLimit": "0x1388", - "networkID" : "0x69", - "validateChainIdTransition": "0x0", - "validateReceiptsTransition": "0x0" - }, - "genesis": { - "seal": { - "authorityRound": { - "step": "0x0", - "signature": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" - } - }, - "difficulty": "0x20000", - "author": "0x0000000000000000000000000000000000000000", - "timestamp": "0x00", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "extraData": "0x", - "gasLimit": "0x222222" - }, - "accounts": { - "0000000000000000000000000000000000000001": { "balance": "1", "nonce": "1048576", "builtin": { "name": 
"ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, - "0000000000000000000000000000000000000002": { "balance": "1", "nonce": "1048576", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } }, - "0000000000000000000000000000000000000003": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, - "0000000000000000000000000000000000000004": { "balance": "1", "nonce": "1048576", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, - "0000000000000000000000000000000000000005": { "balance": "1", "builtin": { "name": "modexp", "activate_at": 0, "pricing": { "modexp": { "divisor": 20 } } } }, - "0000000000000000000000000000000000000006": { - "balance": "1", - "builtin": { - "name": "alt_bn128_add", - "pricing": { - "0": { - "price": { "alt_bn128_const_operations": { "price": 500 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_const_operations": { "price": 150 }} - } - } - } - }, - "0000000000000000000000000000000000000007": { - "balance": "1", - "builtin": { - "name": "alt_bn128_mul", - "pricing": { - "0": { - "price": { "alt_bn128_const_operations": { "price": 40000 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_const_operations": { "price": 6000 }} - } - } - } - }, - "0000000000000000000000000000000000000008": { - "balance": "1", - "builtin": { - "name": "alt_bn128_pairing", - "pricing": { - "0": { - "price": { "alt_bn128_pairing": { "base": 100000, "pair": 80000 }} - }, - "0x7fffffffffffff": { - "info": "EIP 1108 transition", - "price": { "alt_bn128_pairing": { "base": 45000, "pair": 34000 }} - } - } - } - }, - "0x0000000000000000000000000000000000000009": { - "builtin": { - "name": "blake2_f", - "activate_at": "0xd751a5", - "pricing": { - "blake2_f": { - "gas_per_round": 1 - } - } - } - }, - 
"0x0000000000000000000000000000000000000010": { - "builtin": { - "name": "parse_substrate_header", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000011": { - "builtin": { - "name": "get_substrate_header_signal", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000012": { - "builtin": { - "name": "verify_substrate_finality_proof", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x0000000000000000000000000000000000000013": { - "builtin": { - "name": "my_test", - "pricing": { - "linear": { - "base": 3000, - "word": 0 - } - } - } - }, - "0x005e714f896a8b7cede9d38688c1a81de72a58e4": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - }, - "0x007594304039c2937a12220338aab821d819f5a4": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - }, - "0x004e7a39907f090e19b0b80a277e77b72b22e269": { - "balance": "1606938044258990275541962092341162602522202993782792835301376", - "nonce": "0x1" - } - } -} diff --git a/polkadot/bridges/deployments/local-scripts/bridge-entrypoint.sh b/polkadot/bridges/deployments/local-scripts/bridge-entrypoint.sh deleted file mode 100755 index 5c1b6e90ec27f50529a2d39c5447f6b7fe17e6e0..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/local-scripts/bridge-entrypoint.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -xeu - -# This will allow us to run whichever binary the user wanted -# with arguments passed through `docker run` -# e.g `docker run -it rialto-bridge-node-dev --dev --tmp` -/home/user/$PROJECT $@ diff --git a/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh b/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh deleted file mode 100755 index 205d7e62f8e5fce0e197249aba30b9bf59c3e1fd..0000000000000000000000000000000000000000 --- 
a/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -# A script for relaying Millau headers to the Rialto chain. -# -# Will not work unless both the Rialto and Millau are running (see `run-rialto-node.sh` -# and `run-millau-node.sh). - -RUST_LOG=bridge=debug \ -./target/debug/substrate-relay init-bridge millau-to-rialto \ - --millau-host localhost \ - --millau-port 9945 \ - --rialto-host localhost \ - --rialto-port 9944 \ - --rialto-signer //Alice \ - -sleep 5 -RUST_LOG=bridge=debug \ -./target/debug/substrate-relay relay-headers millau-to-rialto \ - --millau-host localhost \ - --millau-port 9945 \ - --rialto-host localhost \ - --rialto-port 9944 \ - --rialto-signer //Alice \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh b/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh deleted file mode 100755 index 710b317fa003b34763c44ddb8785547cdaf2f072..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -# A script for relaying Rialto headers to the Millau chain. -# -# Will not work unless both the Rialto and Millau are running (see `run-rialto-node.sh` -# and `run-millau-node.sh). 
- -RUST_LOG=bridge=debug \ -./target/debug/substrate-relay init-bridge rialto-to-millau \ - --millau-host localhost \ - --millau-port 9945 \ - --rialto-host localhost \ - --rialto-port 9944 \ - --millau-signer //Alice \ - -sleep 5 -RUST_LOG=bridge=debug \ -./target/debug/substrate-relay relay-headers rialto-to-millau \ - --millau-host localhost \ - --millau-port 9945 \ - --rialto-host localhost \ - --rialto-port 9944 \ - --millau-signer //Alice \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/local-scripts/run-millau-node.sh b/polkadot/bridges/deployments/local-scripts/run-millau-node.sh deleted file mode 100755 index 6665c09af5755a6c16e73c3469a92cd1998882af..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/local-scripts/run-millau-node.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -# Run a development instance of the Millau Substrate bridge node. - -RUST_LOG=runtime=trace \ -./target/debug/millau-bridge-node --dev --tmp \ - --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ - --port 33044 --rpc-port 9934 --ws-port 9945 \ diff --git a/polkadot/bridges/deployments/local-scripts/run-rialto-node.sh b/polkadot/bridges/deployments/local-scripts/run-rialto-node.sh deleted file mode 100755 index 770284b9f41f83d336594a0a1a65e4ebe5eb4632..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/local-scripts/run-rialto-node.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -# Run a development instance of the Rialto Substrate bridge node. 
- -RUST_LOG=runtime=trace \ - ./target/debug/rialto-bridge-node --dev --tmp \ - --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ - --port 33033 --rpc-port 9933 --ws-port 9944 \ diff --git a/polkadot/bridges/deployments/monitoring/GrafanaMatrix.Dockerfile b/polkadot/bridges/deployments/monitoring/GrafanaMatrix.Dockerfile deleted file mode 100644 index 420e134716a6b52c7d634e7b7a533dcea1ed00bc..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/monitoring/GrafanaMatrix.Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -FROM ruby:alpine - -RUN apk add --no-cache git - -ENV APP_HOME /app -ENV RACK_ENV production -RUN mkdir $APP_HOME -WORKDIR $APP_HOME - -# The latest master has some changes in how the application is run. We don't -# want to update just yet so we're pinning to an old commit. -RUN git clone https://github.com/ananace/ruby-grafana-matrix.git $APP_HOME -RUN git checkout 0d662b29633d16176291d11a2d85ba5107cf7de3 -RUN bundle install --without development - -RUN mkdir /config && touch /config/config.yml && ln -s /config/config.yml ./config.yml - -CMD ["bundle", "exec", "bin/server"] diff --git a/polkadot/bridges/deployments/monitoring/disabled.yml b/polkadot/bridges/deployments/monitoring/disabled.yml deleted file mode 100644 index a0b4ed3aad0392afc89159c892f2a045728cfcf9..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/monitoring/disabled.yml +++ /dev/null @@ -1,15 +0,0 @@ -# A disabled version of monitoring. -# -# We replace each service with a no-op container. We can't simply not include this file, -# cause the bridge-specific compose files might have overrides. 
-version: '3.5' -services: - prometheus-metrics: - image: alpine - - grafana-dashboard: - image: alpine - - grafana-matrix-notifier: - image: alpine - diff --git a/polkadot/bridges/deployments/monitoring/docker-compose.yml b/polkadot/bridges/deployments/monitoring/docker-compose.yml deleted file mode 100644 index f4356306c842fcb4442f86fc1baaa8e0f35e4150..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/monitoring/docker-compose.yml +++ /dev/null @@ -1,28 +0,0 @@ -version: '3.5' -services: - prometheus-metrics: - image: prom/prometheus:v2.20.1 - ports: - - "9090:9090" - - grafana-dashboard: - image: grafana/grafana:7.1.3 - environment: - GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASS:-admin} - GF_SERVER_ROOT_URL: ${GRAFANA_SERVER_ROOT_URL} - GF_SERVER_DOMAIN: ${GRAFANA_SERVER_DOMAIN} - ports: - - "3000:3000" - depends_on: - - prometheus-metrics - - grafana-matrix-notifier: - build: - context: . - dockerfile: ./monitoring/GrafanaMatrix.Dockerfile - volumes: - - ./monitoring/grafana-matrix:/config - ports: - - "4567:4567" - depends_on: - - grafana-dashboard diff --git a/polkadot/bridges/deployments/monitoring/grafana-matrix/config.yml b/polkadot/bridges/deployments/monitoring/grafana-matrix/config.yml deleted file mode 100644 index ae55b9b6dcff1a43b1cff99c636a4ef3fb17c110..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/monitoring/grafana-matrix/config.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -# Webhook server configuration -# Or use the launch options `-o '::' -p 4567` -#bind: '::' -#port: 4567 - -# Set up your HS connections -matrix: -- name: matrix-parity-io - url: https://matrix.parity.io - # Create a user - log that user in using a post request - # curl -XPOST -d '{"type": "m.login.password", - # "user":"grafana", - # "password":"2m4ny53cr3t5"}' - # "https://my-matrix-server/_matrix/client/r0/login" - # Fill that access token in here - access_token: "" - #device_id: # Optional - -# The default message type 
for messages, should be either m.text or m.notice, -# defaults to m.text -msgtype: m.text - -# Set up notification ingress rules -rules: -- name: bridge # Name of the rule - room: "#bridges-workers:matrix.parity.io" # Room or ID - matrix: matrix-parity-io # The Matrix HS to use - defaults to first one - msgtype: m.notice - # The following values are optional: - image: true # Attach image to the notification? - embed_image: true # Upload and embed the image into the message? - #templates: - # Templates to use when rendering the notification, available placeholders: - # %TEMPLATES% - lib/grafana_matrix/templates - # $ - Environment variables - #html: "%TEMPLATES%/html.erb" # Path to HTML template - #plain: "%TEMPLATES%/plain.erb" # Path to plaintext template - #auth: - #user: example - #pass: any HTTP encodable string -#- name: other-hq -# room: "#hq:private.matrix.org -# matrix: matrix-priv - -# To use the webhook, you need to configure it into Grafana as: -# -# Url: http://:/hook?rule= -# Http Method: POST diff --git a/polkadot/bridges/deployments/networks/OpenEthereum.Dockerfile b/polkadot/bridges/deployments/networks/OpenEthereum.Dockerfile deleted file mode 100644 index d47708ca29bf32d4f26484c1d90f1c926608fc29..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/networks/OpenEthereum.Dockerfile +++ /dev/null @@ -1,91 +0,0 @@ -FROM ubuntu:xenial AS builder - -# show backtraces -ENV RUST_BACKTRACE 1 - -ENV LAST_DEPS_UPDATE 2020-06-19 - -# install tools and dependencies -RUN set -eux; \ - apt-get update && \ - apt-get install -y file curl jq ca-certificates && \ - apt-get install -y cmake pkg-config libssl-dev git clang libclang-dev - -ENV LAST_CERTS_UPDATE 2020-06-19 - -RUN update-ca-certificates && \ - curl https://sh.rustup.rs -sSf | sh -s -- -y - -ENV PATH="/root/.cargo/bin:${PATH}" -ENV LAST_RUST_UPDATE="2020-09-09" -RUN rustup update stable && \ - rustup install nightly && \ - rustup target add wasm32-unknown-unknown --toolchain 
nightly - -RUN rustc -vV && \ - cargo -V && \ - gcc -v && \ - g++ -v && \ - cmake --version - -WORKDIR /openethereum - -### Build from the repo -ARG ETHEREUM_REPO=https://github.com/paritytech/openethereum.git -ARG ETHEREUM_HASH=344991dbba2bc8657b00916f0e4b029c66f159e8 -RUN git clone $ETHEREUM_REPO /openethereum && git checkout $ETHEREUM_HASH - -### Build locally. Make sure to set the CONTEXT to main directory of the repo. -# ADD openethereum /openethereum - -WORKDIR /parity-bridges-common - -### Build from the repo -# Build using `master` initially. -ARG BRIDGE_REPO=https://github.com/paritytech/parity-bridges-common -RUN git clone $BRIDGE_REPO /parity-bridges-common && git checkout master - -WORKDIR /openethereum -RUN cargo build --release --verbose || true - -# Then rebuild by switching to a different branch to only incrementally -# build the changes. -WORKDIR /parity-bridges-common -ARG BRIDGE_HASH=master -RUN git checkout . && git fetch && git checkout $BRIDGE_HASH -### Build locally. Make sure to set the CONTEXT to main directory of the repo. -# ADD . 
/parity-bridges-common - -WORKDIR /openethereum -RUN cargo build --release --verbose -RUN strip ./target/release/openethereum - -FROM ubuntu:xenial - -# show backtraces -ENV RUST_BACKTRACE 1 - -RUN set -eux; \ - apt-get update && \ - apt-get install -y curl - -RUN groupadd -g 1000 openethereum \ - && useradd -u 1000 -g openethereum -s /bin/sh -m openethereum - -# switch to user openethereum here -USER openethereum - -WORKDIR /home/openethereum - -COPY --chown=openethereum:openethereum --from=builder /openethereum/target/release/openethereum ./ -# Solve issues with custom --keys-path -RUN mkdir -p ~/.local/share/io.parity.ethereum/keys/ -# check if executable works in this container -RUN ./openethereum --version - -EXPOSE 8545 8546 30303/tcp 30303/udp - -HEALTHCHECK --interval=2m --timeout=5s \ - CMD curl -f http://localhost:8545/api/health || exit 1 - -ENTRYPOINT ["/home/openethereum/openethereum"] diff --git a/polkadot/bridges/deployments/networks/eth-poa.yml b/polkadot/bridges/deployments/networks/eth-poa.yml deleted file mode 100644 index 7291a2ccfd70b30f7b5e4dd0bf8a28490cbf8683..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/networks/eth-poa.yml +++ /dev/null @@ -1,46 +0,0 @@ -# Compose file for quickly spinning up a local instance of an Ethereum PoA network. -# -# Note that this PoA network is only used for testing, so the configuration settings you see here -# are *not* recommended for a production environment. -# -# For example, do *not* keep your account key in version control, and unless you're _really_ sure -# you want to provide public access to your nodes do *not* publicly expose RPC methods. 
-version: '3.5' -services: - poa-node-arthur: &poa-node - image: hcastano/openethereum-bridge-builtins - entrypoint: - - /home/openethereum/openethereum - - --config=/config/poa-node-config - - --node-key=arthur - - --engine-signer=0x005e714f896a8b7cede9d38688c1a81de72a58e4 - environment: - RUST_LOG: rpc=trace,txqueue=trace,bridge-builtin=trace - ports: - - "8545:8545" - - "8546:8546" - - "30303:30303" - - poa-node-bertha: - <<: *poa-node - entrypoint: - - /home/openethereum/openethereum - - --config=/config/poa-node-config - - --node-key=bertha - - --engine-signer=0x007594304039c2937a12220338aab821d819f5a4 - ports: - - "8645:8545" - - "8646:8546" - - "31303:30303" - - poa-node-carlos: - <<: *poa-node - entrypoint: - - /home/openethereum/openethereum - - --config=/config/poa-node-config - - --node-key=carlos - - --engine-signer=0x004e7a39907f090e19b0b80a277e77b72b22e269 - ports: - - "8745:8545" - - "8746:8546" - - "32303:30303" diff --git a/polkadot/bridges/deployments/networks/millau.yml b/polkadot/bridges/deployments/networks/millau.yml deleted file mode 100644 index 43238df09beaed90c49c7419392d3230a6b5b2d0..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/networks/millau.yml +++ /dev/null @@ -1,89 +0,0 @@ -# Compose file for quickly spinning up a local instance of the Millau Substrate network. -# -# Note that the Millau network is only used for testing, so the configuration settings you see here -# are *not* recommended for a production environment. -# -# For example, do *not* keep your `node-key` in version control, and unless you're _really_ sure you -# want to provide public access to your nodes do *not* publicly expose RPC methods. 
-version: '3.5' -services: - millau-node-alice: &millau-bridge-node - image: paritytech/millau-bridge-node - entrypoint: - - /home/user/millau-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/millau-node-bob/tcp/30333/p2p/12D3KooWM5LFR5ne4yTQ4sBSXJ75M4bDo2MAhAW2GhL3i8fe5aRb - - --alice - - --node-key=0f900c89f4e626f4a217302ab8c7d213737d00627115f318ad6fb169717ac8e0 - - --rpc-cors=all - - --unsafe-rpc-external - - --unsafe-ws-external - environment: - RUST_LOG: runtime=trace,rpc=debug,txpool=trace,pallet_substrate_bridge=trace,pallet_bridge_call_dispatch=trace,pallet_message_lane=trace - ports: - - "19933:9933" - - "19944:9944" - - millau-node-bob: - <<: *millau-bridge-node - entrypoint: - - /home/user/millau-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/millau-node-alice/tcp/30333/p2p/12D3KooWFqiV73ipQ1jpfVmCfLqBCp8G9PLH3zPkY9EhmdrSGA4H - - --bob - - --node-key=db383639ff2905d79f8e936fd5dc4416ef46b514b2f83823ec3c42753d7557bb - - --rpc-cors=all - - --unsafe-rpc-external - - --unsafe-ws-external - environment: - RUST_LOG: runtime=trace,rpc=debug,txpool=trace,pallet_substrate_bridge=trace,pallet_bridge_call_dispatch=trace,pallet_message_lane=trace,jsonrpc_ws_server=trace,parity_ws=trace - ports: - - "20033:9933" - - "20044:9944" - - millau-node-charlie: - <<: *millau-bridge-node - entrypoint: - - /home/user/millau-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/millau-node-alice/tcp/30333/p2p/12D3KooWFqiV73ipQ1jpfVmCfLqBCp8G9PLH3zPkY9EhmdrSGA4H - - --charlie - - --rpc-cors=all - - --unsafe-rpc-external - - --unsafe-ws-external - ports: - - "20133:9933" - - "20144:9944" - - millau-node-dave: - <<: *millau-bridge-node - entrypoint: - - /home/user/millau-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/millau-node-alice/tcp/30333/p2p/12D3KooWFqiV73ipQ1jpfVmCfLqBCp8G9PLH3zPkY9EhmdrSGA4H - - --dave - - --rpc-cors=all - - --unsafe-rpc-external - - --unsafe-ws-external 
- ports: - - "20233:9933" - - "20244:9944" - - millau-node-eve: - <<: *millau-bridge-node - entrypoint: - - /home/user/millau-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/millau-node-alice/tcp/30333/p2p/12D3KooWFqiV73ipQ1jpfVmCfLqBCp8G9PLH3zPkY9EhmdrSGA4H - - --eve - - --rpc-cors=all - - --unsafe-rpc-external - - --unsafe-ws-external - ports: - - "20333:9933" - - "20344:9944" diff --git a/polkadot/bridges/deployments/networks/rialto.yml b/polkadot/bridges/deployments/networks/rialto.yml deleted file mode 100644 index 7d8ba1abd569183203f8c6110dcca4956bec2cbc..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/networks/rialto.yml +++ /dev/null @@ -1,89 +0,0 @@ -# Compose file for quickly spinning up a local instance of the Rialto Substrate network. -# -# Note that the Rialto network is only used for testing, so the configuration settings you see here -# are *not* recommended for a production environment. -# -# For example, do *not* keep your `node-key` in version control, and unless you're _really_ sure you -# want to provide public access to your nodes do *not* publicly expose RPC methods. 
-version: '3.5' -services: - rialto-node-alice: &rialto-bridge-node - image: paritytech/rialto-bridge-node - entrypoint: - - /home/user/rialto-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/rialto-node-bob/tcp/30333/p2p/12D3KooWSEpHJj29HEzgPFcRYVc5X3sEuP3KgiUoqJNCet51NiMX - - --alice - - --node-key=79cf382988364291a7968ae7825c01f68c50d679796a8983237d07fe0ccf363b - - --rpc-cors=all - - --unsafe-rpc-external - - --unsafe-ws-external - environment: - RUST_LOG: runtime=trace,rpc=debug,txpool=trace,pallet_substrate_bridge=trace,pallet_bridge_call_dispatch=trace,pallet_message_lane=trace - ports: - - "9933:9933" - - "9944:9944" - - rialto-node-bob: - <<: *rialto-bridge-node - entrypoint: - - /home/user/rialto-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/rialto-node-alice/tcp/30333/p2p/12D3KooWMF6JvV319a7kJn5pqkKbhR3fcM2cvK5vCbYZHeQhYzFE - - --bob - - --node-key=4f9d0146dd9b7b3bf5a8089e3880023d1df92057f89e96e07bb4d8c2ead75bbd - - --rpc-cors=all - - --unsafe-rpc-external - - --unsafe-ws-external - environment: - RUST_LOG: runtime=trace,rpc=debug,txpool=trace,pallet_substrate_bridge=trace,pallet_bridge_call_dispatch=trace,pallet_message_lane=trace,jsonrpc_ws_server=trace,parity_ws=trace - ports: - - "10033:9933" - - "10044:9944" - - rialto-node-charlie: - <<: *rialto-bridge-node - entrypoint: - - /home/user/rialto-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/rialto-node-alice/tcp/30333/p2p/12D3KooWMF6JvV319a7kJn5pqkKbhR3fcM2cvK5vCbYZHeQhYzFE - - --charlie - - --rpc-cors=all - - --unsafe-rpc-external - - --unsafe-ws-external - ports: - - "10133:9933" - - "10144:9944" - - rialto-node-dave: - <<: *rialto-bridge-node - entrypoint: - - /home/user/rialto-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/rialto-node-alice/tcp/30333/p2p/12D3KooWMF6JvV319a7kJn5pqkKbhR3fcM2cvK5vCbYZHeQhYzFE - - --dave - - --rpc-cors=all - - --unsafe-rpc-external - - --unsafe-ws-external - 
ports: - - "10233:9933" - - "10244:9944" - - rialto-node-eve: - <<: *rialto-bridge-node - entrypoint: - - /home/user/rialto-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/rialto-node-alice/tcp/30333/p2p/12D3KooWMF6JvV319a7kJn5pqkKbhR3fcM2cvK5vCbYZHeQhYzFE - - --eve - - --rpc-cors=all - - --unsafe-rpc-external - - --unsafe-ws-external - ports: - - "10333:9933" - - "10344:9944" diff --git a/polkadot/bridges/deployments/reverse-proxy/README.md b/polkadot/bridges/deployments/reverse-proxy/README.md deleted file mode 100644 index ded81f80a1b3c7a11ea287132f4e6107d442542e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/reverse-proxy/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# nginx-proxy - -This is a nginx reverse proxy configuration with Let's encrypt companion. -Main purpose is to be able to use `https://polkadot.js.org/apps` to connect to -a running network. - -## How to? - -In current directory: -```bash -docker-compose up -d -``` - -Then start `rialto` network with the same command (one folder up). `nginx` should -pick up new containers being created and automatically create a proxy setup for `Charlie`. 
diff --git a/polkadot/bridges/deployments/reverse-proxy/docker-compose.yml b/polkadot/bridges/deployments/reverse-proxy/docker-compose.yml deleted file mode 100644 index 61c9505ae568d0beeef62bbf5b1121a3d4a4d560..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/reverse-proxy/docker-compose.yml +++ /dev/null @@ -1,42 +0,0 @@ -version: '2' -services: - nginx-proxy: - image: jwilder/nginx-proxy - container_name: nginx-proxy - networks: - - nginx-proxy - - deployments_default - ports: - - "80:80" - - "443:443" - volumes: - - conf:/etc/nginx/conf.d - - vhost:/etc/nginx/vhost.d - - html:/usr/share/nginx/html - - dhparam:/etc/nginx/dhparam - - certs:/etc/nginx/certs:ro - - /var/run/docker.sock:/tmp/docker.sock:ro - - letsencrypt: - image: jrcs/letsencrypt-nginx-proxy-companion - container_name: nginx-proxy-le - networks: - - nginx-proxy - volumes_from: - - nginx-proxy - volumes: - - certs:/etc/nginx/certs:rw - - /var/run/docker.sock:/var/run/docker.sock:ro - -volumes: - conf: - vhost: - html: - dhparam: - certs: - -networks: - nginx-proxy: - driver: bridge - deployments_default: - external: true diff --git a/polkadot/bridges/deployments/run.sh b/polkadot/bridges/deployments/run.sh deleted file mode 100755 index fe4afd0fbb29cb8c6812946eb4e67f8ed0846b32..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/run.sh +++ /dev/null @@ -1,129 +0,0 @@ -#!/bin/bash - -# Script used for running and updating bridge deployments. -# -# To deploy a network you can run this script with the name of the network you want to run. -# -# `./run.sh poa-rialto` -# -# To update a deployment to use the latest images available from the Docker Hub add the `update` -# argument after the bridge name. 
-# -# `./run.sh rialto-millau update` -# -# Once you've stopped having fun with your deployment you can take it down with: -# -# `./run.sh rialto-millau stop` - -set -xeu - -# Since the Compose commands are using relative paths we need to `cd` into the `deployments` folder. -cd "$( dirname "${BASH_SOURCE[0]}" )" - -function show_help () { - set +x - echo " " - echo Error: $1 - echo " " - echo "Usage:" - echo " ./run.sh poa-rialto [stop|update] Run PoA <> Rialto Networks & Bridge" - echo " ./run.sh rialto-millau [stop|update] Run Rialto <> Millau Networks & Bridge" - echo " " - echo "Options:" - echo " --no-monitoring Disable monitoring" - exit 1 -} - -RIALTO=' -f ./networks/rialto.yml' -MILLAU=' -f ./networks/millau.yml' -ETH_POA=' -f ./networks/eth-poa.yml' -MONITORING=' -f ./monitoring/docker-compose.yml' - -BRIDGE='' -NETWORKS='' -SUB_COMMAND='start' -for i in "$@" -do - case $i in - --no-monitoring) - MONITORING=" -f ./monitoring/disabled.yml" - shift - ;; - poa-rialto) - BRIDGE=$i - NETWORKS+=${RIALTO} - NETWORKS+=${ETH_POA} - shift - ;; - rialto-millau) - BRIDGE=$i - NETWORKS+=${RIALTO} - NETWORKS+=${MILLAU} - shift - ;; - start|stop|update) - SUB_COMMAND=$i - shift - ;; - *) - show_help "Unknown option: $i" - ;; - esac -done - -if [ -z "$BRIDGE" ]; then - show_help "Missing bridge name." -fi - -BRIDGE_PATH="./bridges/$BRIDGE" -BRIDGE="-f $BRIDGE_PATH/docker-compose.yml" -COMPOSE_FILES=$BRIDGE$NETWORKS$MONITORING - -# Compose looks for .env files in the the current directory by default, we don't want that -COMPOSE_ARGS="--project-directory . --env-file " -COMPOSE_ARGS+=$BRIDGE_PATH/.env - -# Read and source variables from .env file so we can use them here -grep -e MATRIX_ACCESS_TOKEN -e WITH_PROXY $BRIDGE_PATH/.env > .env2 && . ./.env2 && rm .env2 - -if [ ! 
-z ${MATRIX_ACCESS_TOKEN+x} ]; then - sed -i "s/access_token.*/access_token: \"$MATRIX_ACCESS_TOKEN\"/" ./monitoring/grafana-matrix/config.yml -fi - -# Check the sub-command, perhaps we just mean to stop the network instead of starting it. -if [ "$SUB_COMMAND" == "stop" ]; then - - if [ ! -z ${WITH_PROXY+x} ]; then - cd ./reverse-proxy - docker-compose down - cd - - fi - - docker-compose $COMPOSE_ARGS $COMPOSE_FILES down - - exit 0 -fi - -# See if we want to update the docker images before starting the network. -if [ "$SUB_COMMAND" == "update" ]; then - - # Stop the proxy cause otherwise the network can't be stopped - if [ ! -z ${WITH_PROXY+x} ]; then - cd ./reverse-proxy - docker-compose down - cd - - fi - - - docker-compose $COMPOSE_ARGS $COMPOSE_FILES pull - docker-compose $COMPOSE_ARGS $COMPOSE_FILES down - docker-compose $COMPOSE_ARGS $COMPOSE_FILES build -fi - -docker-compose $COMPOSE_ARGS $COMPOSE_FILES up -d - -# Start the proxy if needed -if [ ! -z ${WITH_PROXY+x} ]; then - cd ./reverse-proxy - docker-compose up -d -fi diff --git a/polkadot/bridges/deployments/types.json b/polkadot/bridges/deployments/types.json deleted file mode 100644 index b7b0c35d2f9b019aa1a3ea705427555c0591c011..0000000000000000000000000000000000000000 --- a/polkadot/bridges/deployments/types.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "HeaderId": { - "number": "u64", - "hash": "Hash" - }, - "PruningRange": { - "oldest_unpruned_block": "u64", - "oldest_block_to_keep": "u64" - }, - "FinalityVotes": { - "votes": "Map", - "ancestry": "Vec" - }, - "FinalityAncestor": { - "id": "HeaderId", - "submitter": "Option
", - "signers": "Vec
" - }, - "StoredHeader": { - "submitter": "Option
", - "header": "AuraHeader", - "total_difficulty": "U256", - "next_validator_set_id": "u64", - "last_signal_block": "Option" - }, - "AuraHeader": { - "parent_hash": "Hash", - "timestamp": "u64", - "number": "u64", - "author": "Address", - "transactions_root": "Hash", - "uncles_hash": "Hash", - "extra_data": "Bytes", - "state_root": "Hash", - "receipts_root": "Hash", - "log_bloom": "Hash", - "gas_used": "u64", - "gas_limit": "u64", - "difficulty": "u64", - "seal": "Vec" - }, - "AuraScheduledChange": { - "validators": "Vec
", - "prev_signal_block": "Option" - }, - "ValidatorsSet": { - "validators": "Vec
", - "signal_block": "Option", - "enact_block": "HeaderId" - }, - "BridgedBlockHash": "H256", - "BridgedBlockHasher": "BlakeTwo256", - "BridgedBlockNumber": "u32", - "BridgedHeader": "Header", - "ImportedHeader": { - "header": "BridgedHeader", - "requires_justification": "bool", - "is_finalized": "bool", - "signal_hash": "Option" - }, - "AuthoritySet": { - "authorities": "AuthorityList", - "set_id": "SetId" - }, - "ScheduledChange": { - "authority_set": "AuthoritySet", - "height": "BridgedBlockNumber" - }, - "Id": "[u8; 4]", - "InstanceId": "Id", - "LaneId": "Id", - "MessageNonce": "u64", - "MessageId": "(Id, u64)", - "MessageKey": { - "lane_id": "LaneId", - "nonce:": "MessageNonce" - } -} diff --git a/polkadot/bridges/diagrams/ARCHITECTURE.md b/polkadot/bridges/diagrams/ARCHITECTURE.md deleted file mode 100644 index 6da88c448c95eddd726952c75ad3a49d458e0e76..0000000000000000000000000000000000000000 --- a/polkadot/bridges/diagrams/ARCHITECTURE.md +++ /dev/null @@ -1,13 +0,0 @@ -# Bridge Architecture Diagrams - -## Bridge Relay -![General Overview](general-overview.svg) -![Bridge Relay Node](bridge-relay.svg) - -## Runtime Modules -![Ethereum Pallet](ethereum-pallet.svg) -![Currency Exchange Pallet](currency-exchange-pallet.svg) - -## Usage -![Cross Chain Fund Transfer](cross-chain-fund-transfer.svg) -![Parachain](parachain.svg) diff --git a/polkadot/bridges/diagrams/bridge-architecture-diagrams.drawio b/polkadot/bridges/diagrams/bridge-architecture-diagrams.drawio deleted file mode 100644 index bf073129c2973eaa597162dbab8328e1ca56d52c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/diagrams/bridge-architecture-diagrams.drawio +++ /dev/null @@ -1 +0,0 @@ 
-5VjZctowFP0aHul4wYAfCVm70DR0JklfOootbE1kiQg52P36XmF5lSctaWkmDQ9gHclXvuccpGsN3HmSnQm0jj/xENOBY4XZwD0eOI499X34UUheII7tjQokEiTUo2pgSX5gDVoaTUmIN62BknMqyboNBpwxHMgWhoTg2/awFaftWdcowgawDBA10WsSyrhAp55V4+eYRHE5s23pnjsU3EeCp0zPxzjDRU+CyjB66CZGId82IPdk4M4F57K4SrI5porXkjH/4b1k2SKJtxfn4/lksTr3vgyLYKf73FIlJzCTzw6dffh0tZiFF6v01sZe9PCwHrLhpAj9iGiqmdS5yrykFtJeq8sgp4SFWAzco21MJF6uUaDwLVgKsFgmFFo2XN4pOnH48a4CKpI/pxKiYI1vpOD3lWBA6NFvZqoZecRC4qzhAJ35GeYJliKHIbp3OC2dmpfAVAPb2i/ORGNxwytuCSLt0aiKXrMNF5rwPcgfvxXy3ZclvzcLxyD/CFY2WGUc6wpTlBtSiIJYzd4vVDDJXRFK55xysYvmrnafQ5LuWU6bdNvyTNZtzzNZt+1Dse4arC85JSGR6smXCRISfuecSYFgl3jtEkymHd9PehRwrB7fe39BgO/Xs+D92bfTW9j6bvJ08fVKXvbYHkoCFq4RgEcpoXJ4wf4/3seTf8h771pvGbyfyBgLnCaAXvIZfC+w3HJxb7APWcs+iktKdyVLm2UNIUoipnYPIHi3dygOCVRNM92RkDBU0/RqWqtuHVSsUUesUc/mYLs9YjmHEss2xFqmd0A6kmp7WED5/NZEcgyR/JcWyVzJGv+oq5RJkii14GUnpaZer21Jc/zuVjLuWdL6BBgfSoCevbzxL/n/FRj3FbH/VIGRocA8FZB4oJ77JAtixKI3ooX10lpMDS0MljELZ+qsQy32FG02JNixCUWvCTf4BvJEfqNX+F3jVjXeeWXzOGt2HudlKyPyph4JrdtGT32TapT37KfYhqciwE+Qoo8XIMUIPxVPc4XD1imPqX9DXe+J6k3AC5wkj+2zoT7F9QyXnDDZsFdVLlbvq37HNkXm+r7moYsRyu2E8rslZkGOEQosoV5Cq2FrNWDz5EN7nZl8r+PqImbt8YrZ59veP7zt7f1sv5+FX5s17e6rzfOtaXdruoNZ03joP7QmNOtj12J4fa7tnvwE7Vtdd5s4EP01Pmf3oTlI4suPsZOm3U27aewmbd8wKIYNRi6I2O6vX2EEBklZY5/g4Lh5iTSIQbp3GM2McA8NZ8ur2Jn7n4iHwx7UvGUPXfQgBJreZ/8yyaqQGHoumcaBx2UbwSj4hYuBXJoGHk5qAykhIQ3mdaFLogi7tCZz4pgs6sMeSFh/6tyZYkkwcp1Qlt4HHvVzqQ2tjfwDDqZ+8WRg8hVPHPdxGpM04s+LSITzKzOnUMPXmPiORxYVEbrsoWFMCM1bs+UQhxmuBWL3H1f34fWjefXXl+Sn83Xw9/jz3btc2ftdbikXF+OI7q/aH7vjxXl4i6O7wSNBqO8u+S3akxOmHEm+VroqoF2DgzMlWg8NFn5A8WjuuNnVBbMmJvPpLGQ9wJoJjcljSQGDaNBw7nyNTzimeFnhlK/lCpMZpvGKDSmuIj53brJQ5/3Fhn+jz2V+hXtkcaHDbW5a6t6gxxocwB3ARBKYI5LGLpYgZZY0z5ruKgwYtvF2XCc5CdeTUlDa7T8pZVowl7cGd78Ot23JcKvQBqAttHUJ7bETTzF9C2gboGtoG7JtryKXSa4JmUuQs2VSlWMYkpAw/C/WXhYNHoIwFEROGEyjjCyG6JqqDLSAOfpzfmEWeF72GCWHdV/VFjum4HkMmRxTQQ5sixtruxPHkXeebbMZsqGTJIFbp2c3qLBX241loKo+WIFEIYtx6NDgqb6Hq+DhT7ghAZve5i0x9DOjRoUpYpysHTC/r7o5blVlia8SXXsXSdWasXLp+5NonyiJEG1FvimJsirJHlomsb+dxI2
LAvuEU1WX2YPoYf3X5r5v9WuIIk12drqlMA+9LW8H2o9ZDw6yXjdbRSgLVK8g0FoDGbw5kA1D6xrIUAK5SBi0YRjgSI5ljwxzYNcxt5ACdKAAHbWGuZw2nOo+a5svts9aYk7d8j4L5HTkNFhUxKki9PuHvJJBtM2inLjc3gwlIt92Mgmsuou0gSLAOmQ2CeRM5PRYMeyusdIgtdjm8fAyoN8y4Nh7n/e+V65cLDmm685qL4Bz39MgeO+IOxWrmhJ/jbdEKCgSA5iWXSlsEK8fg3l0a7dFonmIrDY1DxMar2se8tFEnEZ/lHwk6aQHmWotnweTYOr/eWI+X6+TDRQ1d6Aqurfm86Gcq9RYYxwJrDEeT401EzWgzbIOSZsqOTHDjAMveGLNadYcpRNGg0OzZL92lJKPZE+uDD4xToFQqrEVlB40+oJmE0ovqY9jnM5+MyrH02bXGG1waHbspfq+Imc5aKkeNjjUOnKQ9dcHWc4Mb2Li4iRhwk8sRiAea9ziJA1pcmJuBwnBAVKk8Yf1O8WEKmx9nXt5GFB8v8JZOz2yrDPxHExBFzIOSZecV5d0lWc1v+nim7ySLnh2UMLg9i2njUIIAzdeVW7Kut+r1za3rXstFVD4XlAmhNvCn44WWox9Cy26kMTrtqDomUILswdnVRk2zwYkkjW+QCkGyaWYgxjoCxtaxwxIL851C96NfQ1IVNTvnAG9wAn2EXu4IqfY6uL6nTZQY99jW90UFDX8xO15A204YVv733mZwonZruOLT2afP67ebbw4f2E8a+SI7Ptuxv7UuvkBJ++CMfn3Uvvxzf14p/g5RRnTv8YHPVIw1/Q9fL6Oo9VZaPODHtbd/LgmZ2jz6yV0+R8=7Vxbd6I6FP41PtoFhOuj2jr1zFjbOr3oy1mIUahIHMCqfTi//SQQlJsa26qo0zWrJRsIw97f/vbOTkIJ1MbzH64+MZuoD+2SwPXnJXBdEgSeEzX8h0gWkUQSQ8nQtfpUthK0rQ8YXUilU6sPvcSFPkK2b02SQgM5DjT8hEx3XTRLXjZAdvKpE30IM4K2odtZ6YvV981QqkrcSn4LraEZPZnn6JmeboyGLpo69HkOcmB4ZqxH3dBLPVPvo1lMBG5KoOYi5IdH43kN2kSvkcY6wsNtxX2ZP92Zzdn0X69lvw3LYWf1XW5ZvpwLHf/TXT8/NH/fPFbE22Zt6nTrlvHDmtNbuHfdnlJN0nf1F5FqA+VA0glXAtWZafmwPdENcnaG0YRlpj+2cYvHhwPLtmvIRi5uB6oEVfoE6PpwnjLWlvfhl0rGwIVoDH13ge+jvQiRCRcpY89iAFCozIwbPzKoTkE3XPa9Uh8+oBrM12bj58/RU/WpbjxKgLt2TH5SeY0MtUmb0OlXCN5xy7B1z7OMpALh3PJfY8cdovcribau59QMQWNBG7uoE/ul7g6hv+EtlPA62E+4XNYOMT1LOWqOZC60dd96TzpqnurpE+6RhV9jaWZeSpoZiCnreWjqGpDeFcd/qqOlH0cdSamOQsVkOgqQsHztL4BDYkAH1nmbNpHrm2iIHN2+WUmrSW9cXfMLoQmFzRv0/QWlaH3qoyTAdgNLqNwNbwUYQSUygooZLV9yVJAxRWM8wcrEsrY1dLB+Be4W6n3obqBDfjsd7qbsDEeu5z5VSmCZl7Pct6S5uFPKe6M++RzRLTKiWyoUusX16C4yqEUtSdAFALVyjqCWGEEtFwrU2ehZw3olmH7WbYsQtu6Q3w3nnbZDrHvFBLusFg7sWiHAjvXsLl5XWS9pdqKslzRWOXDQ+lwSvNVJZEYn4fnv9pL87FXjk3hR2JJXPNbQF7HLJuQCb8NjuFRiwaUGlensXFA23oAPwv/Ct6bSYhaqdygXrL/0HrSTAMPsMHTwsYGBgkMhqBKntQzdrtATY6vfD7EMPetD7wX9EYhR5eHOpWpJus4F3WbXyg6CaUGEPqUUryzk0QZ3xalASWi8TN30swCLLkGDgQf
3MvIRGKoMpxdGmRmCK1QcFdWMMTowGySL4zxL9HzZecrcFR45yUV3l+xI6jrIZ2YQ/xpYGPIE4STfWQS5wZosx0Tj3tQ7XoajiqkMB+RkOOCg9TnhLEswCiMRgWLxUBbmBeehCDzfwkOprKnonASyFbOSINtYX9W+9Y4Ph+SQJGGhED8kJj+Gk0WF9FXxvBM7k19IL8jYg9Wh1e926PwcX9SUqyRcgXbY0nm2DNPATLEs1RYt8oHUIApw2cin5gQ+dV9xD5xlHUtldBPtQG6SqslL3GGdJJvXL6u+T453irMaonLsmpgoHiV0rQlD/GHDkMbqX6wTxl+dCVZSM/6ZqeA9e1i26nSS8yrHd6vI9ucVj6JCzxFqxl8r/GfLc49wprskXLSnvbHl+0WFNs+liwxHn0aJoHxm2GZdPMQLhcK2kJ0LP25ETwwrV+H9MBGd3YrfXitaN30EUiGdZwvpu85rZeapwCHmqfhsnIuVcl1oQKK64Hwgjmq5oJ6l22MPajGLpmpWefVc8aD1XPHIznzUKlHEtMXNzzNBdt8rLLOh97erGyMs+oHQKaQzIl+0dEYElxwvWVe3HszF5FS+Cw5cZOKz8wD3U8fyTCyr6qfoYsIeR8NPHnRbvTey4UXg7HAWi9z6DF1rsEiVDeypYfUNU3f98JYGtYNvdpvC3POvX7yH8MrNtXI59v7UpDEjxFScZwH8b4Acnzomz9N2tKWjRABHfrDcw3dazhBLAWn5LhrBFpH5RLM8t7s/7lApBHLCiKoCMkbU5KwNFWFnG+JmzIwbzVozYRBsBlhXAleZYtpziTawU5BZOa5m6s4wmuNcZ+4yNvft80ipdM1XFnMrF2BuoIgFNPdmO/6H7fgxrvm1udBnsaPKasd45OTVHAvm2ybJvrpr0FuCILo1ons4rvjRxqVoYxeJvsvdTD0bYfAHorplL2k8tqPpiuMAL8RnY1dzs/eYEbFpyFR+MgEoc7HbchKB9I3fnAEorKu8lE/smlpuQkyMliLhbqnC+gExewrwCaqzBviXA2fLwWtptUSJTNGSfZypNUrrHAa+dZu/P0Yv/zy3OiwOo10A8UnRkOaEiM/GdpSu1cZb76HMYEeVY7XjX+I7JPGxLkJRWKfXz4P4GJP24ajbNLtQaz5UZBYnEL5CZusQLnGnQnOpndzROoAtLBctZT4Gy4nYwI7VgvPH4Q2LgcGFsJysiaIIBFVTT4vvVNZqqsK6Le48+C53hcM6pzDsbtPSfk0rr9OfLE4hHiiFC66jHfNbuS64A2M8K4n1PQh+9lmoUlKkyOdNt4hZXEWlkf2wYsPBJ+J44DD8EClx+MglH2nZCBAPdZuPd3B0bzoeC0CkC8jxZYk/uRy/iu340pOa7WlbYLEjc03yxKPfacW8KHXfvtCc9fsG5xHzmro7KsX2j5PnClydljX6m12jNek2u8q8/CJe1Vlc4xLqt4qqnRzFPWM7jv80fjVuJn9Y7Pi3fltEilNZP3ahXlb99t6dku/cca2gPnvLUp/VsEM8d64mPX94x+IQl1Cf1YTkXvxTIDbjT7fZK5uPVqs1Z7Cj9rc+W0hiY94keFn12bBeEWx1qgaGx+ymB8tTSkI1mcWx0V4Hu4v8Xgei+MQyH6/xrO5ylpVcnudSKzNOoJbLYxPXXzv3//hig8XEzMX6y2TEZQlY40T1SOzIvHeNdSvOebDjXTBTv4cJLI15fuMsaU9OrcqMvoZ5VNK7cbaUKna18ZdKFSdvY16W0itHD2llLFp9ajwkhdW33MHN/w==5VpdV6M8EP41vXRPAqUtl9rWj3P86K67x1dv9qSQQjQQDMG2/vo3gSCf1lqrrWe9kUyGgTzzzGQmtGMOg8UJR5F/wVxMOwZwFx1z1DEMCLq2/Kcky1xidTOJx4mrZYXgmjzjXFFLE+LiuKIoGKOCRFWhw8IQO6IiQ5yzeVVtxmj1qRHycENw7SDalN4QV/iZdGCBQn6KiefnT4ZAz0y
R8+BxloT6eSELcTYToNyMVo195LJ5SWSOO+aQMyayq2AxxFThmiN2PT4fJ/dD93Hi9/2jx8mjMWEHmbHj99zysjiOQ7Gx6cNxd+CeUfE4t9njYhksEXIODL00scyRxK4EVg8ZFz7zWIjouJAepWhhZRXIUaFzzlgkhVAK77EQS80SlAgmRb4IqJ6Vq+DL//T96eBWDX5Y+XC0KE+Olnq0Jgwarpgl3NGLurzD4Z+Zc3N7B/4KByx/To2hhgsIxD0sVmCk7SlgSmTTIJ9gFmD5klKBY4oEeaoyEmliey96hYvkhfZSOvwTY341vVfhYQCKpphmt/7mKIyRIwgLM5s0cYjr+IiLTP1Me9h7uLvw77B98fOwl2nmvn99/VWulHhQ8tfcJwJfRyhFcy4zSIUESgNxR3vbUo6asVDoMYR6PGSU8dS0CdI/KY+lTRJ6UmqqkeDsAV8pmVAuh+D9Tn/CXODFSjfls3lY65RnQo3ZvMgfthb5pdTRHbzbsXJY8u07orO3i+jcPMpa17BulFlfFGXrIa/f+gnRRD9pgngsdwbQiMaSd6pB0RY2G+O8PrFNWCU27IEGsV/IX2Z2rrd1MM3dbjLFvnJbntv6JrNq83iT/mCv6N9t0P9WVndtXjzP9qky8ogSL5TXjkQQy5R/pLhLZK12qCcC4rqZk3FMntE0taewjxgJRboW66hjjVq9sYphjSB5KTj1QyqVW1vwHIAfEPT7lQDKi6S1odfWJ2o1JRU2m8WSA3XfvLzE5u6CO9kovlmEwa3Xcemth5yjZUlBc7hJgzw996vpuduvle01fQgGg1U3yIvsFbbLqH4jA1yyPU4AeQRsJQPYwLYrkO9/Amh46yzOYFPPZzP1EqqtJjK+ZTi7qvGlzHnomMfNIsZnwTSJd1jAAKuK/sBqFjBmSwED4WdVMHDHJcxX9cmrKvPvkmCtbq2xA/YX5Evrm1VMcJsl06BnGd8rYTbddYqeVHs3kzGrUieiHCNXLXOKcahQoYgEKpr3MGN27VrL121p+UBbxgSfljFBA6bvd3bRXzPz2dtOfB+Cvlm6jXDEYqIiS8YXS1/EIRHBWbA5joRdNFm9D4cZg+6+HWbAnRyZb5nY9s629A9hbzeY/QvPEVfl7HUyDYhQu+de8hgCo9rFfSaRX/uQMA7dzNb7PyCsYMe/+wUB1JJTW6PyhZ8Q7Mu+Efds/3QijJuDoTGZLY5bDrIPFRicOGm1g9NecM4SmvaEShZKWSqdSbRljvBJs46NfRSpS1miUpaIt+MqwlxWT2lxm980KUSf5qJe7bClv2bQDbawe+C/1IPPEb8ySASdE352FrHdfG/dfPNoXcO6X3T6u9o7Vr11KRCGLFRMUkznLMgIj1W/JYv9hEuwnGVRMVEmaVuZ2cdtps73Ly2XWmFvnky9co74z56lrGLrm7FT9OCGNr1pz52fntRbSbvGjGyJ+q6CHA1Djcq9bijDYIWhj5wCyGHxw5xMvfjlkzn+Hw==5VhdU6MwFP01POoAgaqPtlZ3R3d0ts7sc4BbyBhIJ4R++Os3KeEzONZVnLr2BXJJbsg599x7i4Vm6faG41Xyi0VALdeOtha6slzXsb0LeVGWXWXxvdIScxJpW2NYkGeoJmprQSLIOxMFY1SQVdcYsiyDUHRsmHO26U5bMtrddYVjMAyLEFPT+odEIimt577d2H8AiZNqZ8fWTwIcPsWcFZneL2MZlE9SXLnRU/MER2zTMqG5hWacMVHepdsZUIVrhdht+pM4cjMes+y68Cfp+vH2pHR2/ZYl9eE4ZOJjXbul6zWmhUZyLhLgUKT6zGJXQSyPv1K34Y6SLAJuoekmIQIWKxwq+0aGlrQlIqVy5MjbQMEK0V1QG2qw7wshvYC2H3g+jcMauIBti3d93htgKQi+k1P0Uxdp6nRYu+d6vGlipI6EpB0f1UKs4zKufTcIyxsN8hsARwbgiyLIBccC/gfEvcnRIe4ZiF+GgvEX0S5SWk5
AU3VuIpPMHQ6APrCcCMIyOSVgQrBUTqDqwbTGeMaoWie9oeX+1/JxSUms1grW44yVxMzqzGiPyI/jd/nxfZMfNEDPZCx2JgY7BjGQRZeqRCglUJznJOwC+DasIOpUEhOpFhL+ABKVjQPFgqy79WcIHr3DAyPy9RoiLnpEuD2Ec1bwEPSqdlrvOXK9riOv70hgHoMwHO3Zqo/97wRWHUCLQedUjheSNnm5Y+GTvDxuTVolEXthddnEWiehZBP4gIBSEkXKx5RDTp5xsPenNLNSR9wf2p9a/tXhkVHFYF9FdbuiN+nU/SF12ae263Xz3/uCpZrClsscxqHPMehzFX3zlKgXmK9Bh81QqsyYgAOK0gs1KML86V6uIkKBJbHzP7EVqPXXLkzOgN7Px8p8jtl7TWVvLbOTa/+W8bIzYOdlddfwvYL6WECiXr7x7QEg3c8sIY7ZU32PGoLcV1L/oTUE9boCrx/0Y9cQs0dDp3sZ4OjFJHQ89aMOwA8oIM7ki9UP/5uKz+uLr6+ZQ8XnnfUc9f8JjS0+swX3Wg2c6tzsB87Y8pgVWEXhuxV4IiXYbRSOXoFnBn++4u+G4/0bXMumITe4+4ItXP/bwpgtnBw2H/ZKppovp2j+Fw==3ZnLcpswFIafxstmEEIOLGPHSWbaTtx60aY7GVRgAsgjhI3z9BVG4ibqOB5fszL6EQfrO7+kI3sAx3H+yPAi+E49Eg1Mw8sH8H5gmsCwHPFRKGulIKtUfBZ6UquFWfhGVEepZqFH0lZHTmnEw0VbdGmSEJe3NMwYXbW7/aVR+60L7BNNmLk40tVfoceDUrWRUetPJPQD9WZgyDtz7L76jGaJfF9CE1LeibEKI7umAfboqiHByQCOGaW8vIrzMYkKrorY16c0c//APGEPy2ecvC3Wyx9fymAPH3mkGhwjCT9saLMMvcRRJklOafQqxsnlmPlaIRbDXxSX7joKE4+wARytgpCT2QK7hb4S1hJawONItIC4nBdYifdtXgkV7OeMiyhE6juOT3JYEsZJ3si7HO8joTHhbC26yLu2zJx0tXmLyvaqtogJZZ+gYQ+oRCxt6Veha8DiQjL+AG+o8f4MnC0TtUEr8E3QqA+0eSzQlgZ6wgPCSBYL9WeW8DAW89wQS2EWES0HrCQqsb2D/2hUrTZVC/VQBX1UjwUVaVBn2TzlDHNytVQRODfVoUZ1nDExbLf41pPcDXDiXy9f6Jybr/3+mksS766oQoplN8JpGrqCUMox47q8N8OUZswl72/G4rU+2RZPbiLEa9VEekYaxPsWX6UxEmEeLtuVVF8W5BumNBTDrRIOUXuXRUYnkeW45VPNAqUTyDLagar1TgUqwWiBNqaohr2/T5ytq9sIp2JqmcY4wGEiPu8+w8Ztd5Cb+lwFdo91zKPt22pl2DELo8+QBWA47TRA6/x5AFoeGgXUlN7VSbj+BHSnwbCn0jo1f/1kNhIn77IQEAv2Or3M7d8GHSv3HAXAsAclsI6GcodD154FAMlD/ltcGzeOM5Ttl00bQUu27wsyhmqsG40pYaKe48UU2WgHLifgjuWEstqF1BPDzmJoWc4N2q+i6M7rnlBHrimAfg49mPcS8dVK8yHVfGneq423aTWd13hMOXarWw/sTLW5XJs17Y41h90zy87G7ARCJy51gX6SP7ctD22xXQ9TzmU5DP2nEvyow5xuZe+c2GH6rxqH3nRBe8M96QK2q7tUNXQh9gJGpzyzbvf0FzCGnUjdUnhvg4lm/adK2b3+1wpO/gE= \ No newline at end of file diff --git a/polkadot/bridges/diagrams/bridge-relay.svg b/polkadot/bridges/diagrams/bridge-relay.svg deleted file mode 100644 index 
2907a7c7fce9d99b60158af33633db9e6b871d34..0000000000000000000000000000000000000000 --- a/polkadot/bridges/diagrams/bridge-relay.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
Source
Source
Target
Target
Sync Loop
Sync Loop
Source Client
Source Client
RPC
RPC
RPC
RPC
run(source=sub, target=eth)
run(source=sub, target=eth)
run(source=eth, target=sub)
run(source=eth, target=sub)
Substrate Sync Loop
Substrate Sync Loop
Ethereum Sync Loop
Ethereum Sync Loop
Process Method Results
Process Method Results
Update Target Methods
Update Target Methods
Update Source Methods
Update Source Methods
Target Client
Target Client
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/bridges/diagrams/cross-chain-fund-transfer.svg b/polkadot/bridges/diagrams/cross-chain-fund-transfer.svg deleted file mode 100644 index 5fd9ced1d436773b94e060b9eacdc1f4bb0e7e33..0000000000000000000000000000000000000000 --- a/polkadot/bridges/diagrams/cross-chain-fund-transfer.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
Ethereum
Ethereum
Substrate
Substrate
Actor
Actor
1. Send Lock Tx
1. Send Lock Tx
2. Emit Event
2. Emit Event
Bridge Relay
Bridge Relay
3. Read Event
3. Read Event
4. Send Tx Proof
4. Send Tx Proof
5. Grant Funds
5. Grant Funds
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/bridges/diagrams/currency-exchange-pallet.svg b/polkadot/bridges/diagrams/currency-exchange-pallet.svg deleted file mode 100644 index 1f1b2ef7b5ce98da060efd829fcce4628f50c21b..0000000000000000000000000000000000000000 --- a/polkadot/bridges/diagrams/currency-exchange-pallet.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
Transaction
Transaction
Parse Transaction
Parse Transaction
Yes
Yes
No
No
Is part of a finalized block?
Is part of a finalize...
Yes
Yes
Have funds already been claimed?
Have funds alrea...
Deposit into recipient account
Deposit into recipie...
Reward Submitter
Reward Submitter
End
End
A price feed would be needed for this
A price feed would b...
Convert from foreign currency into local currency
Convert from foreign...
No
No
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/bridges/diagrams/ethereum-pallet.svg b/polkadot/bridges/diagrams/ethereum-pallet.svg deleted file mode 100644 index 934255be226084145acf75b55808d8d28bd1fa3e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/diagrams/ethereum-pallet.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
Import Signed Header
Import Signed Header
Import Header
Import Header
Count Valid and Invalid Headers
Count Valid and Inva...
No
No
Yes
Yes
Did we finalize any headers
Did we finalize any h...
Yes
Yes
No
No
Is Signed
Is Signed
Import Unsigned Header
Import Unsigned Head...
Import Header
Import Header
Reward Submitter
Reward Submitter
Did we receive valid headers?
Did we receive valid he...
Track Good Submitter
Track Good Submitter
Punish Bad Submitter
Punish Bad Submitter
Verify Header
Verify Header
Check for Authority Set Changes
Check for Authori...
Check if new header finalizes old headers
Check if new head...
Header
Header
Import Header
Import Header
Insert Header into Storage
Insert Header int...
Mark Headers as Finalized
Mark Headers as F...
Prune Old Headers
Prune Old Headers
Imported Block Hash + Finalized Headers
Imported Block Ha...
New Header
New Header
End
End
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/bridges/diagrams/general-overview.svg b/polkadot/bridges/diagrams/general-overview.svg deleted file mode 100644 index d7706893ab9d147bc741ec879e8ef3996261c44d..0000000000000000000000000000000000000000 --- a/polkadot/bridges/diagrams/general-overview.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
Bridge Relay
Bridge Relay
Solidity Smart Contract
Solidity Smart Contract
Grandpa Built-In
Grandpa Built-In
Ethereum PoA Network
Ethereum PoA Network
Substrate Node
Substrate Node
Ethereum Runtime Module
Ethereum Runtime Module
Substrate Runtime Module
Substrate Runtime Module
Currency Exchange Runtime Module
Currency Exchange Runtime Module
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/bridges/diagrams/parachain.svg b/polkadot/bridges/diagrams/parachain.svg deleted file mode 100644 index a1a15f172cf03a4704a90c8a40160809ca188e5a..0000000000000000000000000000000000000000 --- a/polkadot/bridges/diagrams/parachain.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
Polkadot
Polkadot
Ethereum Runtime Module
Ethereum Runtime Module
Substrate Runtime Module
Substrate Runtime Module
Currency Exchange Runtime Module
Currency Exchange Runtime Module
Substrate Based Chain A
Substrate Based Chain A
Substrate Based Chain B
Substrate Based Chain B
Ethereum PoA Chain
Ethereum PoA Chain
Bridge Relays
Bridge Relays
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/bridges/docs/high-level-overview.md b/polkadot/bridges/docs/high-level-overview.md deleted file mode 100644 index 763371bbf193f900c76a61a5da0588a0bca576dd..0000000000000000000000000000000000000000 --- a/polkadot/bridges/docs/high-level-overview.md +++ /dev/null @@ -1,177 +0,0 @@ -# High-Level Bridge Documentation - -## Purpose - -Trustless connecting between two Substrate-based chains using GRANDPA finality. - -## Overview - -Even though we support two-way bridging, the documentation will generally talk about a one-sided -interaction. That's to say, we will only talk about syncing headers and messages from a _source_ -chain to a _target_ chain. This is because the two-sided interaction is really just the one-sided -interaction with the source and target chains switched. - -To understand the full interaction with the bridge, take a look at the -[testing scenarios](./testing-scenarios.md) document. It describes potential use cases and describes -how each of the layers outlined below is involved. - -The bridge is built from various components. Here is a quick overview of the important ones. - -### Header Sync - -A light client of the source chain built into the target chain's runtime. It is a single FRAME -pallet. It provides a "source of truth" about the source chain headers which have been finalized. -This is useful for higher level applications. - -### Headers Relayer - -A standalone application connected to both chains. It submits every source chain header it sees to -the target chain through RPC. - -### Message Delivery - -A FRAME pallet built on top of the header sync pallet. It allows users to submit messages to the -source chain, which are to be delivered to the target chain. The delivery protocol doesn't care -about the payload more than it has to. Handles replay protection and message ordering. - -### Message Dispatch - -A FRAME pallet responsible for interpreting the payload of delivered messages. 
- -### Message Relayer - -A standalone application handling delivery of the messages from source chain to the target chain. - -## Processes - -High level sequence charts of the process can be found in [a separate document](./high-level.html). - -### Substrate (GRANDPA) Header Sync - -The header sync pallet (`pallet-substrate-bridge`) is an on-chain light client for chains which use -GRANDPA finality. It is part of the target chain's runtime, and accepts headers from the source -chain. Its main goals are to accept valid headers, track GRANDPA finality set changes, and verify -GRANDPA finality proofs (a.k.a justifications). - -The pallet does not care about what block production mechanism is used for the source chain -(e.g Aura or BABE) as long as it uses the GRANDPA finality gadget. Due to this it is possible for -the pallet to import (but not necessarily finalize) headers which are _not_ valid according to the -source chain's block production mechanism. - -The pallet has support for tracking forks and uses the longest chain rule to determine what the -canonical chain is. The pallet allows headers to be imported on a different fork from the canonical -one as long as the headers being imported don't conflict with already finalized headers (for -example, it will not allow importing a header at a lower height than the best finalized header). - -When tracking authority set changes, the pallet - unlike the full GRANDPA protocol - does not -support tracking multiple authority set changes across forks. Each fork can have at most one pending -authority set change. This is done to prevent DoS attacks if GRANDPA on the source chain were to -stall for a long time (the pallet would have to do a lot of expensive ancestry checks to catch up). - -Referer to the [pallet documentation](../modules/substrate/src/lib.rs) for more details. - -#### Header Relayer strategy - -There is currently no reward strategy for the relayers at all. 
They also are not required to be -staked or registered on-chain, unlike in other bridge designs. We consider the header sync to be -an essential part of the bridge and the incentivisation should be happening on the higher layers. - -At the moment, signed transactions are the only way to submit headers to the header sync pallet. -However, in the future we would like to use unsigned transactions for headers delivery. This will -allow transaction de-duplication to be done at the transaction pool level and also remove the cost -for message relayers to run header relayers. - -### Message Passing - -Once header sync is maintained, the target side of the bridge can receive and verify proofs about -events happening on the source chain, or its internal state. On top of this, we built a message -passing protocol which consists of two parts described in the following sections: message delivery and -message dispatch. - -#### Message Lanes Delivery - -The [Message delivery pallet](../modules/message-lane/src/lib.rs) is responsible for queueing up -messages and delivering them in order on the target chain. It also dispatches messages, but we will -cover that in the next section. - -The pallet supports multiple lanes (channels) where messages can be added. Every lane can be -considered completely independent from others, which allows them to make progress in parallel. -Different lanes can be configured to validate messages differently (e.g. higher rewards, specific -types of payload, etc.) and may be associated with a particular "user application" built on top of -the bridge. Note that messages in the same lane MUST be delivered _in the same order_ they were -queued up. - -The message delivery protocol does not care about the payload it transports and can be coupled -with an arbitrary message dispatch mechanism that will interpret and execute the payload if delivery -conditions are met. Each delivery on the target chain is confirmed back to the source chain by the -relayer. 
This is so that she can collect the reward for delivering these messages. - -Users of the pallet add their messages to an "outbound lane" on the source chain. When a block is -finalized message relayers are responsible for reading the current queue of messages and submitting -some (or all) of them to the "inbound lane" of the target chain. Each message has a `nonce` -associated with it, which serves as the ordering of messages. The inbound lane stores the last -delivered nonce to prevent replaying messages. To successfully deliver the message to the inbound lane -on the target chain the relayer has to present a storage proof which shows that the message was -part of the outbound lane on the source chain. - -During delivery of messages they are immediately dispatched on the target chain and the relayer is -required to declare the correct `weight` to cater for all messages dispatch and pay all required -fees of the target chain. To make sure the relayer is incentivised to do so, on the source chain: -- the user provides a declared dispatch weight of the payload -- the pallet calculates the expected fee on the target chain based on the declared weight -- the pallet converts the target fee into source tokens (based on a price oracle) and reserves - enough tokens to cover for the delivery, dispatch, confirmation and additional relayers reward. - -If the declared weight turns out to be too low on the target chain the message is delivered but -it immediately fails to dispatch. The fee and reward are collected by the relayer upon confirmation -of delivery. - -Due to the fact that message lanes require delivery confirmation transactions, they also strictly -require bi-directional header sync (i.e. you can't use message delivery with one-way header sync). - -#### Dispatching Messages - -The [Message dispatch pallet](../modules/call-dispatch/src/lib.rs) is used to perform the actions -specified by messages which have come over the bridge. 
For Substrate-based chains this means -interpreting the source chain's message as a `Call` on the target chain. - -An example `Call` of the target chain would look something like this: - -```rust -target_runtime::Call::Balances(target_runtime::pallet_balances::Call::transfer(recipient, amount)) -``` - -When sending a `Call` it must first be SCALE encoded and then sent to the source chain. The `Call` -is then delivered by the message lane delivery mechanism from the source chain to the target chain. -When a message is received the inbound message lane on the target chain will try and decode the -message payload into a `Call` enum. If it's successful it will be dispatched after we check that the -weight of the call does not exceed the weight declared by the sender. The relayer pays fees for -executing the transaction on the target chain, but her costs should be covered by the sender on the -source chain. - -When dispatching messages there are three Origins which can be used by the target chain: -1. Root Origin -2. Source Origin -3. Target Origin - -Senders of a message can indicate which one of the three origins they would like to dispatch their -message with. However, there are restrictions on who/what is allowed to dispatch messages with a -particular origin. - -The Root origin represents the source chain's Root account on the target chain. This origin can -only be dispatched on the target chain if the "send message" request was made by the Root origin of -the source chain - otherwise the message will fail to be dispatched. - -The Source origin represents an account without a private key on the target chain. This account will -be generated/derived using the account ID of the sender on the source chain. We don't necessarily -require the source account id to be associated with a private key on the source chain either. This -is useful for representing things such as source chain proxies or pallets. 
- -The Target origin represents an account with a private key on the target chain. The sender on the -source chain needs to prove ownership of this account by using their target chain private key to -sign: `(Call, SourceChainAccountId).encode()`. This will be included in the message payload and -verified by the target chain before dispatch. - -See [`CallOrigin` documentation](../modules/call-dispatch/src/lib.rs) for more details. - -#### Message Relayers Strategy diff --git a/polkadot/bridges/docs/high-level.html b/polkadot/bridges/docs/high-level.html deleted file mode 100644 index 3c4c6178c9517f2171356e97ad942877a457b429..0000000000000000000000000000000000000000 --- a/polkadot/bridges/docs/high-level.html +++ /dev/null @@ -1,55 +0,0 @@ - - - - - - High Level Bridge Components - - -

Header Sync

-

The header pallet on the target chain keeps track of the forks, but requires finality for blocks that perform authority set changes. That means it won't sync a fork with an authority set change unless that change is finalized.

-
- sequenceDiagram - participant Source Chain - participant Relayer - participant Target Chain - Note right of Target Chain: Best: 0, Finalized: 0 - Source Chain ->> Source Chain: Import Block 1 - Source Chain ->> Source Chain: Import Block 2 - Relayer ->> Target Chain: Submit Block 1 - Note right of Target Chain: Best: 1, Finalized: 0 - Relayer ->> Target Chain: Submit Block 2 - Note right of Target Chain: Best: 2, Finalized: 0 - Source Chain ->> Source Chain: Import Block 2' - Relayer ->> Target Chain: Submit Block 2' - Note right of Target Chain: Best: 2 or 2', Finalized: 0 - Source Chain ->> Source Chain: Finalize Block 2' - Relayer ->> Target Chain: Submit Finality of Block 2' - Note right of Target Chain: Best: 2', Finalized: 2' -
-

Message Delivery (single lane)

-

Pending messages are stored on-chain (source) so the relayer code is completely stateless - it can read all the details from the chain.

-

Delivering pending messages requires finality first.

-
- sequenceDiagram - participant Source Chain - participant Relayer - participant Target Chain - Source Chain ->> Source Chain: Queue Message 1 - Source Chain ->> Source Chain: Queue Message 2 - Source Chain ->> Source Chain: Queue Message 3 - Note left of Source Chain: Queued Messages: [1, 2, 3, ] - Note left of Source Chain: Reward for [1, 2, 3, ] reserved - Relayer ->> Target Chain: Deliver Messages 1..2 - Note right of Target Chain: Target chain dispatches the messages.
To Confirm: {1..2 => relayer_1} - Relayer ->> Source Chain: Delivery Confirmation of 1..2 - Note left of Source Chain: Queued Messages: [3, ] - Note left of Source Chain: Reward payout for [1, 2, ] - Relayer -->> Target Chain: Confirmed Messages 1..2 - Note right of Target Chain: To Confirm: {} - Note over Relayer, Target Chain: (this is not a separate transaction,
it's bundled with the "Deliver Messages" proof) -
- - - - diff --git a/polkadot/bridges/docs/plan.md b/polkadot/bridges/docs/plan.md deleted file mode 100644 index 9c4106d9ade8e0b87fe040bbdea4be9a5e0d886d..0000000000000000000000000000000000000000 --- a/polkadot/bridges/docs/plan.md +++ /dev/null @@ -1,22 +0,0 @@ -Plan for the Internal Audit: -1. High-level overview (describing layers, maybe with pictures) - - what have we done already. - [Tomek to present] - [Hernando to help with diagrams today] - -2. Demo? How to play with the network. - [Hernando] - -3. Demo of token transfer on Millau. - [Hernando] - -4. Go through the scenario description and let people ask questions in the meantime. - Jump to the code on demand. - [Tomek, Hernando, Slava] - - ... - -5. The roadmap - - outstanding issues. - [Tomek] - diff --git a/polkadot/bridges/docs/poa-eth.md b/polkadot/bridges/docs/poa-eth.md deleted file mode 100644 index 43b30f8bb737e41b541fb5079aaa77044627b47c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/docs/poa-eth.md +++ /dev/null @@ -1,71 +0,0 @@ -# PoA Ethereum High Level Documentation - -NOTE: This is from the old README - -### Ethereum Bridge Runtime Module -The main job of this runtime module is to keep track of useful information an Ethereum PoA chain -which has been submitted by a bridge relayer. This includes: - - - Ethereum headers and their status (e.g are they the best header, are they finalized, etc.) - - Current validator set, and upcoming validator sets - -This runtime module has more responsibilties than simply storing headers and validator sets. It is -able to perform checks on the incoming headers to verify their general integrity, as well as whether -or not they've been finalized by the authorities on the PoA chain. 
- -This module is laid out as so: - -``` -├── ethereum -│ └── src -│ ├── error.rs // Runtime error handling -│ ├── finality.rs // Manage finality operations -│ ├── import.rs // Import new Ethereum headers -│ ├── lib.rs // Store headers and validator set info -│ ├── validators.rs // Track current and future PoA validator sets -│ └── verification.rs // Verify validity of incoming Ethereum headers -``` - -### Currency Exchange Runtime Module -The currency exchange module is used to facilitate cross-chain funds transfers. It works by accepting -a transaction which proves that funds were locked on one chain, and releases a corresponding amount -of funds on the receiving chain. - -For example: Alice would like to send funds from chain A to chain B. What she would do is send a -transaction to chain A indicating that she would like to send funds to an address on chain B. This -transaction would contain the amount of funds she would like to send, as well as the address of the -recipient on chain B. These funds would now be locked on chain A. Once the block containing this -"locked-funds" transaction is finalized it can be relayed to chain B. Chain B will verify that this -transaction was included in a finalized block on chain A, and if successful deposit funds into the -recipient account on chain B. - -Chain B would need a way to convert from a foreign currency to its local currency. How this is done -is left to the runtime developer for chain B. - -This module is one example of how an on-chain light client can be used to prove a particular action -was taken on a foreign chain. In particular it enables transfers of the foreign chain's native -currency, but more sophisticated modules such as ERC20 token transfers or arbitrary message transfers -are being worked on as well. - -## Ethereum Node -On the Ethereum side of things, we require two things. 
First, a Solidity smart contract to track the -Substrate headers which have been submitted to the bridge (by the relay), and a built-in contract to -be able to verify that headers have been finalized by the GRANDPA finality gadget. Together this -allows the Ethereum PoA chain to verify the integrity and finality of incoming Substrate headers. - -The Solidity smart contract is not part of this repo, but can be found -[here](https://github.com/svyatonik/substrate-bridge-sol/blob/master/substrate-bridge.sol) if you're -curious. We have the contract ABI in the `ethereum/relays/res` directory. - -## Rialto Runtime -The node runtime consists of several runtime modules, however not all of them are used at the same -time. When running an Ethereum PoA to Substrate bridge the modules required are the Ethereum module -and the currency exchange module. When running a Substrate to Substrate bridge the Substrate and -currency exchange modules are required. - -Below is a brief description of each of the runtime modules. - -## Bridge Relay -The bridge relay is responsible for syncing the chains which are being bridged, and passing messages -between them. The current implementation of the relay supports syncing and interacting with -Ethereum PoA and Substrate chains. diff --git a/polkadot/bridges/docs/scenario1.html b/polkadot/bridges/docs/scenario1.html deleted file mode 100644 index 808a0c34f0dc588c90c80293be6d2b5518560090..0000000000000000000000000000000000000000 --- a/polkadot/bridges/docs/scenario1.html +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - Flow Chart of Millau to Rialto Transfer - - -

Scenario: mDave sending RLT to rEve

-
- sequenceDiagram - participant mDave - participant Millau - participant Bridge Relayer - participant Rialto - participant rEve - Rialto->>Rialto: Endow r(mDave) with RLT. - mDave->>Millau: send_message(transfer, 5 RLT, rEve) - Millau->>Millau: Locks fee & reward for the relayer and queues the message. - rect rgb(205, 226, 244) - Bridge Relayer->>+Millau: What's your best header? - Millau-->>-Bridge Relayer: It's header 5. - Bridge Relayer->>+Rialto: What's the best Millau header you know about? - Rialto-->>-Bridge Relayer: I only know about 4. - Bridge Relayer->>Rialto: Cool, here is Millau header 5 [`submit_signed_header()`]. - Bridge Relayer->>+Rialto: What's the best finalized Millau header you know about? - Rialto-->>-Bridge Relayer: I only know about 3. - Bridge Relayer->>+Millau: Do you have a finality proof for 4..5? - Millau-->>-Bridge Relayer: Yes I do, here it is. - Bridge Relayer->>Rialto: Here is the finality proof for 5 [`finalize_header()`]. - end - rect rgb(218, 195, 244) - Bridge Relayer->>+Millau: Do you have any messages for me to deliver (at 5)? - Millau-->>-Bridge Relayer: Yes, here they are. - Bridge Relayer->>+Rialto: I have some new messages for you [`receive_messages_proof()`]. - Rialto->>Rialto: Validate and Dispatch Message. - Rialto->>rEve: Transfer(5 RLT) from r(mDave). - Rialto-->>-Bridge Relayer: Event(Message Succesfully Dispatched). - Bridge Relayer->>Millau: I sent your message, can I get paid now [`receive_messages_delivery_proof`]? - Millau-->>Bridge Relayer: Yes, here you go $$$. - Bridge Relayer ->>Rialto: These messages are confirmed now, feel free to clean up. - end -
- - - - diff --git a/polkadot/bridges/docs/send-message.md b/polkadot/bridges/docs/send-message.md deleted file mode 100644 index 243bf8ce599039d097ebebefd5466f00227b3ca8..0000000000000000000000000000000000000000 --- a/polkadot/bridges/docs/send-message.md +++ /dev/null @@ -1,131 +0,0 @@ -# How to send messages - -The Substrate-to-Substrate relay comes with a command line interface (CLI) which is implemented -by the `substrate-relay` binary. - -``` -Substrate-to-Substrate relay - -USAGE: - substrate-relay - -FLAGS: - -h, --help - Prints help information - - -V, --version - Prints version information - - -SUBCOMMANDS: - help Prints this message or the help of the given subcommand(s) - init-bridge Initialize on-chain bridge pallet with current header data - relay-headers Start headers relay between two chains - relay-messages Start messages relay between two chains - send-message Send custom message over the bridge -``` -The relay related commands `relay-headers` and `relay-messages` are basically continously running a -sync loop between the `Millau` and `Rialto` chains. The `init-bridge` command submitts initialization -transactions. An initialization transaction brings an initial header and authorities set from a source -chain to a target chain. The header synchronization then starts from that header. - -For sending custom messages over an avialable bridge, the `send-message` command is used. - -``` -Send custom message over the bridge. - -Allows interacting with the bridge by sending messages over `MessageLane` component. The message is being sent to the -source chain, delivered to the target chain and dispatched there. 
- -USAGE: - substrate-relay send-message - -FLAGS: - -h, --help Prints help information - -V, --version Prints version information - -SUBCOMMANDS: - help Prints this message or the help of the given subcommand(s) - millau-to-rialto Submit message to given Millau -> Rialto lane - rialto-to-millau Submit message to given Rialto -> Millau lane - -``` -Messages are send from a source chain to a target chain using a so called `message lane`. Message lanes handle -both, message transport and message dispatch. There is one command for submitting a message to each of the two -available bridges, namely `millau-to-rialto` and `rialto-to-millau`. - -Submitting a message requires a number of arguments to be provided. Those arguments are essentially the same -for both submit message commands, hence only the output for `millau-to-rialto` is shown below. - -``` -Submit message to given Millau -> Rialto lane - -USAGE: - substrate-relay send-message millau-to-rialto [OPTIONS] --lane --millau-host --millau-port --millau-signer --origin --rialto-signer - -FLAGS: - -h, --help Prints help information - -V, --version Prints version information - -OPTIONS: - --fee - Delivery and dispatch fee. 
If not passed, determined automatically - - --lane Hex-encoded lane id - --millau-host Connect to Millau node at given host - --millau-port Connect to Millau node websocket server at given port - --millau-signer - The SURI of secret key to use when transactions are submitted to the Millau node - - --millau-signer-password - The password for the SURI of secret key to use when transactions are submitted to the Millau node - - --origin - The origin to use when dispatching the message on the target chain [possible values: Target, Source] - - --rialto-signer - The SURI of secret key to use when transactions are submitted to the Rialto node - - --rialto-signer-password - The password for the SURI of secret key to use when transactions are submitted to the Rialto node - - -SUBCOMMANDS: - help Prints this message or the help of the given subcommand(s) - remark Make an on-chain remark (comment) - transfer Transfer the specified `amount` of native tokens to a particular `recipient` - -``` -As can be seen from the output, there are two types of messages available: `remark` and `transfer`. -A remark is some opaque message which will be placed on-chain. For basic testing, a remark is -the easiest to go with. - -Usage of the arguments is best explained with an example. Below you can see, how a remark -would look like: - -``` -substrate-relay send-message millau-to-rialto \ - --millau-host=127.0.0.1 \ - --millau-port=10946 \ - --millau-signer=//Dave \ - --rialto-signer=//Dave \ - --lane=00000000 \ - --origin Target \ - remark -``` -Messages are basically regular transactions. That means, they have to be signed. In order -to send a message, you have to control an account private key on both, the source and -the target chain. Those accounts are specified using the `--millau-signer` and `--rialto-signer` -arguments in the example above. - -Message delivery and dispatch requires a fee to be paid. In the example above, we have not -specified the `--fee` argument. 
Hence, the fee will be estimated automatically. Note that -in order to pay the fee, the message sender account has to have sufficient funds available. - -The `--origin` argument allows you to denote under which authority the message will be dispatched -on the target chain. Accepted values are `Target` and `Source`. - -Although not strictly necessary, it is recommended to use one of the well-known development -accounts (`Alice`, `Bob`, `Charlie`, `Dave`, `Eve`) for message sending. Those accounts are -endowed with funds for fee payment. In addition, the development `Seed URI` syntax -(like `//Dave`) for the signer can be used, which will remove the need for a password. diff --git a/polkadot/bridges/docs/testing-scenarios.md b/polkadot/bridges/docs/testing-scenarios.md deleted file mode 100644 index da2e9c0b4329f619dbd2b3ffc35c0c7b25646fe8..0000000000000000000000000000000000000000 --- a/polkadot/bridges/docs/testing-scenarios.md +++ /dev/null @@ -1,221 +0,0 @@ -# Testing Scenarios - -In the scenarios, for simplicity, we call the chains Kusama (KSM token) and Polkadot (DOT token), -but they should be applicable to any other chains. The first scenario has detailed description about -the entire process (also see the [sequence diagram](./scenario1.html)). Other scenarios only contain -a simplified interaction focusing on things that are unique for that particular scenario. - -Notation: -- kX - user X interacting with Kusama chain. -- `k(kX)` - Kusama account id of user kX (native account id; usable on Kusama) -- `p(kX)` - Polkadot account id of user kX (account id derived from `k(kX)` usable on Polkadot) -- [Kusama] ... - Interaction happens on Kusama (e.g. the user interacts with Kusama chain) -- [Polkadot] ... 
- Interaction happens on Polkadot - -Basic Scenarios -=========================== - -Scenario 1: Kusama's Alice receiving & spending DOTs ---------------------------- - -Kusama's Alice (kAlice) receives 5 DOTs from Polkadot's Bob (pBob) and sends half of them to -kCharlie. - -1. Generate kAlice's DOT address (`p(kAlice)`). - See function: - - ```rust - bp_runtime::derive_account_id(b"pdot", kAlice) - ``` - - or: - - ```rust - let hash = bp_polkadot::derive_kusama_account_id(kAlice); - let p_kAlice = bp_polkadot::AccountIdConverter::convert(hash); - ``` - -2. [Polkadot] pBob transfers 5 DOTs to `p(kAlice)` - 1. Creates & Signs a transaction with `Call::Transfer(..)` - 1. It is included in block. - 1. kAlice observers Polkadot chain to see her balance at `p(kAlice)` updated. - -3. [Kusama] kAlice sends 2.5 DOTs to `p(kCharlie)` - 1. kAlice prepares: - ```rust - let call = polkadot::Call::Balances(polkadot::Balances::Transfer(p(kCharlie), 2.5DOT)).encode(); - let weight = call.get_dispatch_info().weight; - ``` - - 1. 
kAlice prepares Kusama transaction: - ```rust - kusama::Call::MessageLane::::send_message( - // dot-transfer-lane (truncated to 4bytes) - lane_id, - payload: MessagePayload { - // Get from current polkadot runtime (kind of hardcoded) - spec_version: 1, - // kAlice should know the exact dispatch weight of the call on the target - // source verifies: at least to cover call.length() and below max weight - weight, - // simply bytes, we don't know anything about that on the source chain - call, - // origin that should be used during dispatch on the target chain - origin: CallOrigin::SourceAccount(kAlice), - }, - delivery_and_dispatch_fee: { - (single_message_delivery_weight - // source weight = X * target weight - + convert_target_weight_to_source_weight(weight) - + confirmation_transaction_weight - ) - // This uses an on-chain oracle to convert weights of the target chain to source fee - * weight_to_fee - // additional reward for the relayer (pallet parameter) - + relayers_fee - }, - ) - ``` - - 1. [Kusama] kAlice sends Kusama transaction with the above `Call` and pays regular fees. The - dispatch additionally reservers target-chain delivery and dispatch fees (including relayer's - reward). - -4. [Kusama] kAlice's transaction is included in block `B1` - -### Syncing headers loop - -5. Relayer sees that `B1` has not yet been delivered to the target chain. - [Sync loop code](https://github.com/paritytech/parity-bridges-common/blob/8b327a94595c4a6fae6d7866e24ecf2390501e32/relays/headers-relay/src/sync_loop.rs#L199). - -1. Relayer prepares transaction which delivers `B1` and with all of the missing - ancestors to the target chain (one header per transaction). - -1. After the transaction is succesfully dispatched the Polkadot on-chain light client of the Kusama - chain learns about block `B1` - it is stored in the on-chain storage. - -### Syncing finality loop - -8. Relayer is subscribed to finality events on Kusama. Relayer gets a finality notification for - block `B3`. 
- -1. The header sync informs the target chain about `B1..B3` blocks (see point 6). - -1. Relayer learns about missing finalization of `B1..B3` on the target chain, see - [finality maintenance code](https://github.com/paritytech/parity-bridges-common/blob/8b327a94595c4a6fae6d7866e24ecf2390501e32/relays/substrate/src/headers_maintain.rs#L107). - -1. Relayer submits justification for `B3` to the target chain (`finalize_header`). - See [#421](https://github.com/paritytech/parity-bridges-common/issues/421) for multiple - authority set changes support in Relayer (i.e. what block the target chain expects, not only - what I have). - - Relayer is doing two things: - - syncing on demand (what blocks miss finality) - - and syncing as notifications are received (recently finalized on-chain) - -1. Eventually Polkadot on-chain light client of Kusama learns about finality of `B1`. - -### Syncing messages loop - -13. The relayer checks the on-chain storage (last finalized header on the source, best header on the - target): - - Kusama outbound lane - - Polkadot inbound lane - Lanes contains `latest_generated_nonce` and `latest_received_nonce` respectively. The relayer - syncs messages between that range. - -1. The relayer gets a proof for every message in that range (using the RPC of message lanes module) - -1. The relayer creates a message delivery transaction (but it has weight, size, and count limits). - The count limit is there to make the loop of delivery code bounded. 
- ```rust - receive_message_proof( - relayer_id, // account id of the source chain - proof, // messages + proofs (hash of source block `B1`, nonces, lane_id + storage proof) - dispatch_weight // relayer declares how much it will take to dispatch all messages in that transaction, - ) - ``` - The `proof` can also contain an update of outbound lane state of source chain, which indicates - the delivery confirmation of these messages and reward payment, so that the target chain can - truncate its unpayed rewards vector. - - The target chain stores `relayer_ids` that delivered messages because the relayer can generate - a storage proof to show that they did indeed deliver those messages. The reward is paid on the - source chain and we inform the target chain about that fact so it can prune these `relayer_ids`. - - It's totally fine if there are no messages, and we only include the reward payment proof - when calling that function. - -1. 🥳 the message is now delivered and dispatched on the target chain! - -1. The relayer now needs to confirm the delivery to claim her payment and reward on the source - chain. - -1. The relayer creates a transaction on the source chain with call: - - ```rust - receive_messages_delivery_proof( - proof, // hash of the finalized target chain block, lane_id, storage proof - ) - ``` - -### UI challenges - -- The UI should warn before (or prevent) sending to `k(kCharlie)`! - - -Scenario 2: Kusama's Alice nominating validators with her DOTs ---------------------------- - -kAlice receives 10 DOTs from pBob and nominates `p(pCharlie)` and `p(pDave)`. - -1. Generate kAlice's DOT address (`p(kAlice)`) -2. [Polkadot] pBob transfers 5 DOTs to `p(kAlice)` -3. [Kusama] kAlice sends a batch transaction: - - `staking::Bond` transaction to create stash account choosing `p(kAlice)` as the controller account. - - `staking::Nominate(vec![p(pCharlie)])` to nominate pCharlie using the controller account. 
- - -Scenario 3: Kusama Treasury receiving & spending DOTs ---------------------------- - -pBob sends 15 DOTs to Kusama Treasury which Kusama Governance decides to transfer to kCharlie. - -1. Generate source account for the treasury (`kTreasury`). -2. [Polkadot] pBob transfers 15 DOTs to `p(kTreasury)`. -3. [Kusama] Send a governance proposal to send a bridge message which transfers funds to `p(kCharlie)`. -4. [Kusama] Dispatch the governance proposal using `kTreasury` account id. - -Extra scenarios -=========================== - -Scenario 4: Kusama's Alice setting up 1-of-2 multi-sig to spend from either Kusama or Polkadot ---------------------------- - -Assuming `p(pAlice)` has at least 7 DOTs already. - -1. Generate multisig account id: `pMultiSig = multi_account_id(&[p(kAlice), p(pAlice)], 1)`. -2. [Kusama] Transfer 7 DOTs to `pMultiSig` using `TargetAccount` origin of `pAlice`. -3. [Kusama] Transfer 2 DOTs to `p(kAlice)` from the multisig: - - Send `multisig::as_multi_threshold_1(vec![p(pAlice)], balances::Transfer(p(kAlice), 2))` - -Scenario 5: Kusama Treasury staking & nominating validators with DOTs ---------------------------- - -Scenario 6: Kusama Treasury voting in Polkadot's democracy proposal ---------------------------- - -Potentially interesting scenarios -=========================== - -Scenario 7: Polkadot's Bob spending his DOTs by using Kusama chain ---------------------------- - -We can assume he holds KSM. Problem: he can pay fees, but can't really send (sign) a transaction? -Shall we support some kind of dispatcher? - -Scenario 8: Kusama Governance taking over Kusama's Alice DOT holdings ---------------------------- - -We use `SourceRoot` call to transfer her DOTs to Kusama treasury. Source chain root -should also be able to send messages as `CallOrigin::SourceAccount(Alice)` though. 
diff --git a/polkadot/bridges/modules/call-dispatch/Cargo.toml b/polkadot/bridges/modules/call-dispatch/Cargo.toml deleted file mode 100644 index 64910df861c381e982b304f7fc844954eb9079ed..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/call-dispatch/Cargo.toml +++ /dev/null @@ -1,39 +0,0 @@ -[package] -name = "pallet-bridge-call-dispatch" -description = "A Substrate Runtime module that dispatches a bridge message, treating it simply as encoded Call" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } - -# Bridge dependencies - -bp-message-dispatch = { path = "../../primitives/message-dispatch", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[dev-dependencies] -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } -serde = "1.0" - -[features] -default = ["std"] -std = [ - "bp-message-dispatch/std", - "bp-runtime/std", - "frame-support/std", - "frame-system/std", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/polkadot/bridges/modules/call-dispatch/README.md b/polkadot/bridges/modules/call-dispatch/README.md deleted file mode 100644 index 
0351aa9e503bc957e41ffae9cc3edf856c622564..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/call-dispatch/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# Call Dispatch Module - -The call dispatch module has a single internal (only callable by other runtime modules) entry point -for dispatching encoded calls (`pallet_bridge_call_dispatch::Module::dispatch`). Every dispatch -(successful or not) emits a corresponding module event. The module doesn't have any call-related -requirements - they may come from the bridged chain over some message lane, or they may be crafted -locally. But in this document we'll mostly talk about this module in the context of bridges. - -Every message that is being dispatched has three main characteristics: -- `bridge` is the 4-bytes identifier of the bridge where this message comes from. This may be the - identifier of the bridged chain (like `b"rlto"` for messages coming from `Rialto`), or the - identifier of the bridge itself (`b"rimi"` for `Rialto` <-> `Millau` bridge); -- `id` is the unique id of the message within the given bridge. For messages coming from the - [message lane module](../message-lane/README.md), it may worth to use a tuple - `(LaneId, MessageNonce)` to identify a message; -- `message` is the `pallet_bridge_call_dispatch::MessagePayload` structure. The `call` field is set - to the (potentially) encoded `Call` of this chain. - -The easiest way to understand what is happening when a `Call` is being dispatched, is to look at the -module events set: - -- `MessageRejected` event is emitted if a message has been rejected even before it has reached the - module. Dispatch then is called just to reflect the fact that message has been received, but we - have failed to pre-process it (e.g. 
because we have failed to decode `MessagePayload` structure - from the proof); -- `MessageVersionSpecMismatch` event is emitted if current runtime specification version differs - from the version that has been used to encode the `Call`. The message payload has the - `spec_version`, that is filled by the message submitter. If this value differs from the current - runtime version, dispatch mechanism rejects to dispatch the message. Without this check, we may - decode the wrong `Call` for example if method arguments were changed; -- `MessageCallDecodeFailed` event is emitted if we have failed to decode `Call` from the payload. - This may happen if the submitter has provided incorrect value in the `call` field, or if source - chain storage has been corrupted. The `Call` is decoded after `spec_version` check, so we'll never - try to decode `Call` from other runtime version; -- `MessageSignatureMismatch` event is emitted if submitter has chose to dispatch message using - specified this chain account (`pallet_bridge_call_dispatch::CallOrigin::TargetAccount` origin), - but he has failed to prove that he owns the private key for this account; -- `MessageCallRejected` event is emitted if the module has been deployed with some call filter and - this filter has rejected the `Call`. In your bridge you may choose to reject all messages except - e.g. balance transfer calls; -- `MessageWeightMismatch` event is emitted if the message submitter has specified invalid `Call` - dispatch weight in the `weight` field of the message payload. The value of this field is compared - to the pre-dispatch weight of the decoded `Call`. If it is less than the actual pre-dispatch - weight, the dispatch is rejected. 
Keep in mind, that even if post-dispatch weight will be less - than specified, the submitter still have to declare (and pay for) the maximal possible weight - (that is the pre-dispatch weight); -- `MessageDispatched` event is emitted if the message has passed all checks and we have actually - dispatched it. The dispatch may still fail, though - that's why we are including the dispatch - result in the event payload. - -When we talk about module in context of bridges, these events are helping in following cases: - -1. when the message submitter has access to the state of both chains and wants to monitor what has - happened with his message. Then he could use the message id (that he gets from the - [message lane module events](../message-lane/README.md#General-Information)) to filter events of - call dispatch module at the target chain and actually see what has happened with his message; - -1. when the message submitter only has access to the source chain state (for example, when sender is - the runtime module at the source chain). In this case, your bridge may have additional mechanism - to deliver dispatch proofs (which are storage proof of module events) back to the source chain, - thus allowing the submitter to see what has happened with his messages. diff --git a/polkadot/bridges/modules/call-dispatch/src/lib.rs b/polkadot/bridges/modules/call-dispatch/src/lib.rs deleted file mode 100644 index 1f0da202829aea6ca5c5245e24b4804a00f53fe2..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/call-dispatch/src/lib.rs +++ /dev/null @@ -1,859 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Runtime module which takes care of dispatching messages received over the bridge. -//! -//! The messages are interpreted directly as runtime `Call`. We attempt to decode -//! them and then dispatch as usual. To prevent compatibility issues, the Calls have -//! to include a `spec_version`. This will be checked before dispatch. In the case of -//! a succesful dispatch an event is emitted. - -#![cfg_attr(not(feature = "std"), no_std)] -#![warn(missing_docs)] - -use bp_message_dispatch::{MessageDispatch, Weight}; -use bp_runtime::{derive_account_id, InstanceId, Size, SourceAccount}; -use codec::{Decode, Encode}; -use frame_support::{ - decl_event, decl_module, decl_storage, - dispatch::{Dispatchable, Parameter}, - ensure, - traits::{Filter, Get}, - weights::{extract_actual_weight, GetDispatchInfo}, - RuntimeDebug, -}; -use frame_system::RawOrigin; -use sp_runtime::{ - traits::{BadOrigin, Convert, IdentifyAccount, MaybeDisplay, MaybeSerializeDeserialize, Member, Verify}, - DispatchResult, -}; -use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; - -/// Spec version type. -pub type SpecVersion = u32; - -/// Origin of a Call when it is dispatched on the target chain. -/// -/// The source chain can (and should) verify that the message can be dispatched on the target chain -/// with a particular origin given the source chain's origin. This can be done with the -/// `verify_message_origin()` function. -#[derive(RuntimeDebug, Encode, Decode, Clone, PartialEq, Eq)] -pub enum CallOrigin { - /// Call is sent by the Root origin on the source chain. 
On the target chain it is dispatched - /// from a derived account. - /// - /// The derived account represents the source Root account on the target chain. This is useful - /// if the target chain needs some way of knowing that a call came from a privileged origin on - /// the source chain (maybe to allow a configuration change for example). - SourceRoot, - - /// Call is sent by `SourceChainAccountId` on the source chain. On the target chain it is - /// dispatched from an account controlled by a private key on the target chain. - /// - /// The account can be identified by `TargetChainAccountPublic`. The proof that the - /// `SourceChainAccountId` controls `TargetChainAccountPublic` is the `TargetChainSignature` - /// over `(Call, SourceChainAccountId, TargetChainSpecVersion, SourceChainBridgeId).encode()`. - /// - /// NOTE sending messages using this origin (or any other) does not have replay protection! - /// The assumption is that both the source account and the target account is controlled by - /// the same entity, so source-chain replay protection is sufficient. - /// As a consequence, it's extremely important for the target chain user to never produce - /// a signature with their target-private key on something that could be sent over the bridge, - /// i.e. if the target user signs `(, Call::Transfer(X, 5))` - /// The owner of `some-source-account-id` can send that message multiple times, which would - /// result with multiple transfer calls being dispatched on the target chain. - /// So please, NEVER USE YOUR PRIVATE KEY TO SIGN SOMETHING YOU DON'T FULLY UNDERSTAND! - TargetAccount(SourceChainAccountId, TargetChainAccountPublic, TargetChainSignature), - - /// Call is sent by the `SourceChainAccountId` on the source chain. On the target chain it is - /// dispatched from a derived account ID. 
- /// - /// The account ID on the target chain is derived from the source account ID This is useful if - /// you need a way to represent foreign accounts on this chain for call dispatch purposes. - /// - /// Note that the derived account does not need to have a private key on the target chain. This - /// origin can therefore represent proxies, pallets, etc. as well as "regular" accounts. - SourceAccount(SourceChainAccountId), -} - -/// Message payload type used by call-dispatch module. -#[derive(RuntimeDebug, Encode, Decode, Clone, PartialEq, Eq)] -pub struct MessagePayload { - /// Runtime specification version. We only dispatch messages that have the same - /// runtime version. Otherwise we risk to misinterpret encoded calls. - pub spec_version: SpecVersion, - /// Weight of the call, declared by the message sender. If it is less than actual - /// static weight, the call is not dispatched. - pub weight: Weight, - /// Call origin to be used during dispatch. - pub origin: CallOrigin, - /// The call itself. - pub call: Call, -} - -impl Size - for MessagePayload> -{ - fn size_hint(&self) -> u32 { - self.call.len() as _ - } -} - -/// The module configuration trait. -pub trait Config: frame_system::Config { - /// The overarching event type. - type Event: From> + Into<::Event>; - /// Id of the message. Whenever message is passed to the dispatch module, it emits - /// event with this id + dispatch result. Could be e.g. (LaneId, MessageNonce) if - /// it comes from message-lane module. - type MessageId: Parameter; - /// Type of account ID on source chain. - type SourceChainAccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord + Default; - /// Type of account public key on target chain. - type TargetChainAccountPublic: Parameter + IdentifyAccount; - /// Type of signature that may prove that the message has been signed by - /// owner of `TargetChainAccountPublic`. 
- type TargetChainSignature: Parameter + Verify; - /// The overarching dispatch call type. - type Call: Parameter - + GetDispatchInfo - + Dispatchable< - Origin = ::Origin, - PostInfo = frame_support::dispatch::PostDispatchInfo, - >; - /// Pre-dispatch filter for incoming calls. - /// - /// The pallet will filter all incoming calls right before they're dispatched. If this filter - /// rejects the call, special event (`Event::MessageCallRejected`) is emitted. - type CallFilter: Filter<>::Call>; - /// The type that is used to wrap the `Self::Call` when it is moved over bridge. - /// - /// The idea behind this is to avoid `Call` conversion/decoding until we'll be sure - /// that all other stuff (like `spec_version`) is ok. If we would try to decode - /// `Call` which has been encoded using previous `spec_version`, then we might end - /// up with decoding error, instead of `MessageVersionSpecMismatch`. - type EncodedCall: Decode + Encode + Into>::Call, ()>>; - /// A type which can be turned into an AccountId from a 256-bit hash. - /// - /// Used when deriving target chain AccountIds from source chain AccountIds. - type AccountIdConverter: sp_runtime::traits::Convert; -} - -decl_storage! { - trait Store for Module, I: Instance = DefaultInstance> as CallDispatch {} -} - -decl_event!( - pub enum Event where - >::MessageId - { - /// Message has been rejected before reaching dispatch. - MessageRejected(InstanceId, MessageId), - /// Message has been rejected by dispatcher because of spec version mismatch. - /// Last two arguments are: expected and passed spec version. - MessageVersionSpecMismatch(InstanceId, MessageId, SpecVersion, SpecVersion), - /// Message has been rejected by dispatcher because of weight mismatch. - /// Last two arguments are: expected and passed call weight. - MessageWeightMismatch(InstanceId, MessageId, Weight, Weight), - /// Message signature mismatch. 
- MessageSignatureMismatch(InstanceId, MessageId), - /// Message has been dispatched with given result. - MessageDispatched(InstanceId, MessageId, DispatchResult), - /// We have failed to decode Call from the message. - MessageCallDecodeFailed(InstanceId, MessageId), - /// The call from the message has been rejected by the call filter. - MessageCallRejected(InstanceId, MessageId), - /// Phantom member, never used. Needed to handle multiple pallet instances. - _Dummy(PhantomData), - } -); - -decl_module! { - /// Call Dispatch FRAME Pallet. - pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { - /// Deposit one of this module's events by using the default implementation. - fn deposit_event() = default; - } -} - -impl, I: Instance> MessageDispatch for Module { - type Message = - MessagePayload; - - fn dispatch_weight(message: &Self::Message) -> Weight { - message.weight - } - - fn dispatch(bridge: InstanceId, id: T::MessageId, message: Result) { - // emit special even if message has been rejected by external component - let message = match message { - Ok(message) => message, - Err(_) => { - frame_support::debug::trace!("Message {:?}/{:?}: rejected before actual dispatch", bridge, id); - Self::deposit_event(RawEvent::MessageRejected(bridge, id)); - return; - } - }; - - // verify spec version - // (we want it to be the same, because otherwise we may decode Call improperly) - let expected_version = ::Version::get().spec_version; - if message.spec_version != expected_version { - frame_support::debug::trace!( - "Message {:?}/{:?}: spec_version mismatch. 
Expected {:?}, got {:?}", - bridge, - id, - expected_version, - message.spec_version, - ); - Self::deposit_event(RawEvent::MessageVersionSpecMismatch( - bridge, - id, - expected_version, - message.spec_version, - )); - return; - } - - // now that we have spec version checked, let's decode the call - let call = match message.call.into() { - Ok(call) => call, - Err(_) => { - frame_support::debug::trace!("Failed to decode Call from message {:?}/{:?}", bridge, id,); - Self::deposit_event(RawEvent::MessageCallDecodeFailed(bridge, id)); - return; - } - }; - - // prepare dispatch origin - let origin_account = match message.origin { - CallOrigin::SourceRoot => { - let hex_id = derive_account_id::(bridge, SourceAccount::Root); - let target_id = T::AccountIdConverter::convert(hex_id); - frame_support::debug::trace!("Root Account: {:?}", &target_id); - target_id - } - CallOrigin::TargetAccount(source_account_id, target_public, target_signature) => { - let digest = account_ownership_digest(&call, source_account_id, message.spec_version, bridge); - - let target_account = target_public.into_account(); - if !target_signature.verify(&digest[..], &target_account) { - frame_support::debug::trace!( - "Message {:?}/{:?}: origin proof is invalid. 
Expected account: {:?} from signature: {:?}", - bridge, - id, - target_account, - target_signature, - ); - Self::deposit_event(RawEvent::MessageSignatureMismatch(bridge, id)); - return; - } - - frame_support::debug::trace!("Target Account: {:?}", &target_account); - target_account - } - CallOrigin::SourceAccount(source_account_id) => { - let hex_id = derive_account_id(bridge, SourceAccount::Account(source_account_id)); - let target_id = T::AccountIdConverter::convert(hex_id); - frame_support::debug::trace!("Source Account: {:?}", &target_id); - target_id - } - }; - - // filter the call - if !T::CallFilter::filter(&call) { - frame_support::debug::trace!( - "Message {:?}/{:?}: the call ({:?}) is rejected by filter", - bridge, - id, - call, - ); - Self::deposit_event(RawEvent::MessageCallRejected(bridge, id)); - return; - } - - // verify weight - // (we want passed weight to be at least equal to pre-dispatch weight of the call - // because otherwise Calls may be dispatched at lower price) - let dispatch_info = call.get_dispatch_info(); - let expected_weight = dispatch_info.weight; - if message.weight < expected_weight { - frame_support::debug::trace!( - "Message {:?}/{:?}: passed weight is too low. Expected at least {:?}, got {:?}", - bridge, - id, - expected_weight, - message.weight, - ); - Self::deposit_event(RawEvent::MessageWeightMismatch( - bridge, - id, - expected_weight, - message.weight, - )); - return; - } - - // finally dispatch message - let origin = RawOrigin::Signed(origin_account).into(); - - frame_support::debug::trace!("Message being dispatched is: {:?}", &call); - let dispatch_result = call.dispatch(origin); - let actual_call_weight = extract_actual_weight(&dispatch_result, &dispatch_info); - - frame_support::debug::trace!( - "Message {:?}/{:?} has been dispatched. Weight: {} of {}. 
Result: {:?}", - bridge, - id, - actual_call_weight, - message.weight, - dispatch_result, - ); - - Self::deposit_event(RawEvent::MessageDispatched( - bridge, - id, - dispatch_result.map(drop).map_err(|e| e.error), - )); - } -} - -/// Check if the message is allowed to be dispatched on the target chain given the sender's origin -/// on the source chain. -/// -/// For example, if a message is sent from a "regular" account on the source chain it will not be -/// allowed to be dispatched as Root on the target chain. This is a useful check to do on the source -/// chain _before_ sending a message whose dispatch will be rejected on the target chain. -pub fn verify_message_origin( - sender_origin: &RawOrigin, - message: &MessagePayload, -) -> Result, BadOrigin> -where - SourceChainAccountId: PartialEq + Clone, -{ - match message.origin { - CallOrigin::SourceRoot => { - ensure!(sender_origin == &RawOrigin::Root, BadOrigin); - Ok(None) - } - CallOrigin::TargetAccount(ref source_account_id, _, _) => { - ensure!( - sender_origin == &RawOrigin::Signed(source_account_id.clone()), - BadOrigin - ); - Ok(Some(source_account_id.clone())) - } - CallOrigin::SourceAccount(ref source_account_id) => { - ensure!( - sender_origin == &RawOrigin::Signed(source_account_id.clone()), - BadOrigin - ); - Ok(Some(source_account_id.clone())) - } - } -} - -/// Target account ownership digest from the source chain. -/// -/// The byte vector returned by this function will be signed with a target chain account -/// private key. This way, the owner of `source_account_id` on the source chain proves that -/// the target chain account private key is also under his control. 
-pub fn account_ownership_digest( - call: &Call, - source_account_id: AccountId, - target_spec_version: SpecVersion, - source_instance_id: BridgeId, -) -> Vec -where - Call: Encode, - AccountId: Encode, - SpecVersion: Encode, - BridgeId: Encode, -{ - let mut proof = Vec::new(); - call.encode_to(&mut proof); - source_account_id.encode_to(&mut proof); - target_spec_version.encode_to(&mut proof); - source_instance_id.encode_to(&mut proof); - - proof -} - -#[cfg(test)] -mod tests { - // From construct_runtime macro - #![allow(clippy::from_over_into)] - - use super::*; - use frame_support::{parameter_types, weights::Weight}; - use frame_system::{EventRecord, Phase}; - use sp_core::H256; - use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, - }; - - type AccountId = u64; - type MessageId = [u8; 4]; - - #[derive(Debug, Encode, Decode, Clone, PartialEq, Eq)] - pub struct TestAccountPublic(AccountId); - - impl IdentifyAccount for TestAccountPublic { - type AccountId = AccountId; - - fn into_account(self) -> AccountId { - self.0 - } - } - - #[derive(Debug, Encode, Decode, Clone, PartialEq, Eq)] - pub struct TestSignature(AccountId); - - impl Verify for TestSignature { - type Signer = TestAccountPublic; - - fn verify>(&self, _msg: L, signer: &AccountId) -> bool { - self.0 == *signer - } - } - - pub struct AccountIdConverter; - - impl sp_runtime::traits::Convert for AccountIdConverter { - fn convert(hash: H256) -> AccountId { - hash.to_low_u64_ne() - } - } - - type Block = frame_system::mocking::MockBlock; - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - - use crate as call_dispatch; - - frame_support::construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - CallDispatch: call_dispatch::{Pallet, Call, Event}, - } - } - - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - - impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = Header; - type Event = Event; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = (); - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); - } - - impl Config for TestRuntime { - type Event = Event; - type MessageId = MessageId; - type SourceChainAccountId = AccountId; - type TargetChainAccountPublic = TestAccountPublic; - type TargetChainSignature = TestSignature; - type Call = Call; - type CallFilter = TestCallFilter; - type EncodedCall = EncodedCall; - type AccountIdConverter = AccountIdConverter; - } - - #[derive(Decode, Encode)] - pub struct EncodedCall(Vec); - - impl From for Result { - fn from(call: EncodedCall) -> Result { - Call::decode(&mut &call.0[..]).map_err(drop) - } - } - - pub struct TestCallFilter; - - impl Filter for TestCallFilter { - fn filter(call: &Call) -> bool { - !matches!(*call, Call::System(frame_system::Call::fill_block(_))) - } - } - - const TEST_SPEC_VERSION: SpecVersion = 0; - const TEST_WEIGHT: Weight = 1_000_000_000; - - fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default() - .build_storage::() - .unwrap(); - sp_io::TestExternalities::new(t) - } - - fn prepare_message( - origin: CallOrigin, - call: Call, - ) -> as MessageDispatch<::MessageId>>::Message { - MessagePayload { - spec_version: 
TEST_SPEC_VERSION, - weight: TEST_WEIGHT, - origin, - call: EncodedCall(call.encode()), - } - } - - fn prepare_root_message( - call: Call, - ) -> as MessageDispatch<::MessageId>>::Message { - prepare_message(CallOrigin::SourceRoot, call) - } - - fn prepare_target_message( - call: Call, - ) -> as MessageDispatch<::MessageId>>::Message { - let origin = CallOrigin::TargetAccount(1, TestAccountPublic(1), TestSignature(1)); - prepare_message(origin, call) - } - - fn prepare_source_message( - call: Call, - ) -> as MessageDispatch<::MessageId>>::Message { - let origin = CallOrigin::SourceAccount(1); - prepare_message(origin, call) - } - - #[test] - fn should_fail_on_spec_version_mismatch() { - new_test_ext().execute_with(|| { - let bridge = b"ethb".to_owned(); - let id = [0; 4]; - - const BAD_SPEC_VERSION: SpecVersion = 99; - let mut message = - prepare_root_message(Call::System(>::remark(vec![1, 2, 3]))); - message.spec_version = BAD_SPEC_VERSION; - - System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Ok(message)); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::call_dispatch(call_dispatch::Event::::MessageVersionSpecMismatch( - bridge, - id, - TEST_SPEC_VERSION, - BAD_SPEC_VERSION - )), - topics: vec![], - }], - ); - }); - } - - #[test] - fn should_fail_on_weight_mismatch() { - new_test_ext().execute_with(|| { - let bridge = b"ethb".to_owned(); - let id = [0; 4]; - let mut message = - prepare_root_message(Call::System(>::remark(vec![1, 2, 3]))); - message.weight = 0; - - System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Ok(message)); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::call_dispatch(call_dispatch::Event::::MessageWeightMismatch( - bridge, id, 1973000, 0, - )), - topics: vec![], - }], - ); - }); - } - - #[test] - fn should_fail_on_signature_mismatch() { - new_test_ext().execute_with(|| { - let bridge = 
b"ethb".to_owned(); - let id = [0; 4]; - - let call_origin = CallOrigin::TargetAccount(1, TestAccountPublic(1), TestSignature(99)); - let message = prepare_message( - call_origin, - Call::System(>::remark(vec![1, 2, 3])), - ); - - System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Ok(message)); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::call_dispatch(call_dispatch::Event::::MessageSignatureMismatch( - bridge, id - )), - topics: vec![], - }], - ); - }); - } - - #[test] - fn should_emit_event_for_rejected_messages() { - new_test_ext().execute_with(|| { - let bridge = b"ethb".to_owned(); - let id = [0; 4]; - - System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Err(())); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::call_dispatch(call_dispatch::Event::::MessageRejected(bridge, id)), - topics: vec![], - }], - ); - }); - } - - #[test] - fn should_fail_on_call_decode() { - new_test_ext().execute_with(|| { - let bridge = b"ethb".to_owned(); - let id = [0; 4]; - - let mut message = - prepare_root_message(Call::System(>::remark(vec![1, 2, 3]))); - message.call.0 = vec![]; - - System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Ok(message)); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::call_dispatch(call_dispatch::Event::::MessageCallDecodeFailed( - bridge, id - )), - topics: vec![], - }], - ); - }); - } - - #[test] - fn should_emit_event_for_rejected_calls() { - new_test_ext().execute_with(|| { - let bridge = b"ethb".to_owned(); - let id = [0; 4]; - - let call = Call::System(>::fill_block(Perbill::from_percent(75))); - let weight = call.get_dispatch_info().weight; - let mut message = prepare_root_message(call); - message.weight = weight; - - System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Ok(message)); - - assert_eq!( - 
System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::call_dispatch(call_dispatch::Event::::MessageCallRejected(bridge, id)), - topics: vec![], - }], - ); - }); - } - - #[test] - fn should_dispatch_bridge_message_from_root_origin() { - new_test_ext().execute_with(|| { - let bridge = b"ethb".to_owned(); - let id = [0; 4]; - let message = prepare_root_message(Call::System(>::remark(vec![1, 2, 3]))); - - System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Ok(message)); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::call_dispatch(call_dispatch::Event::::MessageDispatched( - bridge, - id, - Ok(()) - )), - topics: vec![], - }], - ); - }); - } - - #[test] - fn should_dispatch_bridge_message_from_target_origin() { - new_test_ext().execute_with(|| { - let id = [0; 4]; - let bridge = b"ethb".to_owned(); - - let call = Call::System(>::remark(vec![])); - let message = prepare_target_message(call); - - System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Ok(message)); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::call_dispatch(call_dispatch::Event::::MessageDispatched( - bridge, - id, - Ok(()) - )), - topics: vec![], - }], - ); - }) - } - - #[test] - fn should_dispatch_bridge_message_from_source_origin() { - new_test_ext().execute_with(|| { - let id = [0; 4]; - let bridge = b"ethb".to_owned(); - - let call = Call::System(>::remark(vec![])); - let message = prepare_source_message(call); - - System::set_block_number(1); - CallDispatch::dispatch(bridge, id, Ok(message)); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::call_dispatch(call_dispatch::Event::::MessageDispatched( - bridge, - id, - Ok(()) - )), - topics: vec![], - }], - ); - }) - } - - #[test] - fn origin_is_checked_when_verifying_sending_message_using_source_root_account() { - let call = 
Call::System(>::remark(vec![])); - let message = prepare_root_message(call); - - // When message is sent by Root, CallOrigin::SourceRoot is allowed - assert!(matches!(verify_message_origin(&RawOrigin::Root, &message), Ok(None))); - - // when message is sent by some real account, CallOrigin::SourceRoot is not allowed - assert!(matches!( - verify_message_origin(&RawOrigin::Signed(1), &message), - Err(BadOrigin) - )); - } - - #[test] - fn origin_is_checked_when_verifying_sending_message_using_target_account() { - let call = Call::System(>::remark(vec![])); - let message = prepare_target_message(call); - - // When message is sent by Root, CallOrigin::TargetAccount is not allowed - assert!(matches!( - verify_message_origin(&RawOrigin::Root, &message), - Err(BadOrigin) - )); - - // When message is sent by some other account, it is rejected - assert!(matches!( - verify_message_origin(&RawOrigin::Signed(2), &message), - Err(BadOrigin) - )); - - // When message is sent by a real account, it is allowed to have origin - // CallOrigin::TargetAccount - assert!(matches!( - verify_message_origin(&RawOrigin::Signed(1), &message), - Ok(Some(1)) - )); - } - - #[test] - fn origin_is_checked_when_verifying_sending_message_using_source_account() { - let call = Call::System(>::remark(vec![])); - let message = prepare_source_message(call); - - // Sending a message from the expected origin account works - assert!(matches!( - verify_message_origin(&RawOrigin::Signed(1), &message), - Ok(Some(1)) - )); - - // If we send a message from a different account, it is rejected - assert!(matches!( - verify_message_origin(&RawOrigin::Signed(2), &message), - Err(BadOrigin) - )); - - // If we try and send the message from Root, it is also rejected - assert!(matches!( - verify_message_origin(&RawOrigin::Root, &message), - Err(BadOrigin) - )); - } -} diff --git a/polkadot/bridges/modules/currency-exchange/Cargo.toml b/polkadot/bridges/modules/currency-exchange/Cargo.toml deleted file mode 100644 index 
cf58b5e81c345d1c8cc6b0c6f3e4d9c066b6a6f9..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/currency-exchange/Cargo.toml +++ /dev/null @@ -1,46 +0,0 @@ -[package] -name = "pallet-bridge-currency-exchange" -description = "A Substrate Runtime module that accepts 'lock funds' transactions from a peer chain and grants an equivalent amount to a the appropriate Substrate account." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -serde = { version = "1.0", optional = true } - -# Bridge dependencies - -bp-currency-exchange = { path = "../../primitives/currency-exchange", default-features = false } -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } - -[dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[features] -default = ["std"] -std = [ - "bp-currency-exchange/std", - "bp-header-chain/std", - "codec/std", - "frame-benchmarking/std", - "frame-support/std", - "frame-system/std", - "serde", - "sp-runtime/std", - "sp-std/std", -] -runtime-benchmarks = [ - "frame-benchmarking", - 
"sp-std", -] diff --git a/polkadot/bridges/modules/currency-exchange/src/benchmarking.rs b/polkadot/bridges/modules/currency-exchange/src/benchmarking.rs deleted file mode 100644 index d10dd3c6841268700075dc74035fc87278ab383f..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/currency-exchange/src/benchmarking.rs +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Exchange module complexity is mostly determined by callbacks, defined by runtime. -//! So we are giving runtime opportunity to prepare environment and construct proof -//! before invoking module calls. - -use super::{ - Call, Config as CurrencyExchangeConfig, InclusionProofVerifier, Instance, Module as CurrencyExchangeModule, -}; -use sp_std::prelude::*; - -use frame_benchmarking::{account, benchmarks_instance}; -use frame_system::RawOrigin; - -const SEED: u32 = 0; -const WORST_TX_SIZE_FACTOR: u32 = 1000; -const WORST_PROOF_SIZE_FACTOR: u32 = 1000; - -/// Module we're benchmarking here. -pub struct Module, I: Instance>(CurrencyExchangeModule); - -/// Proof benchmarking parameters. -pub struct ProofParams { - /// Funds recipient. - pub recipient: Recipient, - /// When true, recipient must exists before import. 
- pub recipient_exists: bool, - /// When 0, transaction should have minimal possible size. When this value has non-zero value n, - /// transaction size should be (if possible) near to MIN_SIZE + n * SIZE_FACTOR. - pub transaction_size_factor: u32, - /// When 0, proof should have minimal possible size. When this value has non-zero value n, - /// proof size should be (if possible) near to MIN_SIZE + n * SIZE_FACTOR. - pub proof_size_factor: u32, -} - -/// Config that must be implemented by runtime. -pub trait Config: CurrencyExchangeConfig { - /// Prepare proof for importing exchange transaction. - fn make_proof( - proof_params: ProofParams, - ) -> <>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof; -} - -benchmarks_instance! { - // Benchmark `import_peer_transaction` extrinsic with the best possible conditions: - // * Proof is the transaction itself. - // * Transaction has minimal size. - // * Recipient account exists. - import_peer_transaction_best_case { - let i in 1..100; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: true, - transaction_size_factor: 0, - proof_size_factor: 0, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - - // Benchmark `import_peer_transaction` extrinsic when recipient account does not exists. - import_peer_transaction_when_recipient_does_not_exists { - let i in 1..100; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: false, - transaction_size_factor: 0, - proof_size_factor: 0, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - - // Benchmark `import_peer_transaction` when transaction size increases. 
- import_peer_transaction_when_transaction_size_increases { - let i in 1..100; - let n in 1..WORST_TX_SIZE_FACTOR; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: true, - transaction_size_factor: n, - proof_size_factor: 0, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - - // Benchmark `import_peer_transaction` when proof size increases. - import_peer_transaction_when_proof_size_increases { - let i in 1..100; - let n in 1..WORST_PROOF_SIZE_FACTOR; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: true, - transaction_size_factor: 0, - proof_size_factor: n, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - - // Benchmark `import_peer_transaction` extrinsic with the worst possible conditions: - // * Proof is large. - // * Transaction has large size. - // * Recipient account does not exists. - import_peer_transaction_worst_case { - let i in 1..100; - let m in WORST_TX_SIZE_FACTOR..WORST_TX_SIZE_FACTOR+1; - let n in WORST_PROOF_SIZE_FACTOR..WORST_PROOF_SIZE_FACTOR+1; - - let recipient: T::AccountId = account("recipient", i, SEED); - let proof = T::make_proof(ProofParams { - recipient: recipient.clone(), - recipient_exists: false, - transaction_size_factor: m, - proof_size_factor: n, - }); - }: import_peer_transaction(RawOrigin::Signed(recipient), proof) - -} diff --git a/polkadot/bridges/modules/currency-exchange/src/lib.rs b/polkadot/bridges/modules/currency-exchange/src/lib.rs deleted file mode 100644 index 54e4558d89efe12f16d231e67123717b8131489c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/currency-exchange/src/lib.rs +++ /dev/null @@ -1,496 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Runtime module that allows tokens exchange between two bridged chains. - -#![cfg_attr(not(feature = "std"), no_std)] - -use bp_currency_exchange::{ - CurrencyConverter, DepositInto, Error as ExchangeError, MaybeLockFundsTransaction, RecipientsMap, -}; -use bp_header_chain::InclusionProofVerifier; -use frame_support::{decl_error, decl_module, decl_storage, ensure}; -use sp_runtime::DispatchResult; - -#[cfg(feature = "runtime-benchmarks")] -pub mod benchmarking; - -/// Called when transaction is submitted to the exchange module. -pub trait OnTransactionSubmitted { - /// Called when valid transaction is submitted and accepted by the module. - fn on_valid_transaction_submitted(submitter: AccountId); -} - -/// The module configuration trait -pub trait Config: frame_system::Config { - /// Handler for transaction submission result. - type OnTransactionSubmitted: OnTransactionSubmitted; - /// Represents the blockchain that we'll be exchanging currency with. - type PeerBlockchain: InclusionProofVerifier; - /// Peer blockchain transaction parser. - type PeerMaybeLockFundsTransaction: MaybeLockFundsTransaction< - Transaction = ::Transaction, - >; - /// Map between blockchains recipients. 
- type RecipientsMap: RecipientsMap< - PeerRecipient = ::Recipient, - Recipient = Self::AccountId, - >; - /// This blockchain currency amount type. - type Amount; - /// Converter from peer blockchain currency type into current blockchain currency type. - type CurrencyConverter: CurrencyConverter< - SourceAmount = ::Amount, - TargetAmount = Self::Amount, - >; - /// Something that could grant money. - type DepositInto: DepositInto; -} - -decl_error! { - pub enum Error for Module, I: Instance> { - /// Invalid peer blockchain transaction provided. - InvalidTransaction, - /// Peer transaction has invalid amount. - InvalidAmount, - /// Peer transaction has invalid recipient. - InvalidRecipient, - /// Cannot map from peer recipient to this blockchain recipient. - FailedToMapRecipients, - /// Failed to convert from peer blockchain currency to this blockhain currency. - FailedToConvertCurrency, - /// Deposit has failed. - DepositFailed, - /// Deposit has partially failed (changes to recipient account were made). - DepositPartiallyFailed, - /// Transaction is not finalized. - UnfinalizedTransaction, - /// Transaction funds are already claimed. - AlreadyClaimed, - } -} - -decl_module! { - pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { - /// Imports lock fund transaction of the peer blockchain. - #[weight = 0] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - pub fn import_peer_transaction( - origin, - proof: <>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof, - ) -> DispatchResult { - let submitter = frame_system::ensure_signed(origin)?; - - // verify and parse transaction proof - let deposit = prepare_deposit_details::(&proof)?; - - // make sure to update the mapping if we deposit successfully to avoid double spending, - // i.e. whenever `deposit_into` is successful we MUST update `Transfers`. 
- { - // if any changes were made to the storage, we can't just return error here, because - // otherwise the same proof may be imported again - let deposit_result = T::DepositInto::deposit_into(deposit.recipient, deposit.amount); - match deposit_result { - Ok(_) => (), - Err(ExchangeError::DepositPartiallyFailed) => (), - Err(error) => return Err(Error::::from(error).into()), - } - Transfers::::insert(&deposit.transfer_id, ()) - } - - // reward submitter for providing valid message - T::OnTransactionSubmitted::on_valid_transaction_submitted(submitter); - - frame_support::debug::trace!( - target: "runtime", - "Completed currency exchange: {:?}", - deposit.transfer_id, - ); - - Ok(()) - } - } -} - -decl_storage! { - trait Store for Module, I: Instance = DefaultInstance> as Bridge { - /// All transfers that have already been claimed. - Transfers: map hasher(blake2_128_concat) ::Id => (); - } -} - -impl, I: Instance> Module { - /// Returns true if currency exchange module is able to import given transaction proof in - /// its current state. 
- pub fn filter_transaction_proof( - proof: &::TransactionInclusionProof, - ) -> bool { - if let Err(err) = prepare_deposit_details::(proof) { - frame_support::debug::trace!( - target: "runtime", - "Can't accept exchange transaction: {:?}", - err, - ); - - return false; - } - - true - } -} - -impl, I: Instance> From for Error { - fn from(error: ExchangeError) -> Self { - match error { - ExchangeError::InvalidTransaction => Error::InvalidTransaction, - ExchangeError::InvalidAmount => Error::InvalidAmount, - ExchangeError::InvalidRecipient => Error::InvalidRecipient, - ExchangeError::FailedToMapRecipients => Error::FailedToMapRecipients, - ExchangeError::FailedToConvertCurrency => Error::FailedToConvertCurrency, - ExchangeError::DepositFailed => Error::DepositFailed, - ExchangeError::DepositPartiallyFailed => Error::DepositPartiallyFailed, - } - } -} - -impl OnTransactionSubmitted for () { - fn on_valid_transaction_submitted(_: AccountId) {} -} - -/// Exchange deposit details. -struct DepositDetails, I: Instance> { - /// Transfer id. - pub transfer_id: ::Id, - /// Transfer recipient. - pub recipient: ::Recipient, - /// Transfer amount. - pub amount: ::TargetAmount, -} - -/// Verify and parse transaction proof, preparing everything required for importing -/// this transaction proof. 
-fn prepare_deposit_details, I: Instance>( - proof: &<>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof, -) -> Result, Error> { - // ensure that transaction is included in finalized block that we know of - let transaction = >::PeerBlockchain::verify_transaction_inclusion_proof(proof) - .ok_or(Error::::UnfinalizedTransaction)?; - - // parse transaction - let transaction = - >::PeerMaybeLockFundsTransaction::parse(&transaction).map_err(Error::::from)?; - let transfer_id = transaction.id; - ensure!( - !Transfers::::contains_key(&transfer_id), - Error::::AlreadyClaimed - ); - - // grant recipient - let recipient = T::RecipientsMap::map(transaction.recipient).map_err(Error::::from)?; - let amount = T::CurrencyConverter::convert(transaction.amount).map_err(Error::::from)?; - - Ok(DepositDetails { - transfer_id, - recipient, - amount, - }) -} - -#[cfg(test)] -mod tests { - // From construct_runtime macro - #![allow(clippy::from_over_into)] - - use super::*; - use bp_currency_exchange::LockFundsTransaction; - use frame_support::{assert_noop, assert_ok, construct_runtime, parameter_types, weights::Weight}; - use sp_core::H256; - use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, - }; - - type AccountId = u64; - - const INVALID_TRANSACTION_ID: u64 = 100; - const ALREADY_CLAIMED_TRANSACTION_ID: u64 = 101; - const UNKNOWN_RECIPIENT_ID: u64 = 0; - const INVALID_AMOUNT: u64 = 0; - const MAX_DEPOSIT_AMOUNT: u64 = 1000; - const SUBMITTER: u64 = 2000; - - type RawTransaction = LockFundsTransaction; - - pub struct DummyTransactionSubmissionHandler; - - impl OnTransactionSubmitted for DummyTransactionSubmissionHandler { - fn on_valid_transaction_submitted(submitter: AccountId) { - Transfers::::insert(submitter, ()); - } - } - - pub struct DummyBlockchain; - - impl InclusionProofVerifier for DummyBlockchain { - type Transaction = RawTransaction; - type TransactionInclusionProof = (bool, RawTransaction); - - fn 
verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option { - if proof.0 { - Some(proof.1.clone()) - } else { - None - } - } - } - - pub struct DummyTransaction; - - impl MaybeLockFundsTransaction for DummyTransaction { - type Transaction = RawTransaction; - type Id = u64; - type Recipient = AccountId; - type Amount = u64; - - fn parse(tx: &Self::Transaction) -> bp_currency_exchange::Result { - match tx.id { - INVALID_TRANSACTION_ID => Err(ExchangeError::InvalidTransaction), - _ => Ok(tx.clone()), - } - } - } - - pub struct DummyRecipientsMap; - - impl RecipientsMap for DummyRecipientsMap { - type PeerRecipient = AccountId; - type Recipient = AccountId; - - fn map(peer_recipient: Self::PeerRecipient) -> bp_currency_exchange::Result { - match peer_recipient { - UNKNOWN_RECIPIENT_ID => Err(ExchangeError::FailedToMapRecipients), - _ => Ok(peer_recipient * 10), - } - } - } - - pub struct DummyCurrencyConverter; - - impl CurrencyConverter for DummyCurrencyConverter { - type SourceAmount = u64; - type TargetAmount = u64; - - fn convert(amount: Self::SourceAmount) -> bp_currency_exchange::Result { - match amount { - INVALID_AMOUNT => Err(ExchangeError::FailedToConvertCurrency), - _ => Ok(amount * 10), - } - } - } - - pub struct DummyDepositInto; - - impl DepositInto for DummyDepositInto { - type Recipient = AccountId; - type Amount = u64; - - fn deposit_into(_recipient: Self::Recipient, amount: Self::Amount) -> bp_currency_exchange::Result<()> { - match amount { - amount if amount < MAX_DEPOSIT_AMOUNT * 10 => Ok(()), - amount if amount == MAX_DEPOSIT_AMOUNT * 10 => Err(ExchangeError::DepositPartiallyFailed), - _ => Err(ExchangeError::DepositFailed), - } - } - } - - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - type Block = frame_system::mocking::MockBlock; - use crate as pallet_bridge_currency_exchange; - - construct_runtime! 
{ - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Exchange: pallet_bridge_currency_exchange::{Pallet}, - } - } - - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - - impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = (); - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); - } - - impl Config for TestRuntime { - type OnTransactionSubmitted = DummyTransactionSubmissionHandler; - type PeerBlockchain = DummyBlockchain; - type PeerMaybeLockFundsTransaction = DummyTransaction; - type RecipientsMap = DummyRecipientsMap; - type Amount = u64; - type CurrencyConverter = DummyCurrencyConverter; - type DepositInto = DummyDepositInto; - } - - fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default() - .build_storage::() - .unwrap(); - sp_io::TestExternalities::new(t) - } - - fn transaction(id: u64) -> RawTransaction { - RawTransaction { - id, - recipient: 1, - amount: 2, - } - } - - #[test] - fn unfinalized_transaction_rejected() { - new_test_ext().execute_with(|| { - assert_noop!( - Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (false, transaction(0))), - Error::::UnfinalizedTransaction, - ); - }); - } - - 
#[test] - fn invalid_transaction_rejected() { - new_test_ext().execute_with(|| { - assert_noop!( - Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (true, transaction(INVALID_TRANSACTION_ID)), - ), - Error::::InvalidTransaction, - ); - }); - } - - #[test] - fn claimed_transaction_rejected() { - new_test_ext().execute_with(|| { - ::Transfers::insert(ALREADY_CLAIMED_TRANSACTION_ID, ()); - assert_noop!( - Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (true, transaction(ALREADY_CLAIMED_TRANSACTION_ID)), - ), - Error::::AlreadyClaimed, - ); - }); - } - - #[test] - fn transaction_with_unknown_recipient_rejected() { - new_test_ext().execute_with(|| { - let mut transaction = transaction(0); - transaction.recipient = UNKNOWN_RECIPIENT_ID; - assert_noop!( - Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)), - Error::::FailedToMapRecipients, - ); - }); - } - - #[test] - fn transaction_with_invalid_amount_rejected() { - new_test_ext().execute_with(|| { - let mut transaction = transaction(0); - transaction.amount = INVALID_AMOUNT; - assert_noop!( - Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)), - Error::::FailedToConvertCurrency, - ); - }); - } - - #[test] - fn transaction_with_invalid_deposit_rejected() { - new_test_ext().execute_with(|| { - let mut transaction = transaction(0); - transaction.amount = MAX_DEPOSIT_AMOUNT + 1; - assert_noop!( - Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)), - Error::::DepositFailed, - ); - }); - } - - #[test] - fn valid_transaction_accepted_even_if_deposit_partially_fails() { - new_test_ext().execute_with(|| { - let mut transaction = transaction(0); - transaction.amount = MAX_DEPOSIT_AMOUNT; - assert_ok!(Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (true, transaction), - ),); - - // ensure that the transfer has been marked as completed - assert!(::Transfers::contains_key(0u64)); - // 
ensure that submitter has been rewarded - assert!(::Transfers::contains_key(SUBMITTER)); - }); - } - - #[test] - fn valid_transaction_accepted() { - new_test_ext().execute_with(|| { - assert_ok!(Exchange::import_peer_transaction( - Origin::signed(SUBMITTER), - (true, transaction(0)), - ),); - - // ensure that the transfer has been marked as completed - assert!(::Transfers::contains_key(0u64)); - // ensure that submitter has been rewarded - assert!(::Transfers::contains_key(SUBMITTER)); - }); - } -} diff --git a/polkadot/bridges/modules/ethereum-contract/builtin/Cargo.toml b/polkadot/bridges/modules/ethereum-contract/builtin/Cargo.toml deleted file mode 100644 index d17b0ba7e1180e74b3515abc7dae400fa3ba82f3..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum-contract/builtin/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "ethereum-contract-builtin" -description = "Small crate that helps Solidity contract to verify finality proof." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0" } -ethereum-types = "0.11.0" -finality-grandpa = "0.14.0" -hex = "0.4" -log = "0.4.14" - -# Runtime/chain specific dependencies - -rialto-runtime = { path = "../../../bin/rialto/runtime" } - -# Substrate Dependencies - -sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/modules/ethereum-contract/builtin/src/lib.rs 
b/polkadot/bridges/modules/ethereum-contract/builtin/src/lib.rs deleted file mode 100644 index 5762d510b2a90538699d613f6ef578cc883a6bae..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum-contract/builtin/src/lib.rs +++ /dev/null @@ -1,374 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use codec::{Decode, Encode}; -use ethereum_types::U256; -use finality_grandpa::voter_set::VoterSet; -use rialto_runtime::{Block, BlockNumber, Hash, Header as RuntimeHeader}; -use sp_blockchain::Error as ClientError; -use sp_finality_grandpa::{AuthorityList, ConsensusLog, GRANDPA_ENGINE_ID}; - -/// Builtin errors. -#[derive(Debug)] -pub enum Error { - /// Failed to decode block number. - BlockNumberDecode, - /// Failed to decode Substrate header. - HeaderDecode(codec::Error), - /// Failed to decode best voters set. - BestSetDecode(codec::Error), - /// Best voters set is invalid. - InvalidBestSet, - /// Failed to decode finality proof. - FinalityProofDecode(codec::Error), - /// Failed to verify justification. - JustificationVerify(Box), -} - -/// Substrate header. -#[derive(Debug, PartialEq)] -pub struct Header { - /// Header hash. - pub hash: Hash, - /// Parent header hash. - pub parent_hash: Hash, - /// Header number. 
- pub number: BlockNumber, - /// GRANDPA validators change signal. - pub signal: Option, -} - -/// GRANDPA validators set change signal. -#[derive(Debug, PartialEq)] -pub struct ValidatorsSetSignal { - /// Signal delay. - pub delay: BlockNumber, - /// New validators set. - pub validators: Vec, -} - -/// Convert from U256 to BlockNumber. Fails if `U256` value isn't fitting within `BlockNumber` -/// limits (the runtime referenced by this module uses u32 as `BlockNumber`). -pub fn to_substrate_block_number(number: U256) -> Result { - let substrate_block_number = match number == number.low_u32().into() { - true => Ok(number.low_u32()), - false => Err(Error::BlockNumberDecode), - }; - - log::trace!( - target: "bridge-builtin", - "Parsed Substrate block number from {}: {:?}", - number, - substrate_block_number, - ); - - substrate_block_number -} - -/// Convert from BlockNumber to U256. -pub fn from_substrate_block_number(number: BlockNumber) -> Result { - Ok(U256::from(number as u64)) -} - -/// Parse Substrate header. 
-pub fn parse_substrate_header(raw_header: &[u8]) -> Result { - let substrate_header = RuntimeHeader::decode(&mut &*raw_header) - .map(|header| Header { - hash: header.hash(), - parent_hash: header.parent_hash, - number: header.number, - signal: sp_runtime::traits::Header::digest(&header) - .log(|log| { - log.as_consensus().and_then(|(engine_id, log)| { - if engine_id == GRANDPA_ENGINE_ID { - Some(log) - } else { - None - } - }) - }) - .and_then(|log| ConsensusLog::decode(&mut &*log).ok()) - .and_then(|log| match log { - ConsensusLog::ScheduledChange(scheduled_change) => Some(ValidatorsSetSignal { - delay: scheduled_change.delay, - validators: scheduled_change.next_authorities.encode(), - }), - _ => None, - }), - }) - .map_err(Error::HeaderDecode); - - log::debug!( - target: "bridge-builtin", - "Parsed Substrate header {}: {:?}", - if substrate_header.is_ok() { - format!("<{}-bytes-blob>", raw_header.len()) - } else { - hex::encode(raw_header) - }, - substrate_header, - ); - - substrate_header -} - -/// Verify GRANDPA finality proof. 
-pub fn verify_substrate_finality_proof( - finality_target_number: BlockNumber, - finality_target_hash: Hash, - best_set_id: u64, - raw_best_set: &[u8], - raw_finality_proof: &[u8], -) -> Result<(), Error> { - let best_set = AuthorityList::decode(&mut &*raw_best_set) - .map_err(Error::BestSetDecode) - .and_then(|authorities| VoterSet::new(authorities.into_iter()).ok_or(Error::InvalidBestSet)); - - log::debug!( - target: "bridge-builtin", - "Parsed Substrate authorities set {}: {:?}", - if best_set.is_ok() { - format!("<{}-bytes-blob>", raw_best_set.len()) - } else { - hex::encode(raw_best_set) - }, - best_set, - ); - - let best_set = best_set?; - - let verify_result = sc_finality_grandpa::GrandpaJustification::::decode_and_verify_finalizes( - &raw_finality_proof, - (finality_target_hash, finality_target_number), - best_set_id, - &best_set, - ) - .map_err(Box::new) - .map_err(Error::JustificationVerify) - .map(|_| ()); - - log::debug!( - target: "bridge-builtin", - "Verified Substrate finality proof {}: {:?}", - if verify_result.is_ok() { - format!("<{}-bytes-blob>", raw_finality_proof.len()) - } else { - hex::encode(raw_finality_proof) - }, - verify_result, - ); - - verify_result -} - -#[cfg(test)] -mod tests { - use super::*; - use rialto_runtime::DigestItem; - use sp_core::crypto::Public; - use sp_finality_grandpa::{AuthorityId, ScheduledChange}; - use sp_runtime::generic::Digest; - - #[test] - fn to_substrate_block_number_succeeds() { - assert_eq!(to_substrate_block_number(U256::zero()).unwrap(), 0); - assert_eq!( - to_substrate_block_number(U256::from(std::u32::MAX as u64)).unwrap(), - 0xFFFFFFFF - ); - } - - #[test] - fn to_substrate_block_number_fails() { - assert!(matches!( - to_substrate_block_number(U256::from(std::u32::MAX as u64 + 1)), - Err(Error::BlockNumberDecode) - )); - } - - #[test] - fn from_substrate_block_number_succeeds() { - assert_eq!(from_substrate_block_number(0).unwrap(), U256::zero()); - assert_eq!( - 
from_substrate_block_number(std::u32::MAX).unwrap(), - U256::from(std::u32::MAX) - ); - } - - #[test] - fn substrate_header_without_signal_parsed() { - let raw_header = RuntimeHeader { - parent_hash: [0u8; 32].into(), - number: 0, - state_root: "b2fc47904df5e355c6ab476d89fbc0733aeddbe302f0b94ba4eea9283f7e89e7" - .parse() - .unwrap(), - extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" - .parse() - .unwrap(), - digest: Default::default(), - } - .encode(); - assert_eq!( - raw_header, - hex::decode("000000000000000000000000000000000000000000000000000000000000000000b2fc47904df5e355c6ab476d89fbc0733aeddbe302f0b94ba4eea9283f7e89e703170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c11131400").unwrap(), - ); - - assert_eq!( - parse_substrate_header(&raw_header).unwrap(), - Header { - hash: "afbbeb92bf6ff14f60bdef0aa89f043dd403659ae82665238810ace0d761f6d0" - .parse() - .unwrap(), - parent_hash: Default::default(), - number: 0, - signal: None, - }, - ); - } - - #[test] - fn substrate_header_with_signal_parsed() { - let authorities = vec![ - (AuthorityId::from_slice(&[1; 32]), 101), - (AuthorityId::from_slice(&[3; 32]), 103), - ]; - let mut digest = Digest::default(); - digest.push(DigestItem::Consensus( - GRANDPA_ENGINE_ID, - ConsensusLog::ScheduledChange(ScheduledChange { - next_authorities: authorities.clone(), - delay: 8, - }) - .encode(), - )); - - let raw_header = RuntimeHeader { - parent_hash: "c0ac300d4005141ea690f3df593e049739c227316eb7f05052f3ee077388b68b" - .parse() - .unwrap(), - number: 8, - state_root: "822d6b412033aa9ac8e1722918eec5f25633529225754b3d4149982f5cacd4aa" - .parse() - .unwrap(), - extrinsics_root: "e7b07c0ce2799416ce7877b9cefc7f596bea5e8813bb2a0abf760414073ca928" - .parse() - .unwrap(), - digest, - } - .encode(); - assert_eq!( - raw_header, - 
hex::decode("c0ac300d4005141ea690f3df593e049739c227316eb7f05052f3ee077388b68b20822d6b412033aa9ac8e1722918eec5f25633529225754b3d4149982f5cacd4aae7b07c0ce2799416ce7877b9cefc7f596bea5e8813bb2a0abf760414073ca928040446524e4b59010108010101010101010101010101010101010101010101010101010101010101010165000000000000000303030303030303030303030303030303030303030303030303030303030303670000000000000008000000").unwrap(), - ); - - assert_eq!( - parse_substrate_header(&raw_header).unwrap(), - Header { - hash: "3dfebb280bd87a4640f89d7f2adecd62b88148747bff5b63af6e1634ee37a56e" - .parse() - .unwrap(), - parent_hash: "c0ac300d4005141ea690f3df593e049739c227316eb7f05052f3ee077388b68b" - .parse() - .unwrap(), - number: 8, - signal: Some(ValidatorsSetSignal { - delay: 8, - validators: authorities.encode(), - }), - }, - ); - } - - /// Number of the example block with justification. - const EXAMPLE_JUSTIFIED_BLOCK_NUMBER: u32 = 8; - /// Hash of the example block with justification. - const EXAMPLE_JUSTIFIED_BLOCK_HASH: &str = "a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f343775"; - /// Id of authorities set that have generated example justification. Could be computed by tracking - /// every set change in canonized headers. - const EXAMPLE_AUTHORITIES_SET_ID: u64 = 0; - /// Encoded authorities set that has generated example justification. Could be fetched from `ScheduledChange` - /// digest of the block that has scheduled this set OR by calling `GrandpaApi::grandpa_authorities()` at - /// appropriate block. - const EXAMPLE_AUTHORITIES_SET: &str = "1488dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0ee0100000000000000d17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae690100000000000000439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234f01000000000000005e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d901000000000000001dfe3e22cc0d45c70779c1095f7489a8ef3cf52d62fbd8c2fa38c9f1723502b50100000000000000"; - /// Example justification. 
Could be fetched by calling 'chain_getBlock' RPC. - const EXAMPLE_JUSTIFICATION: &str = "2600000000000000a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000010a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000d66b4ceb57ef8bcbc955071b597c8c5d2adcfdbb009c73f8438d342670fdeca9ac60686cbd58105b10f51d0a64a8e73b2e5829b2eab3248a008c472852130b00439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234fa2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000f5730c14d3cd22b7661e2f5fcb3139dd5fef37f946314a441d01b40ce1200ef70d810525f23fd278b588cd67473c200bda83c338c407b479386aa83798e5970b5e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d9a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000c78d6ec463f476461a695b4791d30e7626d16fdf72d7c252c2cad387495a97e8c2827ed4d5af853d6e05d31cb6fb7438c9481a7e9c6990d60a9bfaf6a6e1930988dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0eea2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000052b4fc52d430286b3e2d650aa6e01b6ff4fae8b968893a62be789209eb97ee6e23780d3f5af7042d85bb48f1b202890b22724dfebce138826f66a5e00324320fd17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae6900"; - - #[test] - fn substrate_header_parse_fails() { - assert!(matches!(parse_substrate_header(&[]), Err(_))); - } - - #[test] - fn verify_substrate_finality_proof_succeeds() { - verify_substrate_finality_proof( - EXAMPLE_JUSTIFIED_BLOCK_NUMBER, - EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), - EXAMPLE_AUTHORITIES_SET_ID, - &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), - &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), - ) - .unwrap(); - } - - #[test] - fn verify_substrate_finality_proof_fails_when_wrong_block_is_finalized() { - verify_substrate_finality_proof( - 4, - Default::default(), - EXAMPLE_AUTHORITIES_SET_ID, - &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), - &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), - ) - .unwrap_err(); - } - - #[test] - 
fn verify_substrate_finality_proof_fails_when_wrong_set_is_provided() { - verify_substrate_finality_proof( - EXAMPLE_JUSTIFIED_BLOCK_NUMBER, - EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), - EXAMPLE_AUTHORITIES_SET_ID, - &hex::decode("deadbeef").unwrap(), - &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), - ) - .unwrap_err(); - } - - #[test] - fn verify_substrate_finality_proof_fails_when_wrong_set_id_is_provided() { - verify_substrate_finality_proof( - EXAMPLE_JUSTIFIED_BLOCK_NUMBER, - EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), - 42, - &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), - &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), - ) - .unwrap_err(); - } - - #[test] - fn verify_substrate_finality_proof_fails_when_wrong_proof_is_provided() { - verify_substrate_finality_proof( - EXAMPLE_JUSTIFIED_BLOCK_NUMBER, - EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), - 0, - &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), - &hex::decode("deadbeef").unwrap(), - ) - .unwrap_err(); - } -} diff --git a/polkadot/bridges/modules/ethereum/Cargo.toml b/polkadot/bridges/modules/ethereum/Cargo.toml deleted file mode 100644 index 1912f45babb5a43a9c8c1a12eff2be33317433e2..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/Cargo.toml +++ /dev/null @@ -1,47 +0,0 @@ -[package] -name = "pallet-bridge-eth-poa" -description = "A Substrate Runtime module that is able to verify PoA headers and their finality." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"], optional = true } -serde = { version = "1.0", optional = true } - -# Bridge dependencies - -bp-eth-poa = { path = "../../primitives/ethereum-poa", default-features = false } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[dev-dependencies] -libsecp256k1 = { version = "0.3.4", features = ["hmac"] } -hex-literal = "0.3" - -[features] -default = ["std"] -std = [ - "bp-eth-poa/std", - "codec/std", - "frame-benchmarking/std", - "frame-support/std", - "frame-system/std", - "serde", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", -] -runtime-benchmarks = [ - "frame-benchmarking", - "libsecp256k1", -] diff --git a/polkadot/bridges/modules/ethereum/src/benchmarking.rs b/polkadot/bridges/modules/ethereum/src/benchmarking.rs deleted file mode 100644 index beb8ba2a0e5ca2a939d61eb35f1a613d2b3a4fbe..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/benchmarking.rs +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. 
-// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use super::{ - BridgeStorage, - RawOrigin, - HeadersByNumber, - Instance, - Config, -}; - -use crate::test_utils::{ - build_custom_header, build_genesis_header, insert_header, validator_utils::*, validators_change_receipt, - HeaderBuilder, -}; - -use bp_eth_poa::{compute_merkle_root, U256}; -use frame_benchmarking::benchmarks_instance; -use frame_system::RawOrigin; - -benchmarks_instance! { - // Benchmark `import_unsigned_header` extrinsic with the best possible conditions: - // * Parent header is finalized. - // * New header doesn't require receipts. - // * Nothing is finalized by new header. - // * Nothing is pruned by new header. 
- import_unsigned_header_best_case { - let n in 1..1000; - - let num_validators = 2; - let initial_header = initialize_bench::(num_validators); - - // prepare header to be inserted - let header = build_custom_header( - &validator(1), - &initial_header, - |mut header| { - header.gas_limit = header.gas_limit + U256::from(n); - header - }, - ); - }: import_unsigned_header(RawOrigin::None, header, None) - verify { - let storage = BridgeStorage::::new(); - assert_eq!(storage.best_block().0.number, 1); - assert_eq!(storage.finalized_block().number, 0); - } - - // Our goal with this bench is to try and see the effect that finalizing difference ranges of - // blocks has on our import time. As such we need to make sure that we keep the number of - // validators fixed while changing the number blocks finalized (the complexity parameter) by - // importing the last header. - // - // One important thing to keep in mind is that the runtime provides a finality cache in order to - // reduce the overhead of header finalization. However, this is only triggered every 16 blocks. - import_unsigned_finality { - // Our complexity parameter, n, will represent the number of blocks imported before - // finalization. 
- let n in 1..7; - - let mut storage = BridgeStorage::::new(); - let num_validators: u32 = 2; - let initial_header = initialize_bench::(num_validators as usize); - - // Since we only have two validators we need to make sure the number of blocks is even to - // make sure the right validator signs the final block - let num_blocks = 2 * n; - let mut headers = Vec::new(); - let mut parent = initial_header.clone(); - - // Import a bunch of headers without any verification, will ensure that they're not - // finalized prematurely - for i in 1..=num_blocks { - let header = HeaderBuilder::with_parent(&parent).sign_by(&validator(0)); - let id = header.compute_id(); - insert_header(&mut storage, header.clone()); - headers.push(header.clone()); - parent = header; - } - - let last_header = headers.last().unwrap().clone(); - let last_authority = validator(1); - - // Need to make sure that the header we're going to import hasn't been inserted - // into storage already - let header = HeaderBuilder::with_parent(&last_header).sign_by(&last_authority); - }: import_unsigned_header(RawOrigin::None, header, None) - verify { - let storage = BridgeStorage::::new(); - assert_eq!(storage.best_block().0.number, (num_blocks + 1) as u64); - assert_eq!(storage.finalized_block().number, num_blocks as u64); - } - - // Basically the exact same as `import_unsigned_finality` but with a different range for the - // complexity parameter. In this bench we use a larger range of blocks to see how performance - // changes when the finality cache kicks in (>16 blocks). - import_unsigned_finality_with_cache { - // Our complexity parameter, n, will represent the number of blocks imported before - // finalization. 
- let n in 7..100; - - let mut storage = BridgeStorage::::new(); - let num_validators: u32 = 2; - let initial_header = initialize_bench::(num_validators as usize); - - // Since we only have two validators we need to make sure the number of blocks is even to - // make sure the right validator signs the final block - let num_blocks = 2 * n; - let mut headers = Vec::new(); - let mut parent = initial_header.clone(); - - // Import a bunch of headers without any verification, will ensure that they're not - // finalized prematurely - for i in 1..=num_blocks { - let header = HeaderBuilder::with_parent(&parent).sign_by(&validator(0)); - let id = header.compute_id(); - insert_header(&mut storage, header.clone()); - headers.push(header.clone()); - parent = header; - } - - let last_header = headers.last().unwrap().clone(); - let last_authority = validator(1); - - // Need to make sure that the header we're going to import hasn't been inserted - // into storage already - let header = HeaderBuilder::with_parent(&last_header).sign_by(&last_authority); - }: import_unsigned_header(RawOrigin::None, header, None) - verify { - let storage = BridgeStorage::::new(); - assert_eq!(storage.best_block().0.number, (num_blocks + 1) as u64); - assert_eq!(storage.finalized_block().number, num_blocks as u64); - } - - // A block import may trigger a pruning event, which adds extra work to the import progress. - // In this bench we trigger a pruning event in order to see how much extra time is spent by the - // runtime dealing with it. In the Ethereum Pallet, we're limited pruning to eight blocks in a - // single import, as dictated by MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT. 
- import_unsigned_pruning { - let n in 1..MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT as u32; - - let mut storage = BridgeStorage::::new(); - - let num_validators = 3; - let initial_header = initialize_bench::(num_validators as usize); - let validators = validators(num_validators); - - // Want to prune eligible blocks between [0, n) - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: 0, - oldest_block_to_keep: n as u64, - }); - - let mut parent = initial_header; - for i in 1..=n { - let header = HeaderBuilder::with_parent(&parent).sign_by_set(&validators); - let id = header.compute_id(); - insert_header(&mut storage, header.clone()); - parent = header; - } - - let header = HeaderBuilder::with_parent(&parent).sign_by_set(&validators); - }: import_unsigned_header(RawOrigin::None, header, None) - verify { - let storage = BridgeStorage::::new(); - let max_pruned: u64 = (n - 1) as _; - assert_eq!(storage.best_block().0.number, (n + 1) as u64); - assert!(HeadersByNumber::::get(&0).is_none()); - assert!(HeadersByNumber::::get(&max_pruned).is_none()); - } - - // The goal of this bench is to import a block which contains a transaction receipt. The receipt - // will contain a validator set change. Verifying the receipt root is an expensive operation to - // do, which is why we're interested in benchmarking it. - import_unsigned_with_receipts { - let n in 1..100; - - let mut storage = BridgeStorage::::new(); - - let num_validators = 1; - let initial_header = initialize_bench::(num_validators as usize); - - let mut receipts = vec![]; - for i in 1..=n { - let receipt = validators_change_receipt(Default::default()); - receipts.push(receipt) - } - let encoded_receipts = receipts.iter().map(|r| r.rlp()); - - // We need this extra header since this is what signals a validator set transition. 
This - // will ensure that the next header is within the "Contract" window - let header1 = HeaderBuilder::with_parent(&initial_header).sign_by(&validator(0)); - insert_header(&mut storage, header1.clone()); - - let header = build_custom_header( - &validator(0), - &header1, - |mut header| { - // Logs Bloom signals a change in validator set - header.log_bloom = (&[0xff; 256]).into(); - header.receipts_root = compute_merkle_root(encoded_receipts); - header - }, - ); - }: import_unsigned_header(RawOrigin::None, header, Some(receipts)) - verify { - let storage = BridgeStorage::::new(); - assert_eq!(storage.best_block().0.number, 2); - } -} - -fn initialize_bench, I: Instance>(num_validators: usize) -> AuraHeader { - // Initialize storage with some initial header - let initial_header = build_genesis_header(&validator(0)); - let initial_difficulty = initial_header.difficulty; - let initial_validators = validators_addresses(num_validators as usize); - - initialize_storage::(&initial_header, initial_difficulty, &initial_validators); - - initial_header -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{run_test, TestRuntime}; - use frame_support::assert_ok; - - #[test] - fn insert_unsigned_header_best_case() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_header_best_case::()); - }); - } - - #[test] - fn insert_unsigned_header_finality() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_finality::()); - }); - } - - #[test] - fn insert_unsigned_header_finality_with_cache() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_finality_with_cache::()); - }); - } - - #[test] - fn insert_unsigned_header_pruning() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_pruning::()); - }); - } - - #[test] - fn insert_unsigned_header_receipts() { - run_test(1, |_| { - assert_ok!(test_benchmark_import_unsigned_with_receipts::()); - }); - } -} diff --git a/polkadot/bridges/modules/ethereum/src/error.rs 
b/polkadot/bridges/modules/ethereum/src/error.rs deleted file mode 100644 index 50dccd6ea2ced8c975435f6dd320adc6b8f66130..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/error.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use sp_runtime::RuntimeDebug; - -/// Header import error. -#[derive(Clone, Copy, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(PartialEq))] -pub enum Error { - /// The header is beyond last finalized and can not be imported. - AncientHeader = 0, - /// The header is already imported. - KnownHeader = 1, - /// Seal has an incorrect format. - InvalidSealArity = 2, - /// Block number isn't sensible. - RidiculousNumber = 3, - /// Block has too much gas used. - TooMuchGasUsed = 4, - /// Gas limit header field is invalid. - InvalidGasLimit = 5, - /// Extra data is of an invalid length. - ExtraDataOutOfBounds = 6, - /// Timestamp header overflowed. - TimestampOverflow = 7, - /// The parent header is missing from the blockchain. - MissingParentBlock = 8, - /// The header step is missing from the header. - MissingStep = 9, - /// The header signature is missing from the header. - MissingSignature = 10, - /// Empty steps are missing from the header. 
- MissingEmptySteps = 11, - /// The same author issued different votes at the same step. - DoubleVote = 12, - /// Validation proof insufficient. - InsufficientProof = 13, - /// Difficulty header field is invalid. - InvalidDifficulty = 14, - /// The received block is from an incorrect proposer. - NotValidator = 15, - /// Missing transaction receipts for the operation. - MissingTransactionsReceipts = 16, - /// Redundant transaction receipts are provided. - RedundantTransactionsReceipts = 17, - /// Provided transactions receipts are not matching the header. - TransactionsReceiptsMismatch = 18, - /// Can't accept unsigned header from the far future. - UnsignedTooFarInTheFuture = 19, - /// Trying to finalize sibling of finalized block. - TryingToFinalizeSibling = 20, - /// Header timestamp is ahead of on-chain timestamp - HeaderTimestampIsAhead = 21, -} - -impl Error { - pub fn msg(&self) -> &'static str { - match *self { - Error::AncientHeader => "Header is beyound last finalized and can not be imported", - Error::KnownHeader => "Header is already imported", - Error::InvalidSealArity => "Header has an incorrect seal", - Error::RidiculousNumber => "Header has too large number", - Error::TooMuchGasUsed => "Header has too much gas used", - Error::InvalidGasLimit => "Header has invalid gas limit", - Error::ExtraDataOutOfBounds => "Header has too large extra data", - Error::TimestampOverflow => "Header has too large timestamp", - Error::MissingParentBlock => "Header has unknown parent hash", - Error::MissingStep => "Header is missing step seal", - Error::MissingSignature => "Header is missing signature seal", - Error::MissingEmptySteps => "Header is missing empty steps seal", - Error::DoubleVote => "Header has invalid step in seal", - Error::InsufficientProof => "Header has insufficient proof", - Error::InvalidDifficulty => "Header has invalid difficulty", - Error::NotValidator => "Header is sealed by unexpected validator", - Error::MissingTransactionsReceipts => "The 
import operation requires transactions receipts", - Error::RedundantTransactionsReceipts => "Redundant transactions receipts are provided", - Error::TransactionsReceiptsMismatch => "Invalid transactions receipts provided", - Error::UnsignedTooFarInTheFuture => "The unsigned header is too far in future", - Error::TryingToFinalizeSibling => "Trying to finalize sibling of finalized block", - Error::HeaderTimestampIsAhead => "Header timestamp is ahead of on-chain timestamp", - } - } - - /// Return unique error code. - pub fn code(&self) -> u8 { - *self as u8 - } -} diff --git a/polkadot/bridges/modules/ethereum/src/finality.rs b/polkadot/bridges/modules/ethereum/src/finality.rs deleted file mode 100644 index 608708a0c7badeb59fcfbc43dd1e41ce6b63a291..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/finality.rs +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::error::Error; -use crate::Storage; -use bp_eth_poa::{public_to_address, Address, AuraHeader, HeaderId, SealedEmptyStep, H256}; -use codec::{Decode, Encode}; -use sp_io::crypto::secp256k1_ecdsa_recover; -use sp_runtime::RuntimeDebug; -use sp_std::collections::{ - btree_map::{BTreeMap, Entry}, - btree_set::BTreeSet, - vec_deque::VecDeque, -}; -use sp_std::prelude::*; - -/// Cached finality votes for given block. -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct CachedFinalityVotes { - /// True if we have stopped at best finalized block' sibling. This means - /// that we are trying to finalize block from fork that has forked before - /// best finalized. - pub stopped_at_finalized_sibling: bool, - /// Header ancestors that were read while we have been searching for - /// cached votes entry. Newest header has index 0. - pub unaccounted_ancestry: VecDeque<(HeaderId, Option, AuraHeader)>, - /// Cached finality votes, if they have been found. The associated - /// header is not included into `unaccounted_ancestry`. - pub votes: Option>, -} - -/// Finality effects. -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct FinalityEffects { - /// Finalized headers. - pub finalized_headers: Vec<(HeaderId, Option)>, - /// Finality votes used in computation. - pub votes: FinalityVotes, -} - -/// Finality votes for given block. -#[derive(RuntimeDebug, Decode, Encode)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct FinalityVotes { - /// Number of votes per each validator. - pub votes: BTreeMap, - /// Ancestry blocks with oldest ancestors at the beginning and newest at the - /// end of the queue. - pub ancestry: VecDeque>, -} - -/// Information about block ancestor that is used in computations. -#[derive(RuntimeDebug, Decode, Encode)] -#[cfg_attr(test, derive(Clone, Default, PartialEq))] -pub struct FinalityAncestor { - /// Bock id. - pub id: HeaderId, - /// Block submitter. 
- pub submitter: Option, - /// Validators that have signed this block and empty steps on top - /// of this block. - pub signers: BTreeSet
, -} - -/// Tries to finalize blocks when given block is imported. -/// -/// Returns numbers and hashes of finalized blocks in ascending order. -pub fn finalize_blocks( - storage: &S, - best_finalized: HeaderId, - header_validators: (HeaderId, &[Address]), - id: HeaderId, - submitter: Option<&S::Submitter>, - header: &AuraHeader, - two_thirds_majority_transition: u64, -) -> Result, Error> { - // compute count of voters for every unfinalized block in ancestry - let validators = header_validators.1.iter().collect(); - let votes = prepare_votes( - header - .parent_id() - .map(|parent_id| { - storage.cached_finality_votes(&parent_id, &best_finalized, |hash| { - *hash == header_validators.0.hash || *hash == best_finalized.hash - }) - }) - .unwrap_or_default(), - best_finalized, - &validators, - id, - header, - submitter.cloned(), - )?; - - // now let's iterate in reverse order && find just finalized blocks - let mut finalized_headers = Vec::new(); - let mut current_votes = votes.votes.clone(); - for ancestor in &votes.ancestry { - if !is_finalized( - &validators, - ¤t_votes, - ancestor.id.number >= two_thirds_majority_transition, - ) { - break; - } - - remove_signers_votes(&ancestor.signers, &mut current_votes); - finalized_headers.push((ancestor.id, ancestor.submitter.clone())); - } - - Ok(FinalityEffects { - finalized_headers, - votes, - }) -} - -/// Returns true if there are enough votes to treat this header as finalized. -fn is_finalized( - validators: &BTreeSet<&Address>, - votes: &BTreeMap, - requires_two_thirds_majority: bool, -) -> bool { - (!requires_two_thirds_majority && votes.len() * 2 > validators.len()) - || (requires_two_thirds_majority && votes.len() * 3 > validators.len() * 2) -} - -/// Prepare 'votes' of header and its ancestors' signers. 
-pub(crate) fn prepare_votes( - mut cached_votes: CachedFinalityVotes, - best_finalized: HeaderId, - validators: &BTreeSet<&Address>, - id: HeaderId, - header: &AuraHeader, - submitter: Option, -) -> Result, Error> { - // if we have reached finalized block sibling, then we're trying - // to switch finalized blocks - if cached_votes.stopped_at_finalized_sibling { - return Err(Error::TryingToFinalizeSibling); - } - - // this fn can only work with single validators set - if !validators.contains(&header.author) { - return Err(Error::NotValidator); - } - - // now we have votes that were valid when some block B has been inserted - // things may have changed a bit, but we do not need to read anything else - // from the db, because we have ancestry - // so the only thing we need to do is: - // 1) remove votes from blocks that have been finalized after B has been inserted; - // 2) add votes from B descendants - let mut votes = cached_votes.votes.unwrap_or_default(); - - // remove votes from finalized blocks - while let Some(old_ancestor) = votes.ancestry.pop_front() { - if old_ancestor.id.number > best_finalized.number { - votes.ancestry.push_front(old_ancestor); - break; - } - - remove_signers_votes(&old_ancestor.signers, &mut votes.votes); - } - - // add votes from new blocks - let mut parent_empty_step_signers = empty_steps_signers(header); - let mut unaccounted_ancestry = VecDeque::new(); - while let Some((ancestor_id, ancestor_submitter, ancestor)) = cached_votes.unaccounted_ancestry.pop_front() { - let mut signers = empty_steps_signers(&ancestor); - sp_std::mem::swap(&mut signers, &mut parent_empty_step_signers); - signers.insert(ancestor.author); - - add_signers_votes(validators, &signers, &mut votes.votes)?; - - unaccounted_ancestry.push_front(FinalityAncestor { - id: ancestor_id, - submitter: ancestor_submitter, - signers, - }); - } - votes.ancestry.extend(unaccounted_ancestry); - - // add votes from block itself - let mut header_signers = BTreeSet::new(); - 
header_signers.insert(header.author); - *votes.votes.entry(header.author).or_insert(0) += 1; - votes.ancestry.push_back(FinalityAncestor { - id, - submitter, - signers: header_signers, - }); - - Ok(votes) -} - -/// Increase count of 'votes' for every passed signer. -/// Fails if at least one of signers is not in the `validators` set. -fn add_signers_votes( - validators: &BTreeSet<&Address>, - signers_to_add: &BTreeSet
, - votes: &mut BTreeMap, -) -> Result<(), Error> { - for signer in signers_to_add { - if !validators.contains(signer) { - return Err(Error::NotValidator); - } - - *votes.entry(*signer).or_insert(0) += 1; - } - - Ok(()) -} - -/// Decrease 'votes' count for every passed signer. -fn remove_signers_votes(signers_to_remove: &BTreeSet
, votes: &mut BTreeMap) { - for signer in signers_to_remove { - match votes.entry(*signer) { - Entry::Occupied(mut entry) => { - if *entry.get() <= 1 { - entry.remove(); - } else { - *entry.get_mut() -= 1; - } - } - Entry::Vacant(_) => unreachable!("we only remove signers that have been added; qed"), - } - } -} - -/// Returns unique set of empty steps signers. -fn empty_steps_signers(header: &AuraHeader) -> BTreeSet
{ - header - .empty_steps() - .into_iter() - .flatten() - .filter_map(|step| empty_step_signer(&step, &header.parent_hash)) - .collect::>() -} - -/// Returns author of empty step signature. -fn empty_step_signer(empty_step: &SealedEmptyStep, parent_hash: &H256) -> Option
{ - let message = empty_step.message(parent_hash); - secp256k1_ecdsa_recover(empty_step.signature.as_fixed_bytes(), message.as_fixed_bytes()) - .ok() - .map(|public| public_to_address(&public)) -} - -impl Default for CachedFinalityVotes { - fn default() -> Self { - CachedFinalityVotes { - stopped_at_finalized_sibling: false, - unaccounted_ancestry: VecDeque::new(), - votes: None, - } - } -} - -impl Default for FinalityVotes { - fn default() -> Self { - FinalityVotes { - votes: BTreeMap::new(), - ancestry: VecDeque::new(), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{insert_header, run_test, validator, validators_addresses, HeaderBuilder, TestRuntime}; - use crate::{BridgeStorage, FinalityCache, HeaderToImport}; - use frame_support::StorageMap; - - const TOTAL_VALIDATORS: usize = 5; - - #[test] - fn verifies_header_author() { - run_test(TOTAL_VALIDATORS, |_| { - assert_eq!( - finalize_blocks( - &BridgeStorage::::new(), - Default::default(), - (Default::default(), &[]), - Default::default(), - None, - &AuraHeader::default(), - 0, - ), - Err(Error::NotValidator), - ); - }); - } - - #[test] - fn finalize_blocks_works() { - run_test(TOTAL_VALIDATORS, |ctx| { - // let's say we have 5 validators (we need 'votes' from 3 validators to achieve - // finality) - let mut storage = BridgeStorage::::new(); - - // when header#1 is inserted, nothing is finalized (1 vote) - let header1 = HeaderBuilder::with_parent(&ctx.genesis).sign_by(&validator(0)); - let id1 = header1.compute_id(); - let mut header_to_import = HeaderToImport { - context: storage.import_context(None, &header1.parent_hash).unwrap(), - is_best: true, - id: id1, - header: header1, - total_difficulty: 0.into(), - enacted_change: None, - scheduled_change: None, - finality_votes: Default::default(), - }; - assert_eq!( - finalize_blocks( - &storage, - ctx.genesis.compute_id(), - (Default::default(), &ctx.addresses), - id1, - None, - &header_to_import.header, - u64::max_value(), - ) - 
.map(|eff| eff.finalized_headers), - Ok(Vec::new()), - ); - storage.insert_header(header_to_import.clone()); - - // when header#2 is inserted, nothing is finalized (2 votes) - header_to_import.header = HeaderBuilder::with_parent_hash(id1.hash).sign_by(&validator(1)); - header_to_import.id = header_to_import.header.compute_id(); - let id2 = header_to_import.header.compute_id(); - assert_eq!( - finalize_blocks( - &storage, - ctx.genesis.compute_id(), - (Default::default(), &ctx.addresses), - id2, - None, - &header_to_import.header, - u64::max_value(), - ) - .map(|eff| eff.finalized_headers), - Ok(Vec::new()), - ); - storage.insert_header(header_to_import.clone()); - - // when header#3 is inserted, header#1 is finalized (3 votes) - header_to_import.header = HeaderBuilder::with_parent_hash(id2.hash).sign_by(&validator(2)); - header_to_import.id = header_to_import.header.compute_id(); - let id3 = header_to_import.header.compute_id(); - assert_eq!( - finalize_blocks( - &storage, - ctx.genesis.compute_id(), - (Default::default(), &ctx.addresses), - id3, - None, - &header_to_import.header, - u64::max_value(), - ) - .map(|eff| eff.finalized_headers), - Ok(vec![(id1, None)]), - ); - storage.insert_header(header_to_import); - }); - } - - #[test] - fn cached_votes_are_updated_with_ancestry() { - // we're inserting header#5 - // cached votes are from header#3 - // header#4 has finalized header#1 and header#2 - // => when inserting header#5, we need to: - // 1) remove votes from header#1 and header#2 - // 2) add votes from header#4 and header#5 - let validators = validators_addresses(5); - let headers = (1..6) - .map(|number| HeaderBuilder::with_number(number).sign_by(&validator(number as usize - 1))) - .collect::>(); - let ancestry = headers - .iter() - .map(|header| FinalityAncestor { - id: header.compute_id(), - signers: vec![header.author].into_iter().collect(), - ..Default::default() - }) - .collect::>(); - let header5 = headers[4].clone(); - assert_eq!( - 
prepare_votes::<()>( - CachedFinalityVotes { - stopped_at_finalized_sibling: false, - unaccounted_ancestry: vec![(headers[3].compute_id(), None, headers[3].clone()),] - .into_iter() - .collect(), - votes: Some(FinalityVotes { - votes: vec![(validators[0], 1), (validators[1], 1), (validators[2], 1),] - .into_iter() - .collect(), - ancestry: ancestry[..3].iter().cloned().collect(), - }), - }, - headers[1].compute_id(), - &validators.iter().collect(), - header5.compute_id(), - &header5, - None, - ) - .unwrap(), - FinalityVotes { - votes: vec![(validators[2], 1), (validators[3], 1), (validators[4], 1),] - .into_iter() - .collect(), - ancestry: ancestry[2..].iter().cloned().collect(), - }, - ); - } - - #[test] - fn prepare_votes_respects_finality_cache() { - run_test(TOTAL_VALIDATORS, |ctx| { - // we need signatures of 3 validators to finalize block - let mut storage = BridgeStorage::::new(); - - // headers 1..3 are signed by validator#0 - // headers 4..6 are signed by validator#1 - // headers 7..9 are signed by validator#2 - let mut hashes = Vec::new(); - let mut headers = Vec::new(); - let mut ancestry = Vec::new(); - let mut parent_hash = ctx.genesis.compute_hash(); - for i in 1..10 { - let header = HeaderBuilder::with_parent_hash(parent_hash).sign_by(&validator((i - 1) / 3)); - let id = header.compute_id(); - insert_header(&mut storage, header.clone()); - hashes.push(id.hash); - ancestry.push(FinalityAncestor { - id: header.compute_id(), - submitter: None, - signers: vec![header.author].into_iter().collect(), - }); - headers.push(header); - parent_hash = id.hash; - } - - // when we're inserting header#7 and last finalized header is 0: - // check that votes at #7 are computed correctly without cache - let expected_votes_at_7 = FinalityVotes { - votes: vec![(ctx.addresses[0], 3), (ctx.addresses[1], 3), (ctx.addresses[2], 1)] - .into_iter() - .collect(), - ancestry: ancestry[..7].iter().cloned().collect(), - }; - let id7 = headers[6].compute_id(); - assert_eq!( - 
prepare_votes( - storage.cached_finality_votes( - &headers.get(5).unwrap().compute_id(), - &ctx.genesis.compute_id(), - |_| false, - ), - Default::default(), - &ctx.addresses.iter().collect(), - id7, - headers.get(6).unwrap(), - None, - ) - .unwrap(), - expected_votes_at_7, - ); - - // cached votes at #5 - let expected_votes_at_5 = FinalityVotes { - votes: vec![(ctx.addresses[0], 3), (ctx.addresses[1], 2)].into_iter().collect(), - ancestry: ancestry[..5].iter().cloned().collect(), - }; - FinalityCache::::insert(hashes[4], expected_votes_at_5); - - // when we're inserting header#7 and last finalized header is 0: - // check that votes at #7 are computed correctly with cache - assert_eq!( - prepare_votes( - storage.cached_finality_votes( - &headers.get(5).unwrap().compute_id(), - &ctx.genesis.compute_id(), - |_| false, - ), - Default::default(), - &ctx.addresses.iter().collect(), - id7, - headers.get(6).unwrap(), - None, - ) - .unwrap(), - expected_votes_at_7, - ); - - // when we're inserting header#7 and last finalized header is 3: - // check that votes at #7 are computed correctly with cache - let expected_votes_at_7 = FinalityVotes { - votes: vec![(ctx.addresses[1], 3), (ctx.addresses[2], 1)].into_iter().collect(), - ancestry: ancestry[3..7].iter().cloned().collect(), - }; - assert_eq!( - prepare_votes( - storage.cached_finality_votes( - &headers.get(5).unwrap().compute_id(), - &headers.get(2).unwrap().compute_id(), - |hash| *hash == hashes[2], - ), - headers[2].compute_id(), - &ctx.addresses.iter().collect(), - id7, - headers.get(6).unwrap(), - None, - ) - .unwrap(), - expected_votes_at_7, - ); - }); - } - - #[test] - fn prepare_votes_fails_when_finalized_sibling_is_in_ancestry() { - assert_eq!( - prepare_votes::<()>( - CachedFinalityVotes { - stopped_at_finalized_sibling: true, - ..Default::default() - }, - Default::default(), - &validators_addresses(3).iter().collect(), - Default::default(), - &Default::default(), - None, - ), - 
Err(Error::TryingToFinalizeSibling), - ); - } -} diff --git a/polkadot/bridges/modules/ethereum/src/import.rs b/polkadot/bridges/modules/ethereum/src/import.rs deleted file mode 100644 index 1b41c3a8b20ad4d2f76699e242880f84f6f285de..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/import.rs +++ /dev/null @@ -1,609 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::error::Error; -use crate::finality::finalize_blocks; -use crate::validators::{Validators, ValidatorsConfiguration}; -use crate::verification::{is_importable_header, verify_aura_header}; -use crate::{AuraConfiguration, ChainTime, ChangeToEnact, PruningStrategy, Storage}; -use bp_eth_poa::{AuraHeader, HeaderId, Receipt}; -use sp_std::{collections::btree_map::BTreeMap, prelude::*}; - -/// Imports bunch of headers and updates blocks finality. -/// -/// Transactions receipts must be provided if `header_import_requires_receipts()` -/// has returned true. -/// If successful, returns tuple where first element is the number of useful headers -/// we have imported and the second element is the number of useless headers (duplicate) -/// we have NOT imported. -/// Returns error if fatal error has occured during import. Some valid headers may be -/// imported in this case. 
-/// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/415) -#[allow(clippy::too_many_arguments)] -pub fn import_headers( - storage: &mut S, - pruning_strategy: &mut PS, - aura_config: &AuraConfiguration, - validators_config: &ValidatorsConfiguration, - submitter: Option, - headers: Vec<(AuraHeader, Option>)>, - chain_time: &CT, - finalized_headers: &mut BTreeMap, -) -> Result<(u64, u64), Error> { - let mut useful = 0; - let mut useless = 0; - for (header, receipts) in headers { - let import_result = import_header( - storage, - pruning_strategy, - aura_config, - validators_config, - submitter.clone(), - header, - chain_time, - receipts, - ); - - match import_result { - Ok((_, finalized)) => { - for (_, submitter) in finalized { - if let Some(submitter) = submitter { - *finalized_headers.entry(submitter).or_default() += 1; - } - } - useful += 1; - } - Err(Error::AncientHeader) | Err(Error::KnownHeader) => useless += 1, - Err(error) => return Err(error), - } - } - - Ok((useful, useless)) -} - -/// A vector of finalized headers and their submitters. -pub type FinalizedHeaders = Vec<(HeaderId, Option<::Submitter>)>; - -/// Imports given header and updates blocks finality (if required). -/// -/// Transactions receipts must be provided if `header_import_requires_receipts()` -/// has returned true. -/// -/// Returns imported block id and list of all finalized headers. 
-/// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/415) -#[allow(clippy::too_many_arguments)] -pub fn import_header( - storage: &mut S, - pruning_strategy: &mut PS, - aura_config: &AuraConfiguration, - validators_config: &ValidatorsConfiguration, - submitter: Option, - header: AuraHeader, - chain_time: &CT, - receipts: Option>, -) -> Result<(HeaderId, FinalizedHeaders), Error> { - // first check that we are able to import this header at all - let (header_id, finalized_id) = is_importable_header(storage, &header)?; - - // verify header - let import_context = verify_aura_header(storage, aura_config, submitter, &header, chain_time)?; - - // check if block schedules new validators - let validators = Validators::new(validators_config); - let (scheduled_change, enacted_change) = validators.extract_validators_change(&header, receipts)?; - - // check if block finalizes some other blocks and corresponding scheduled validators - let validators_set = import_context.validators_set(); - let finalized_blocks = finalize_blocks( - storage, - finalized_id, - (validators_set.enact_block, &validators_set.validators), - header_id, - import_context.submitter(), - &header, - aura_config.two_thirds_majority_transition, - )?; - let enacted_change = enacted_change - .map(|validators| ChangeToEnact { - signal_block: None, - validators, - }) - .or_else(|| validators.finalize_validators_change(storage, &finalized_blocks.finalized_headers)); - - // NOTE: we can't return Err() from anywhere below this line - // (because otherwise we'll have inconsistent storage if transaction will fail) - - // and finally insert the block - let (best_id, best_total_difficulty) = storage.best_block(); - let total_difficulty = import_context.total_difficulty() + header.difficulty; - let is_best = total_difficulty > best_total_difficulty; - storage.insert_header(import_context.into_import_header( - is_best, - header_id, - header, - total_difficulty, - enacted_change, - 
scheduled_change, - finalized_blocks.votes, - )); - - // compute upper border of updated pruning range - let new_best_block_id = if is_best { header_id } else { best_id }; - let new_best_finalized_block_id = finalized_blocks.finalized_headers.last().map(|(id, _)| *id); - let pruning_upper_bound = pruning_strategy.pruning_upper_bound( - new_best_block_id.number, - new_best_finalized_block_id - .map(|id| id.number) - .unwrap_or(finalized_id.number), - ); - - // now mark finalized headers && prune old headers - storage.finalize_and_prune_headers(new_best_finalized_block_id, pruning_upper_bound); - - Ok((header_id, finalized_blocks.finalized_headers)) -} - -/// Returns true if transactions receipts are required to import given header. -pub fn header_import_requires_receipts( - storage: &S, - validators_config: &ValidatorsConfiguration, - header: &AuraHeader, -) -> bool { - is_importable_header(storage, header) - .map(|_| Validators::new(validators_config)) - .map(|validators| validators.maybe_signals_validators_change(header)) - .unwrap_or(false) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{ - run_test, secret_to_address, test_aura_config, test_validators_config, validator, validators_addresses, - validators_change_receipt, HeaderBuilder, KeepSomeHeadersBehindBest, TestRuntime, GAS_LIMIT, - }; - use crate::validators::ValidatorsSource; - use crate::DefaultInstance; - use crate::{BlocksToPrune, BridgeStorage, Headers, PruningRange}; - use frame_support::{StorageMap, StorageValue}; - use secp256k1::SecretKey; - - const TOTAL_VALIDATORS: usize = 3; - - #[test] - fn rejects_finalized_block_competitors() { - run_test(TOTAL_VALIDATORS, |_| { - let mut storage = BridgeStorage::::new(); - storage.finalize_and_prune_headers( - Some(HeaderId { - number: 100, - ..Default::default() - }), - 0, - ); - assert_eq!( - import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &test_validators_config(), - None, - 
Default::default(), - &(), - None, - ), - Err(Error::AncientHeader), - ); - }); - } - - #[test] - fn rejects_known_header() { - run_test(TOTAL_VALIDATORS, |ctx| { - let mut storage = BridgeStorage::::new(); - let header = HeaderBuilder::with_parent(&ctx.genesis).sign_by(&validator(1)); - assert_eq!( - import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &test_validators_config(), - None, - header.clone(), - &(), - None, - ) - .map(|_| ()), - Ok(()), - ); - assert_eq!( - import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &test_validators_config(), - None, - header, - &(), - None, - ) - .map(|_| ()), - Err(Error::KnownHeader), - ); - }); - } - - #[test] - fn import_header_works() { - run_test(TOTAL_VALIDATORS, |ctx| { - let validators_config = ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(ctx.addresses.clone())), - (1, ValidatorsSource::List(validators_addresses(2))), - ]); - let mut storage = BridgeStorage::::new(); - let header = HeaderBuilder::with_parent(&ctx.genesis).sign_by(&validator(1)); - let hash = header.compute_hash(); - assert_eq!( - import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - None, - header, - &(), - None - ) - .map(|_| ()), - Ok(()), - ); - - // check that new validators will be used for next header - let imported_header = Headers::::get(&hash).unwrap(); - assert_eq!( - imported_header.next_validators_set_id, - 1, // new set is enacted from config - ); - }); - } - - #[test] - fn headers_are_pruned_during_import() { - run_test(TOTAL_VALIDATORS, |ctx| { - let validators_config = - ValidatorsConfiguration::Single(ValidatorsSource::Contract([3; 20].into(), ctx.addresses.clone())); - let validators = vec![validator(0), validator(1), validator(2)]; - let mut storage = BridgeStorage::::new(); - - // header [0..11] are finalizing blocks [0; 9] - // => since we want 
to keep 10 finalized blocks, we aren't pruning anything - let mut latest_block_id = Default::default(); - for i in 1..11 { - let header = HeaderBuilder::with_parent_number(i - 1).sign_by_set(&validators); - let parent_id = header.parent_id().unwrap(); - - let (rolling_last_block_id, finalized_blocks) = import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - Some(100), - header, - &(), - None, - ) - .unwrap(); - match i { - 2..=10 => assert_eq!(finalized_blocks, vec![(parent_id, Some(100))], "At {}", i,), - _ => assert_eq!(finalized_blocks, vec![], "At {}", i), - } - latest_block_id = rolling_last_block_id; - } - assert!(storage.header(&ctx.genesis.compute_hash()).is_some()); - - // header 11 finalizes headers [10] AND schedules change - // => we prune header#0 - let header11 = HeaderBuilder::with_parent_number(10) - .log_bloom((&[0xff; 256]).into()) - .receipts_root( - "ead6c772ba0083bbff497ba0f4efe47c199a2655401096c21ab7450b6c466d97" - .parse() - .unwrap(), - ) - .sign_by_set(&validators); - let parent_id = header11.parent_id().unwrap(); - let (rolling_last_block_id, finalized_blocks) = import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - Some(101), - header11.clone(), - &(), - Some(vec![validators_change_receipt(latest_block_id.hash)]), - ) - .unwrap(); - assert_eq!(finalized_blocks, vec![(parent_id, Some(100))],); - assert!(storage.header(&ctx.genesis.compute_hash()).is_none()); - latest_block_id = rolling_last_block_id; - - // and now let's say validators 1 && 2 went offline - // => in the range 12-25 no blocks are finalized, but we still continue to prune old headers - // until header#11 is met. 
we can't prune #11, because it schedules change - let mut step = 56u64; - let mut expected_blocks = vec![(header11.compute_id(), Some(101))]; - for i in 12..25 { - let header = HeaderBuilder::with_parent_hash(latest_block_id.hash) - .difficulty(i.into()) - .step(step) - .sign_by_set(&validators); - expected_blocks.push((header.compute_id(), Some(102))); - let (rolling_last_block_id, finalized_blocks) = import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - Some(102), - header, - &(), - None, - ) - .unwrap(); - assert_eq!(finalized_blocks, vec![],); - latest_block_id = rolling_last_block_id; - step += 3; - } - assert_eq!( - BlocksToPrune::::get(), - PruningRange { - oldest_unpruned_block: 11, - oldest_block_to_keep: 14, - }, - ); - - // now let's insert block signed by validator 1 - // => blocks 11..24 are finalized and blocks 11..14 are pruned - step -= 2; - let header = HeaderBuilder::with_parent_hash(latest_block_id.hash) - .difficulty(25.into()) - .step(step) - .sign_by_set(&validators); - let (_, finalized_blocks) = import_header( - &mut storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &validators_config, - Some(103), - header, - &(), - None, - ) - .unwrap(); - assert_eq!(finalized_blocks, expected_blocks); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { - oldest_unpruned_block: 15, - oldest_block_to_keep: 15, - }, - ); - }); - } - - fn import_custom_block( - storage: &mut S, - validators: &[SecretKey], - header: AuraHeader, - ) -> Result { - let id = header.compute_id(); - import_header( - storage, - &mut KeepSomeHeadersBehindBest::default(), - &test_aura_config(), - &ValidatorsConfiguration::Single(ValidatorsSource::Contract( - [0; 20].into(), - validators.iter().map(secret_to_address).collect(), - )), - None, - header, - &(), - None, - ) - .map(|_| id) - } - - #[test] - fn import_of_non_best_block_may_finalize_blocks() { - run_test(TOTAL_VALIDATORS, 
|ctx| { - let mut storage = BridgeStorage::::new(); - - // insert headers (H1, validator1), (H2, validator1), (H3, validator1) - // making H3 the best header, without finalizing anything (we need 2 signatures) - let mut expected_best_block = Default::default(); - for i in 1..4 { - let step = 1 + i * TOTAL_VALIDATORS as u64; - expected_best_block = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(i - 1) - .step(step) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - } - let (best_block, best_difficulty) = storage.best_block(); - assert_eq!(best_block, expected_best_block); - assert_eq!(storage.finalized_block(), ctx.genesis.compute_id()); - - // insert headers (H1', validator1), (H2', validator2), finalizing H2, even though H3 - // has better difficulty than H2' (because there are more steps involved) - let mut expected_finalized_block = Default::default(); - let mut parent_hash = ctx.genesis.compute_hash(); - for i in 1..3 { - let step = i; - let id = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(parent_hash) - .step(step) - .gas_limit((GAS_LIMIT + 1).into()) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - parent_hash = id.hash; - if i == 1 { - expected_finalized_block = id; - } - } - let (new_best_block, new_best_difficulty) = storage.best_block(); - assert_eq!(new_best_block, expected_best_block); - assert_eq!(new_best_difficulty, best_difficulty); - assert_eq!(storage.finalized_block(), expected_finalized_block); - }); - } - - #[test] - fn append_to_unfinalized_fork_fails() { - const VALIDATORS: u64 = 5; - run_test(VALIDATORS as usize, |ctx| { - let mut storage = BridgeStorage::::new(); - - // header1, authored by validator[2] is best common block between two competing forks - let header1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(0) - .step(2) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - 
assert_eq!(storage.best_block().0, header1); - assert_eq!(storage.finalized_block().number, 0); - - // validator[3] has authored header2 (nothing is finalized yet) - let header2 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(1) - .step(3) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header2); - assert_eq!(storage.finalized_block().number, 0); - - // validator[4] has authored header3 (header1 is finalized) - let header3 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(2) - .step(4) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header3); - assert_eq!(storage.finalized_block(), header1); - - // validator[4] has authored 4 blocks: header2'...header5' (header1 is still finalized) - let header2_1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(1) - .gas_limit((GAS_LIMIT + 1).into()) - .step(4) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - let header3_1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(header2_1.hash) - .step(4 + VALIDATORS) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - let header4_1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(header3_1.hash) - .step(4 + VALIDATORS * 2) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - let header5_1 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(header4_1.hash) - .step(4 + VALIDATORS * 3) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header5_1); - assert_eq!(storage.finalized_block(), header1); - - // when we import header4 { parent = header3 }, authored by validator[0], header2 is finalized - let header4 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(3) - .step(5) - 
.sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header5_1); - assert_eq!(storage.finalized_block(), header2); - - // when we import header5 { parent = header4 }, authored by validator[1], header3 is finalized - let header5 = import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_hash(header4.hash) - .step(6) - .sign_by_set(&ctx.validators), - ) - .unwrap(); - assert_eq!(storage.best_block().0, header5); - assert_eq!(storage.finalized_block(), header3); - - // import of header2'' { parent = header1 } fails, because it has number < best_finalized - assert_eq!( - import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(1) - .gas_limit((GAS_LIMIT + 1).into()) - .step(3) - .sign_by_set(&ctx.validators) - ), - Err(Error::AncientHeader), - ); - - // import of header6' should also fail because we're trying to append to fork thas - // has forked before finalized block - assert_eq!( - import_custom_block( - &mut storage, - &ctx.validators, - HeaderBuilder::with_parent_number(5) - .gas_limit((GAS_LIMIT + 1).into()) - .step(5 + VALIDATORS * 4) - .sign_by_set(&ctx.validators), - ), - Err(Error::TryingToFinalizeSibling), - ); - }); - } -} diff --git a/polkadot/bridges/modules/ethereum/src/lib.rs b/polkadot/bridges/modules/ethereum/src/lib.rs deleted file mode 100644 index 05beb279a926c7a71b547e888bf587037a1e178c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/lib.rs +++ /dev/null @@ -1,1553 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// Runtime-generated enums -#![allow(clippy::large_enum_variant)] - -use crate::finality::{CachedFinalityVotes, FinalityVotes}; -use bp_eth_poa::{Address, AuraHeader, HeaderId, RawTransaction, RawTransactionReceipt, Receipt, H256, U256}; -use codec::{Decode, Encode}; -use frame_support::{decl_module, decl_storage, traits::Get}; -use sp_runtime::{ - transaction_validity::{ - InvalidTransaction, TransactionLongevity, TransactionPriority, TransactionSource, TransactionValidity, - UnknownTransaction, ValidTransaction, - }, - RuntimeDebug, -}; -use sp_std::{cmp::Ord, collections::btree_map::BTreeMap, prelude::*}; - -pub use validators::{ValidatorsConfiguration, ValidatorsSource}; - -mod error; -mod finality; -mod import; -mod validators; -mod verification; - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; - -#[cfg(test)] -mod mock; - -#[cfg(any(feature = "runtime-benchmarks", test))] -pub mod test_utils; - -/// Maximal number of blocks we're pruning in single import call. -const MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT: u64 = 8; - -/// Authority round engine configuration parameters. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] -pub struct AuraConfiguration { - /// Empty step messages transition block. - pub empty_steps_transition: u64, - /// Transition block to strict empty steps validation. - pub strict_empty_steps_transition: u64, - /// Monotonic step validation transition block. - pub validate_step_transition: u64, - /// Chain score validation transition block. 
- pub validate_score_transition: u64, - /// First block for which a 2/3 quorum (instead of 1/2) is required. - pub two_thirds_majority_transition: u64, - /// Minimum gas limit. - pub min_gas_limit: U256, - /// Maximum gas limit. - pub max_gas_limit: U256, - /// Maximum size of extra data. - pub maximum_extra_data_size: u64, -} - -/// Transaction pool configuration. -/// -/// This is used to limit number of unsigned headers transactions in -/// the pool. We never use it to verify signed transactions. -pub struct PoolConfiguration { - /// Maximal difference between number of header from unsigned transaction - /// and current best block. This must be selected with caution - the more - /// is the difference, the more (potentially invalid) transactions could be - /// accepted to the pool and mined later (filling blocks with spam). - pub max_future_number_difference: u64, -} - -/// Block header as it is stored in the runtime storage. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] -pub struct StoredHeader { - /// Submitter of this header. May be `None` if header has been submitted - /// using unsigned transaction. - pub submitter: Option, - /// The block header itself. - pub header: AuraHeader, - /// Total difficulty of the chain. - pub total_difficulty: U256, - /// The ID of set of validators that is expected to produce direct descendants of - /// this block. If header enacts new set, this would be the new set. Otherwise - /// this is the set that has produced the block itself. - /// The hash is the hash of block where validators set has been enacted. - pub next_validators_set_id: u64, - /// Hash of the last block which has **SCHEDULED** validators set change. - /// Note that signal doesn't mean that the set has been (or ever will be) enacted. - /// Note that the header may already be pruned. - pub last_signal_block: Option, -} - -/// Validators set as it is stored in the runtime storage. 
-#[derive(Encode, Decode, PartialEq, RuntimeDebug)] -#[cfg_attr(test, derive(Clone))] -pub struct ValidatorsSet { - /// Validators of this set. - pub validators: Vec
, - /// Hash of the block where this set has been signalled. None if this is the first set. - pub signal_block: Option, - /// Hash of the block where this set has been enacted. - pub enact_block: HeaderId, -} - -/// Validators set change as it is stored in the runtime storage. -#[derive(Encode, Decode, PartialEq, RuntimeDebug)] -#[cfg_attr(test, derive(Clone))] -pub struct AuraScheduledChange { - /// Validators of this set. - pub validators: Vec
, - /// Hash of the block which has emitted previous validators change signal. - pub prev_signal_block: Option, -} - -/// Header that we're importing. -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct HeaderToImport { - /// Header import context, - pub context: ImportContext, - /// Should we consider this header as best? - pub is_best: bool, - /// The id of the header. - pub id: HeaderId, - /// The header itself. - pub header: AuraHeader, - /// Total chain difficulty at the header. - pub total_difficulty: U256, - /// New validators set and the hash of block where it has been scheduled (if applicable). - /// Some if set is is enacted by this header. - pub enacted_change: Option, - /// Validators set scheduled change, if happened at the header. - pub scheduled_change: Option>, - /// Finality votes at this header. - pub finality_votes: FinalityVotes, -} - -/// Header that we're importing. -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct ChangeToEnact { - /// The id of the header where change has been scheduled. - /// None if it is a first set within current `ValidatorsSource`. - pub signal_block: Option, - /// Validators set that is enacted. - pub validators: Vec
, -} - -/// Blocks range that we want to prune. -#[derive(Encode, Decode, Default, RuntimeDebug, Clone, PartialEq)] -struct PruningRange { - /// Number of the oldest unpruned block(s). This might be the block that we do not - /// want to prune now (then it is equal to `oldest_block_to_keep`), or block that we - /// were unable to prune for whatever reason (i.e. if it isn't finalized yet and has - /// scheduled validators set change). - pub oldest_unpruned_block: u64, - /// Number of oldest block(s) that we want to keep. We want to prune blocks in range - /// [`oldest_unpruned_block`; `oldest_block_to_keep`). - pub oldest_block_to_keep: u64, -} - -/// Header import context. -/// -/// The import context contains information needed by the header verification -/// pipeline which is not directly part of the header being imported. This includes -/// information relating to its parent, and the current validator set (which -/// provide _context_ for the current header). -#[derive(RuntimeDebug)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct ImportContext { - submitter: Option, - parent_hash: H256, - parent_header: AuraHeader, - parent_total_difficulty: U256, - parent_scheduled_change: Option, - validators_set_id: u64, - validators_set: ValidatorsSet, - last_signal_block: Option, -} - -impl ImportContext { - /// Returns reference to header submitter (if known). - pub fn submitter(&self) -> Option<&Submitter> { - self.submitter.as_ref() - } - - /// Returns reference to parent header. - pub fn parent_header(&self) -> &AuraHeader { - &self.parent_header - } - - /// Returns total chain difficulty at parent block. - pub fn total_difficulty(&self) -> &U256 { - &self.parent_total_difficulty - } - - /// Returns the validator set change if the parent header has signaled a change. - pub fn parent_scheduled_change(&self) -> Option<&AuraScheduledChange> { - self.parent_scheduled_change.as_ref() - } - - /// Returns id of the set of validators. 
- pub fn validators_set_id(&self) -> u64 { - self.validators_set_id - } - - /// Returns reference to validators set for the block we're going to import. - pub fn validators_set(&self) -> &ValidatorsSet { - &self.validators_set - } - - /// Returns reference to the latest block which has signalled change of validators set. - /// This may point to parent if parent has signalled change. - pub fn last_signal_block(&self) -> Option { - match self.parent_scheduled_change { - Some(_) => Some(HeaderId { - number: self.parent_header.number, - hash: self.parent_hash, - }), - None => self.last_signal_block, - } - } - - /// Converts import context into header we're going to import. - #[allow(clippy::too_many_arguments)] - pub fn into_import_header( - self, - is_best: bool, - id: HeaderId, - header: AuraHeader, - total_difficulty: U256, - enacted_change: Option, - scheduled_change: Option>, - finality_votes: FinalityVotes, - ) -> HeaderToImport { - HeaderToImport { - context: self, - is_best, - id, - header, - total_difficulty, - enacted_change, - scheduled_change, - finality_votes, - } - } -} - -/// The storage that is used by the client. -/// -/// Storage modification must be discarded if block import has failed. -pub trait Storage { - /// Header submitter identifier. - type Submitter: Clone + Ord; - - /// Get best known block and total chain difficulty. - fn best_block(&self) -> (HeaderId, U256); - /// Get last finalized block. - fn finalized_block(&self) -> HeaderId; - /// Get imported header by its hash. - /// - /// Returns header and its submitter (if known). - fn header(&self, hash: &H256) -> Option<(AuraHeader, Option)>; - /// Returns latest cached finality votes (if any) for block ancestors, starting - /// from `parent_hash` block and stopping at genesis block, best finalized block - /// or block where `stop_at` returns true. 
- fn cached_finality_votes( - &self, - parent: &HeaderId, - best_finalized: &HeaderId, - stop_at: impl Fn(&H256) -> bool, - ) -> CachedFinalityVotes; - /// Get header import context by parent header hash. - fn import_context( - &self, - submitter: Option, - parent_hash: &H256, - ) -> Option>; - /// Get new validators that are scheduled by given header and hash of the previous - /// block that has scheduled change. - fn scheduled_change(&self, hash: &H256) -> Option; - /// Insert imported header. - fn insert_header(&mut self, header: HeaderToImport); - /// Finalize given block and schedules pruning of all headers - /// with number < prune_end. - /// - /// The headers in the pruning range could be either finalized, or not. - /// It is the storage duty to ensure that unfinalized headers that have - /// scheduled changes won't be pruned until they or their competitors - /// are finalized. - fn finalize_and_prune_headers(&mut self, finalized: Option, prune_end: u64); -} - -/// Headers pruning strategy. -pub trait PruningStrategy: Default { - /// Return upper bound (exclusive) of headers pruning range. - /// - /// Every value that is returned from this function, must be greater or equal to the - /// previous value. Otherwise it will be ignored (we can't revert pruning). - /// - /// Module may prune both finalized and unfinalized blocks. But it can't give any - /// guarantees on when it will happen. Example: if some unfinalized block at height N - /// has scheduled validators set change, then the module won't prune any blocks with - /// number >= N even if strategy allows that. - /// - /// If your strategy allows pruning unfinalized blocks, this could lead to switch - /// between finalized forks (only if authorities are misbehaving). But since 50%+1 (or 2/3) - /// authorities are able to do whatever they want with the chain, this isn't considered - /// fatal. 
If your strategy only prunes finalized blocks, we'll never be able to finalize - /// header that isn't descendant of current best finalized block. - fn pruning_upper_bound(&mut self, best_number: u64, best_finalized_number: u64) -> u64; -} - -/// ChainTime represents the runtime on-chain time -pub trait ChainTime: Default { - /// Is a header timestamp ahead of the current on-chain time. - /// - /// Check whether `timestamp` is ahead (i.e greater than) the current on-chain - /// time. If so, return `true`, `false` otherwise. - fn is_timestamp_ahead(&self, timestamp: u64) -> bool; -} - -/// ChainTime implementation for the empty type. -/// -/// This implementation will allow a runtime without the timestamp pallet to use -/// the empty type as its ChainTime associated type. -impl ChainTime for () { - fn is_timestamp_ahead(&self, _: u64) -> bool { - false - } -} - -/// Callbacks for header submission rewards/penalties. -pub trait OnHeadersSubmitted { - /// Called when valid headers have been submitted. - /// - /// The submitter **must not** be rewarded for submitting valid headers, because greedy authority - /// could produce and submit multiple valid headers (without relaying them to other peers) and - /// get rewarded. Instead, the provider could track submitters and stop rewarding if too many - /// headers have been submitted without finalization. - fn on_valid_headers_submitted(submitter: AccountId, useful: u64, useless: u64); - /// Called when invalid headers have been submitted. - fn on_invalid_headers_submitted(submitter: AccountId); - /// Called when earlier submitted headers have been finalized. - /// - /// finalized is the number of headers that submitter has submitted and which - /// have been finalized. 
- fn on_valid_headers_finalized(submitter: AccountId, finalized: u64); -} - -impl OnHeadersSubmitted for () { - fn on_valid_headers_submitted(_submitter: AccountId, _useful: u64, _useless: u64) {} - fn on_invalid_headers_submitted(_submitter: AccountId) {} - fn on_valid_headers_finalized(_submitter: AccountId, _finalized: u64) {} -} - -/// The module configuration trait. -pub trait Config: frame_system::Config { - /// Aura configuration. - type AuraConfiguration: Get; - /// Validators configuration. - type ValidatorsConfiguration: Get; - - /// Interval (in blocks) for for finality votes caching. - /// If None, cache is disabled. - /// - /// Ideally, this should either be None (when we are sure that there won't - /// be any significant finalization delays), or something that is bit larger - /// than average finalization delay. - type FinalityVotesCachingInterval: Get>; - /// Headers pruning strategy. - type PruningStrategy: PruningStrategy; - /// Header timestamp verification against current on-chain time. - type ChainTime: ChainTime; - - /// Handler for headers submission result. - type OnHeadersSubmitted: OnHeadersSubmitted; -} - -decl_module! { - pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { - /// Import single Aura header. Requires transaction to be **UNSIGNED**. - #[weight = 0] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - pub fn import_unsigned_header(origin, header: AuraHeader, receipts: Option>) { - frame_system::ensure_none(origin)?; - - import::import_header( - &mut BridgeStorage::::new(), - &mut T::PruningStrategy::default(), - &T::AuraConfiguration::get(), - &T::ValidatorsConfiguration::get(), - None, - header, - &T::ChainTime::default(), - receipts, - ).map_err(|e| e.msg())?; - } - - /// Import Aura chain headers in a single **SIGNED** transaction. 
- /// Ignores non-fatal errors (like when known header is provided), rewards - /// for successful headers import and penalizes for fatal errors. - /// - /// This should be used with caution - passing too many headers could lead to - /// enormous block production/import time. - #[weight = 0] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - pub fn import_signed_headers(origin, headers_with_receipts: Vec<(AuraHeader, Option>)>) { - let submitter = frame_system::ensure_signed(origin)?; - let mut finalized_headers = BTreeMap::new(); - let import_result = import::import_headers( - &mut BridgeStorage::::new(), - &mut T::PruningStrategy::default(), - &T::AuraConfiguration::get(), - &T::ValidatorsConfiguration::get(), - Some(submitter.clone()), - headers_with_receipts, - &T::ChainTime::default(), - &mut finalized_headers, - ); - - // if we have finalized some headers, we will reward their submitters even - // if current submitter has provided some invalid headers - for (f_submitter, f_count) in finalized_headers { - T::OnHeadersSubmitted::on_valid_headers_finalized( - f_submitter, - f_count, - ); - } - - // now track/penalize current submitter for providing new headers - match import_result { - Ok((useful, useless)) => - T::OnHeadersSubmitted::on_valid_headers_submitted(submitter, useful, useless), - Err(error) => { - // even though we may have accept some headers, we do not want to reward someone - // who provides invalid headers - T::OnHeadersSubmitted::on_invalid_headers_submitted(submitter); - return Err(error.msg().into()); - }, - } - } - } -} - -decl_storage! { - trait Store for Module, I: Instance = DefaultInstance> as Bridge { - /// Best known block. - BestBlock: (HeaderId, U256); - /// Best finalized block. - FinalizedBlock: HeaderId; - /// Range of blocks that we want to prune. - BlocksToPrune: PruningRange; - /// Map of imported headers by hash. 
- Headers: map hasher(identity) H256 => Option>; - /// Map of imported header hashes by number. - HeadersByNumber: map hasher(blake2_128_concat) u64 => Option>; - /// Map of cached finality data by header hash. - FinalityCache: map hasher(identity) H256 => Option>; - /// The ID of next validator set. - NextValidatorsSetId: u64; - /// Map of validators sets by their id. - ValidatorsSets: map hasher(twox_64_concat) u64 => Option; - /// Validators sets reference count. Each header that is authored by this set increases - /// the reference count. When we prune this header, we decrease the reference count. - /// When it reaches zero, we are free to prune validator set as well. - ValidatorsSetsRc: map hasher(twox_64_concat) u64 => Option; - /// Map of validators set changes scheduled by given header. - ScheduledChanges: map hasher(identity) H256 => Option; - } - add_extra_genesis { - config(initial_header): AuraHeader; - config(initial_difficulty): U256; - config(initial_validators): Vec
; - build(|config| { - // the initial blocks should be selected so that: - // 1) it doesn't signal validators changes; - // 2) there are no scheduled validators changes from previous blocks; - // 3) (implied) all direct children of initial block are authored by the same validators set. - - assert!( - !config.initial_validators.is_empty(), - "Initial validators set can't be empty", - ); - - initialize_storage::( - &config.initial_header, - config.initial_difficulty, - &config.initial_validators, - ); - }) - } -} - -impl, I: Instance> Module { - /// Returns number and hash of the best block known to the bridge module. - /// The caller should only submit `import_header` transaction that makes - /// (or leads to making) other header the best one. - pub fn best_block() -> HeaderId { - BridgeStorage::::new().best_block().0 - } - - /// Returns number and hash of the best finalized block known to the bridge module. - pub fn finalized_block() -> HeaderId { - BridgeStorage::::new().finalized_block() - } - - /// Returns true if the import of given block requires transactions receipts. - pub fn is_import_requires_receipts(header: AuraHeader) -> bool { - import::header_import_requires_receipts( - &BridgeStorage::::new(), - &T::ValidatorsConfiguration::get(), - &header, - ) - } - - /// Returns true if header is known to the runtime. - pub fn is_known_block(hash: H256) -> bool { - BridgeStorage::::new().header(&hash).is_some() - } - - /// Verify that transaction is included into given finalized block. 
- pub fn verify_transaction_finalized( - block: H256, - tx_index: u64, - proof: &[(RawTransaction, RawTransactionReceipt)], - ) -> bool { - crate::verify_transaction_finalized(&BridgeStorage::::new(), block, tx_index, proof) - } -} - -impl, I: Instance> frame_support::unsigned::ValidateUnsigned for Module { - type Call = Call; - - fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { - match *call { - Self::Call::import_unsigned_header(ref header, ref receipts) => { - let accept_result = verification::accept_aura_header_into_pool( - &BridgeStorage::::new(), - &T::AuraConfiguration::get(), - &T::ValidatorsConfiguration::get(), - &pool_configuration(), - header, - &T::ChainTime::default(), - receipts.as_ref(), - ); - - match accept_result { - Ok((requires, provides)) => Ok(ValidTransaction { - priority: TransactionPriority::max_value(), - requires, - provides, - longevity: TransactionLongevity::max_value(), - propagate: true, - }), - // UnsignedTooFarInTheFuture is the special error code used to limit - // number of transactions in the pool - we do not want to ban transaction - // in this case (see verification.rs for details) - Err(error::Error::UnsignedTooFarInTheFuture) => { - UnknownTransaction::Custom(error::Error::UnsignedTooFarInTheFuture.code()).into() - } - Err(error) => InvalidTransaction::Custom(error.code()).into(), - } - } - _ => InvalidTransaction::Call.into(), - } - } -} - -/// Runtime bridge storage. -#[derive(Default)] -pub struct BridgeStorage(sp_std::marker::PhantomData<(T, I)>); - -impl, I: Instance> BridgeStorage { - /// Create new BridgeStorage. - pub fn new() -> Self { - BridgeStorage(sp_std::marker::PhantomData::<(T, I)>::default()) - } - - /// Prune old blocks. 
- fn prune_blocks(&self, mut max_blocks_to_prune: u64, finalized_number: u64, prune_end: u64) { - let pruning_range = BlocksToPrune::::get(); - let mut new_pruning_range = pruning_range.clone(); - - // update oldest block we want to keep - if prune_end > new_pruning_range.oldest_block_to_keep { - new_pruning_range.oldest_block_to_keep = prune_end; - } - - // start pruning blocks - let begin = new_pruning_range.oldest_unpruned_block; - let end = new_pruning_range.oldest_block_to_keep; - frame_support::debug::trace!(target: "runtime", "Pruning blocks in range [{}..{})", begin, end); - for number in begin..end { - // if we can't prune anything => break - if max_blocks_to_prune == 0 { - break; - } - - // read hashes of blocks with given number and try to prune these blocks - let blocks_at_number = HeadersByNumber::::take(number); - if let Some(mut blocks_at_number) = blocks_at_number { - self.prune_blocks_by_hashes( - &mut max_blocks_to_prune, - finalized_number, - number, - &mut blocks_at_number, - ); - - // if we haven't pruned all blocks, remember unpruned - if !blocks_at_number.is_empty() { - HeadersByNumber::::insert(number, blocks_at_number); - break; - } - } - - // we have pruned all headers at number - new_pruning_range.oldest_unpruned_block = number + 1; - frame_support::debug::trace!( - target: "runtime", - "Oldest unpruned PoA header is now: {}", - new_pruning_range.oldest_unpruned_block, - ); - } - - // update pruning range in storage - if pruning_range != new_pruning_range { - BlocksToPrune::::put(new_pruning_range); - } - } - - /// Prune old blocks with given hashes. 
- fn prune_blocks_by_hashes( - &self, - max_blocks_to_prune: &mut u64, - finalized_number: u64, - number: u64, - blocks_at_number: &mut Vec, - ) { - // ensure that unfinalized headers we want to prune do not have scheduled changes - if number > finalized_number && blocks_at_number.iter().any(ScheduledChanges::::contains_key) { - return; - } - - // physically remove headers and (probably) obsolete validators sets - while let Some(hash) = blocks_at_number.pop() { - let header = Headers::::take(&hash); - frame_support::debug::trace!( - target: "runtime", - "Pruning PoA header: ({}, {})", - number, - hash, - ); - - ScheduledChanges::::remove(hash); - FinalityCache::::remove(hash); - if let Some(header) = header { - ValidatorsSetsRc::::mutate(header.next_validators_set_id, |rc| match *rc { - Some(rc) if rc > 1 => Some(rc - 1), - _ => None, - }); - } - - // check if we have already pruned too much headers in this call - *max_blocks_to_prune -= 1; - if *max_blocks_to_prune == 0 { - return; - } - } - } -} - -impl, I: Instance> Storage for BridgeStorage { - type Submitter = T::AccountId; - - fn best_block(&self) -> (HeaderId, U256) { - BestBlock::::get() - } - - fn finalized_block(&self) -> HeaderId { - FinalizedBlock::::get() - } - - fn header(&self, hash: &H256) -> Option<(AuraHeader, Option)> { - Headers::::get(hash).map(|header| (header.header, header.submitter)) - } - - fn cached_finality_votes( - &self, - parent: &HeaderId, - best_finalized: &HeaderId, - stop_at: impl Fn(&H256) -> bool, - ) -> CachedFinalityVotes { - let mut votes = CachedFinalityVotes::default(); - let mut current_id = *parent; - loop { - // if we have reached finalized block's sibling => stop with special signal - if current_id.number == best_finalized.number && current_id.hash != best_finalized.hash { - votes.stopped_at_finalized_sibling = true; - return votes; - } - - // if we have reached target header => stop - if stop_at(¤t_id.hash) { - return votes; - } - - // if we have found cached votes => 
stop - let cached_votes = FinalityCache::::get(¤t_id.hash); - if let Some(cached_votes) = cached_votes { - votes.votes = Some(cached_votes); - return votes; - } - - // read next parent header id - let header = match Headers::::get(¤t_id.hash) { - Some(header) if header.header.number != 0 => header, - _ => return votes, - }; - let parent_id = header.header.parent_id().expect( - "only returns None at genesis header;\ - the header is proved to have number > 0;\ - qed", - ); - - votes - .unaccounted_ancestry - .push_back((current_id, header.submitter, header.header)); - - current_id = parent_id; - } - } - - fn import_context( - &self, - submitter: Option, - parent_hash: &H256, - ) -> Option> { - Headers::::get(parent_hash).map(|parent_header| { - let validators_set = ValidatorsSets::::get(parent_header.next_validators_set_id) - .expect("validators set is only pruned when last ref is pruned; there is a ref; qed"); - let parent_scheduled_change = ScheduledChanges::::get(parent_hash); - ImportContext { - submitter, - parent_hash: *parent_hash, - parent_header: parent_header.header, - parent_total_difficulty: parent_header.total_difficulty, - parent_scheduled_change, - validators_set_id: parent_header.next_validators_set_id, - validators_set, - last_signal_block: parent_header.last_signal_block, - } - }) - } - - fn scheduled_change(&self, hash: &H256) -> Option { - ScheduledChanges::::get(hash) - } - - fn insert_header(&mut self, header: HeaderToImport) { - if header.is_best { - BestBlock::::put((header.id, header.total_difficulty)); - } - if let Some(scheduled_change) = header.scheduled_change { - ScheduledChanges::::insert( - &header.id.hash, - AuraScheduledChange { - validators: scheduled_change, - prev_signal_block: header.context.last_signal_block, - }, - ); - } - let next_validators_set_id = match header.enacted_change { - Some(enacted_change) => { - let next_validators_set_id = NextValidatorsSetId::::mutate(|set_id| { - let next_set_id = *set_id; - *set_id += 1; - 
next_set_id - }); - ValidatorsSets::::insert( - next_validators_set_id, - ValidatorsSet { - validators: enacted_change.validators, - enact_block: header.id, - signal_block: enacted_change.signal_block, - }, - ); - ValidatorsSetsRc::::insert(next_validators_set_id, 1); - next_validators_set_id - } - None => { - ValidatorsSetsRc::::mutate(header.context.validators_set_id, |rc| { - *rc = Some(rc.map(|rc| rc + 1).unwrap_or(1)); - *rc - }); - header.context.validators_set_id - } - }; - - let finality_votes_caching_interval = T::FinalityVotesCachingInterval::get(); - if let Some(finality_votes_caching_interval) = finality_votes_caching_interval { - let cache_entry_required = header.id.number != 0 && header.id.number % finality_votes_caching_interval == 0; - if cache_entry_required { - FinalityCache::::insert(header.id.hash, header.finality_votes); - } - } - - frame_support::debug::trace!( - target: "runtime", - "Inserting PoA header: ({}, {})", - header.header.number, - header.id.hash, - ); - - let last_signal_block = header.context.last_signal_block(); - HeadersByNumber::::append(header.id.number, header.id.hash); - Headers::::insert( - &header.id.hash, - StoredHeader { - submitter: header.context.submitter, - header: header.header, - total_difficulty: header.total_difficulty, - next_validators_set_id, - last_signal_block, - }, - ); - } - - fn finalize_and_prune_headers(&mut self, finalized: Option, prune_end: u64) { - // remember just finalized block - let finalized_number = finalized - .as_ref() - .map(|f| f.number) - .unwrap_or_else(|| FinalizedBlock::::get().number); - if let Some(finalized) = finalized { - frame_support::debug::trace!( - target: "runtime", - "Finalizing PoA header: ({}, {})", - finalized.number, - finalized.hash, - ); - - FinalizedBlock::::put(finalized); - } - - // and now prune headers if we need to - self.prune_blocks(MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT, finalized_number, prune_end); - } -} - -/// Initialize storage. 
-#[cfg(any(feature = "std", feature = "runtime-benchmarks"))] -pub(crate) fn initialize_storage, I: Instance>( - initial_header: &AuraHeader, - initial_difficulty: U256, - initial_validators: &[Address], -) { - let initial_hash = initial_header.compute_hash(); - frame_support::debug::trace!( - target: "runtime", - "Initializing bridge with PoA header: ({}, {})", - initial_header.number, - initial_hash, - ); - - let initial_id = HeaderId { - number: initial_header.number, - hash: initial_hash, - }; - BestBlock::::put((initial_id, initial_difficulty)); - FinalizedBlock::::put(initial_id); - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: initial_header.number, - oldest_block_to_keep: initial_header.number, - }); - HeadersByNumber::::insert(initial_header.number, vec![initial_hash]); - Headers::::insert( - initial_hash, - StoredHeader { - submitter: None, - header: initial_header.clone(), - total_difficulty: initial_difficulty, - next_validators_set_id: 0, - last_signal_block: None, - }, - ); - NextValidatorsSetId::::put(1); - ValidatorsSets::::insert( - 0, - ValidatorsSet { - validators: initial_validators.to_vec(), - signal_block: None, - enact_block: initial_id, - }, - ); - ValidatorsSetsRc::::insert(0, 1); -} - -/// Verify that transaction is included into given finalized block. 
-pub fn verify_transaction_finalized( - storage: &S, - block: H256, - tx_index: u64, - proof: &[(RawTransaction, RawTransactionReceipt)], -) -> bool { - if tx_index >= proof.len() as _ { - frame_support::debug::trace!( - target: "runtime", - "Tx finality check failed: transaction index ({}) is larger than number of transactions ({})", - tx_index, - proof.len(), - ); - - return false; - } - - let header = match storage.header(&block) { - Some((header, _)) => header, - None => { - frame_support::debug::trace!( - target: "runtime", - "Tx finality check failed: can't find header in the storage: {}", - block, - ); - - return false; - } - }; - let finalized = storage.finalized_block(); - - // if header is not yet finalized => return - if header.number > finalized.number { - frame_support::debug::trace!( - target: "runtime", - "Tx finality check failed: header {}/{} is not finalized. Best finalized: {}", - header.number, - block, - finalized.number, - ); - - return false; - } - - // check if header is actually finalized - let is_finalized = match header.number < finalized.number { - true => ancestry(storage, finalized.hash) - .skip_while(|(_, ancestor)| ancestor.number > header.number) - .any(|(ancestor_hash, _)| ancestor_hash == block), - false => block == finalized.hash, - }; - if !is_finalized { - frame_support::debug::trace!( - target: "runtime", - "Tx finality check failed: header {} is not finalized: no canonical path to best finalized block {}", - block, - finalized.hash, - ); - - return false; - } - - // verify that transaction is included in the block - if let Err(computed_root) = header.check_transactions_root(proof.iter().map(|(tx, _)| tx)) { - frame_support::debug::trace!( - target: "runtime", - "Tx finality check failed: transactions root mismatch. 
Expected: {}, computed: {}", - header.transactions_root, - computed_root, - ); - - return false; - } - - // verify that transaction receipt is included in the block - if let Err(computed_root) = header.check_raw_receipts_root(proof.iter().map(|(_, r)| r)) { - frame_support::debug::trace!( - target: "runtime", - "Tx finality check failed: receipts root mismatch. Expected: {}, computed: {}", - header.receipts_root, - computed_root, - ); - - return false; - } - - // check that transaction has completed successfully - let is_successful_raw_receipt = Receipt::is_successful_raw_receipt(&proof[tx_index as usize].1); - match is_successful_raw_receipt { - Ok(true) => true, - Ok(false) => { - frame_support::debug::trace!( - target: "runtime", - "Tx finality check failed: receipt shows that transaction has failed", - ); - - false - } - Err(err) => { - frame_support::debug::trace!( - target: "runtime", - "Tx finality check failed: receipt check has failed: {}", - err, - ); - - false - } - } -} - -/// Transaction pool configuration. -fn pool_configuration() -> PoolConfiguration { - PoolConfiguration { - max_future_number_difference: 10, - } -} - -/// Return iterator of given header ancestors. 
-fn ancestry(storage: &'_ S, mut parent_hash: H256) -> impl Iterator + '_ { - sp_std::iter::from_fn(move || { - let (header, _) = storage.header(&parent_hash)?; - if header.number == 0 { - return None; - } - - let hash = parent_hash; - parent_hash = header.parent_hash; - Some((hash, header)) - }) -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use crate::finality::FinalityAncestor; - use crate::mock::{ - genesis, insert_header, run_test, run_test_with_genesis, validators_addresses, HeaderBuilder, TestRuntime, - GAS_LIMIT, - }; - use crate::test_utils::validator_utils::*; - use bp_eth_poa::compute_merkle_root; - - const TOTAL_VALIDATORS: usize = 3; - - fn example_tx() -> Vec { - vec![42] - } - - fn example_tx_receipt(success: bool) -> Vec { - Receipt { - // the only thing that we care of: - outcome: bp_eth_poa::TransactionOutcome::StatusCode(if success { 1 } else { 0 }), - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp() - } - - fn example_header_with_failed_receipt() -> AuraHeader { - HeaderBuilder::with_parent(&example_header()) - .transactions_root(compute_merkle_root(vec![example_tx()].into_iter())) - .receipts_root(compute_merkle_root(vec![example_tx_receipt(false)].into_iter())) - .sign_by(&validator(0)) - } - - fn example_header() -> AuraHeader { - HeaderBuilder::with_parent(&example_header_parent()) - .transactions_root(compute_merkle_root(vec![example_tx()].into_iter())) - .receipts_root(compute_merkle_root(vec![example_tx_receipt(true)].into_iter())) - .sign_by(&validator(0)) - } - - fn example_header_parent() -> AuraHeader { - HeaderBuilder::with_parent(&genesis()) - .transactions_root(compute_merkle_root(vec![example_tx()].into_iter())) - .receipts_root(compute_merkle_root(vec![example_tx_receipt(true)].into_iter())) - .sign_by(&validator(0)) - } - - fn with_headers_to_prune(f: impl Fn(BridgeStorage) -> T) -> T { - run_test(TOTAL_VALIDATORS, |ctx| { - for i in 1..10 { - let mut headers_by_number 
= Vec::with_capacity(5); - for j in 0..5 { - let header = HeaderBuilder::with_parent_number(i - 1) - .gas_limit((GAS_LIMIT + j).into()) - .sign_by_set(&ctx.validators); - let hash = header.compute_hash(); - headers_by_number.push(hash); - Headers::::insert( - hash, - StoredHeader { - submitter: None, - header, - total_difficulty: 0.into(), - next_validators_set_id: 0, - last_signal_block: None, - }, - ); - - if i == 7 && j == 1 { - ScheduledChanges::::insert( - hash, - AuraScheduledChange { - validators: validators_addresses(5), - prev_signal_block: None, - }, - ); - } - } - HeadersByNumber::::insert(i, headers_by_number); - } - - f(BridgeStorage::new()) - }) - } - - #[test] - fn blocks_are_not_pruned_if_range_is_empty() { - with_headers_to_prune(|storage| { - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: 5, - oldest_block_to_keep: 5, - }); - - // try to prune blocks [5; 10) - storage.prune_blocks(0xFFFF, 10, 5); - assert_eq!(HeadersByNumber::::get(&5).unwrap().len(), 5); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { - oldest_unpruned_block: 5, - oldest_block_to_keep: 5, - }, - ); - }); - } - - #[test] - fn blocks_to_prune_never_shrinks_from_the_end() { - with_headers_to_prune(|storage| { - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: 0, - oldest_block_to_keep: 5, - }); - - // try to prune blocks [5; 10) - storage.prune_blocks(0xFFFF, 10, 3); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { - oldest_unpruned_block: 5, - oldest_block_to_keep: 5, - }, - ); - }); - } - - #[test] - fn blocks_are_not_pruned_if_limit_is_zero() { - with_headers_to_prune(|storage| { - // try to prune blocks [0; 10) - storage.prune_blocks(0, 10, 10); - assert!(HeadersByNumber::::get(&0).is_some()); - assert!(HeadersByNumber::::get(&1).is_some()); - assert!(HeadersByNumber::::get(&2).is_some()); - assert!(HeadersByNumber::::get(&3).is_some()); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { - oldest_unpruned_block: 0, - 
oldest_block_to_keep: 10, - }, - ); - }); - } - - #[test] - fn blocks_are_pruned_if_limit_is_non_zero() { - with_headers_to_prune(|storage| { - // try to prune blocks [0; 10) - storage.prune_blocks(7, 10, 10); - // 1 headers with number = 0 is pruned (1 total) - assert!(HeadersByNumber::::get(&0).is_none()); - // 5 headers with number = 1 are pruned (6 total) - assert!(HeadersByNumber::::get(&1).is_none()); - // 1 header with number = 2 are pruned (7 total) - assert_eq!(HeadersByNumber::::get(&2).unwrap().len(), 4); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { - oldest_unpruned_block: 2, - oldest_block_to_keep: 10, - }, - ); - - // try to prune blocks [2; 10) - storage.prune_blocks(11, 10, 10); - // 4 headers with number = 2 are pruned (4 total) - assert!(HeadersByNumber::::get(&2).is_none()); - // 5 headers with number = 3 are pruned (9 total) - assert!(HeadersByNumber::::get(&3).is_none()); - // 2 headers with number = 4 are pruned (11 total) - assert_eq!(HeadersByNumber::::get(&4).unwrap().len(), 3); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { - oldest_unpruned_block: 4, - oldest_block_to_keep: 10, - }, - ); - }); - } - - #[test] - fn pruning_stops_on_unfainalized_block_with_scheduled_change() { - with_headers_to_prune(|storage| { - // try to prune blocks [0; 10) - // last finalized block is 5 - // and one of blocks#7 has scheduled change - // => we won't prune any block#7 at all - storage.prune_blocks(0xFFFF, 5, 10); - assert!(HeadersByNumber::::get(&0).is_none()); - assert!(HeadersByNumber::::get(&1).is_none()); - assert!(HeadersByNumber::::get(&2).is_none()); - assert!(HeadersByNumber::::get(&3).is_none()); - assert!(HeadersByNumber::::get(&4).is_none()); - assert!(HeadersByNumber::::get(&5).is_none()); - assert!(HeadersByNumber::::get(&6).is_none()); - assert_eq!(HeadersByNumber::::get(&7).unwrap().len(), 5); - assert_eq!( - BlocksToPrune::::get(), - PruningRange { - oldest_unpruned_block: 7, - oldest_block_to_keep: 10, - }, - ); - 
}); - } - - #[test] - fn finality_votes_are_cached() { - run_test(TOTAL_VALIDATORS, |ctx| { - let mut storage = BridgeStorage::::new(); - let interval = ::FinalityVotesCachingInterval::get().unwrap(); - - // for all headers with number < interval, cache entry is not created - for i in 1..interval { - let header = HeaderBuilder::with_parent_number(i - 1).sign_by_set(&ctx.validators); - let id = header.compute_id(); - insert_header(&mut storage, header); - assert_eq!(FinalityCache::::get(&id.hash), None); - } - - // for header with number = interval, cache entry is created - let header_with_entry = HeaderBuilder::with_parent_number(interval - 1).sign_by_set(&ctx.validators); - let header_with_entry_hash = header_with_entry.compute_hash(); - insert_header(&mut storage, header_with_entry); - assert!(FinalityCache::::get(&header_with_entry_hash).is_some()); - - // when we later prune this header, cache entry is removed - BlocksToPrune::::put(PruningRange { - oldest_unpruned_block: interval - 1, - oldest_block_to_keep: interval - 1, - }); - storage.finalize_and_prune_headers(None, interval + 1); - assert_eq!(FinalityCache::::get(&header_with_entry_hash), None); - }); - } - - #[test] - fn cached_finality_votes_finds_entry() { - run_test(TOTAL_VALIDATORS, |ctx| { - // insert 5 headers - let mut storage = BridgeStorage::::new(); - let mut headers = Vec::new(); - for i in 1..5 { - let header = HeaderBuilder::with_parent_number(i - 1).sign_by_set(&ctx.validators); - headers.push(header.clone()); - insert_header(&mut storage, header); - } - - // when inserting header#6, entry isn't found - let id5 = headers.last().unwrap().compute_id(); - assert_eq!( - storage.cached_finality_votes(&id5, &genesis().compute_id(), |_| false), - CachedFinalityVotes { - stopped_at_finalized_sibling: false, - unaccounted_ancestry: headers - .iter() - .map(|header| (header.compute_id(), None, header.clone(),)) - .rev() - .collect(), - votes: None, - }, - ); - - // let's now create entry at #3 - let 
hash3 = headers[2].compute_hash(); - let votes_at_3 = FinalityVotes { - votes: vec![([42; 20].into(), 21)].into_iter().collect(), - ancestry: vec![FinalityAncestor { - id: HeaderId { - number: 100, - hash: Default::default(), - }, - ..Default::default() - }] - .into_iter() - .collect(), - }; - FinalityCache::::insert(hash3, votes_at_3.clone()); - - // searching at #6 again => entry is found - assert_eq!( - storage.cached_finality_votes(&id5, &genesis().compute_id(), |_| false), - CachedFinalityVotes { - stopped_at_finalized_sibling: false, - unaccounted_ancestry: headers - .iter() - .skip(3) - .map(|header| (header.compute_id(), None, header.clone(),)) - .rev() - .collect(), - votes: Some(votes_at_3), - }, - ); - }); - } - - #[test] - fn cached_finality_votes_stops_at_finalized_sibling() { - run_test(TOTAL_VALIDATORS, |ctx| { - let mut storage = BridgeStorage::::new(); - - // insert header1 - let header1 = HeaderBuilder::with_parent_number(0).sign_by_set(&ctx.validators); - let header1_id = header1.compute_id(); - insert_header(&mut storage, header1); - - // insert header1' - sibling of header1 - let header1s = HeaderBuilder::with_parent_number(0) - .gas_limit((GAS_LIMIT + 1).into()) - .sign_by_set(&ctx.validators); - let header1s_id = header1s.compute_id(); - insert_header(&mut storage, header1s); - - // header1 is finalized - FinalizedBlock::::put(header1_id); - - // trying to get finality votes when importing header2 -> header1 succeeds - assert!( - !storage - .cached_finality_votes(&header1_id, &genesis().compute_id(), |_| false) - .stopped_at_finalized_sibling - ); - - // trying to get finality votes when importing header2s -> header1s fails - assert!( - storage - .cached_finality_votes(&header1s_id, &header1_id, |_| false) - .stopped_at_finalized_sibling - ); - }); - } - - #[test] - fn verify_transaction_finalized_works_for_best_finalized_header() { - run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - 
assert_eq!( - verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(true))], - ), - true, - ); - }); - } - - #[test] - fn verify_transaction_finalized_works_for_best_finalized_header_ancestor() { - run_test(TOTAL_VALIDATORS, |_| { - let mut storage = BridgeStorage::::new(); - insert_header(&mut storage, example_header_parent()); - insert_header(&mut storage, example_header()); - storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); - assert_eq!( - verify_transaction_finalized( - &storage, - example_header_parent().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(true))], - ), - true, - ); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_proof_with_missing_tx() { - run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert_eq!( - verify_transaction_finalized(&storage, example_header().compute_hash(), 1, &[],), - false, - ); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_unknown_header() { - run_test(TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert_eq!( - verify_transaction_finalized(&storage, example_header().compute_hash(), 1, &[],), - false, - ); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_unfinalized_header() { - run_test(TOTAL_VALIDATORS, |_| { - let mut storage = BridgeStorage::::new(); - insert_header(&mut storage, example_header_parent()); - insert_header(&mut storage, example_header()); - assert_eq!( - verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(true))], - ), - false, - ); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_finalized_header_sibling() { - run_test(TOTAL_VALIDATORS, |_| { - let mut finalized_header_sibling = example_header(); - finalized_header_sibling.timestamp = 1; - let finalized_header_sibling_hash = 
finalized_header_sibling.compute_hash(); - - let mut storage = BridgeStorage::::new(); - insert_header(&mut storage, example_header_parent()); - insert_header(&mut storage, example_header()); - insert_header(&mut storage, finalized_header_sibling); - storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); - assert_eq!( - verify_transaction_finalized( - &storage, - finalized_header_sibling_hash, - 0, - &[(example_tx(), example_tx_receipt(true))], - ), - false, - ); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_finalized_header_uncle() { - run_test(TOTAL_VALIDATORS, |_| { - let mut finalized_header_uncle = example_header_parent(); - finalized_header_uncle.timestamp = 1; - let finalized_header_uncle_hash = finalized_header_uncle.compute_hash(); - - let mut storage = BridgeStorage::::new(); - insert_header(&mut storage, example_header_parent()); - insert_header(&mut storage, finalized_header_uncle); - insert_header(&mut storage, example_header()); - storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); - assert_eq!( - verify_transaction_finalized( - &storage, - finalized_header_uncle_hash, - 0, - &[(example_tx(), example_tx_receipt(true))], - ), - false, - ); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_invalid_transactions_in_proof() { - run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert_eq!( - verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[ - (example_tx(), example_tx_receipt(true)), - (example_tx(), example_tx_receipt(true)) - ], - ), - false, - ); - }); - } - - #[test] - fn verify_transaction_finalized_rejects_invalid_receipts_in_proof() { - run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert_eq!( - verify_transaction_finalized( - &storage, - example_header().compute_hash(), - 0, - &[(example_tx(), vec![42])], - ), - false, - ); 
- }); - } - - #[test] - fn verify_transaction_finalized_rejects_failed_transaction() { - run_test_with_genesis(example_header_with_failed_receipt(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - assert_eq!( - verify_transaction_finalized( - &storage, - example_header_with_failed_receipt().compute_hash(), - 0, - &[(example_tx(), example_tx_receipt(false))], - ), - false, - ); - }); - } -} diff --git a/polkadot/bridges/modules/ethereum/src/mock.rs b/polkadot/bridges/modules/ethereum/src/mock.rs deleted file mode 100644 index e812b65f365be311a34beb26e84c7b100335f4b0..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/mock.rs +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -// From construct_runtime macro -#![allow(clippy::from_over_into)] - -pub use crate::test_utils::{insert_header, validator_utils::*, validators_change_receipt, HeaderBuilder, GAS_LIMIT}; -pub use bp_eth_poa::signatures::secret_to_address; - -use crate::validators::{ValidatorsConfiguration, ValidatorsSource}; -use crate::{AuraConfiguration, ChainTime, Config, GenesisConfig as CrateGenesisConfig, PruningStrategy}; -use bp_eth_poa::{Address, AuraHeader, H256, U256}; -use frame_support::{parameter_types, weights::Weight}; -use secp256k1::SecretKey; -use sp_runtime::{ - testing::Header as SubstrateHeader, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, -}; - -pub type AccountId = u64; - -type Block = frame_system::mocking::MockBlock; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - -use crate as pallet_ethereum; - -frame_support::construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Ethereum: pallet_ethereum::{Pallet, Call}, - } -} - -parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - -impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = SubstrateHeader; - type Event = Event; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = (); - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); -} - -parameter_types! { - pub const TestFinalityVotesCachingInterval: Option = Some(16); - pub TestAuraConfiguration: AuraConfiguration = test_aura_config(); - pub TestValidatorsConfiguration: ValidatorsConfiguration = test_validators_config(); -} - -impl Config for TestRuntime { - type AuraConfiguration = TestAuraConfiguration; - type ValidatorsConfiguration = TestValidatorsConfiguration; - type FinalityVotesCachingInterval = TestFinalityVotesCachingInterval; - type PruningStrategy = KeepSomeHeadersBehindBest; - type ChainTime = ConstChainTime; - type OnHeadersSubmitted = (); -} - -/// Test context. -pub struct TestContext { - /// Initial (genesis) header. - pub genesis: AuraHeader, - /// Number of initial validators. - pub total_validators: usize, - /// Secret keys of validators, ordered by validator index. - pub validators: Vec, - /// Addresses of validators, ordered by validator index. - pub addresses: Vec
, -} - -/// Aura configuration that is used in tests by default. -pub fn test_aura_config() -> AuraConfiguration { - AuraConfiguration { - empty_steps_transition: u64::max_value(), - strict_empty_steps_transition: 0, - validate_step_transition: 0x16e360, - validate_score_transition: 0x41a3c4, - two_thirds_majority_transition: u64::max_value(), - min_gas_limit: 0x1388.into(), - max_gas_limit: U256::max_value(), - maximum_extra_data_size: 0x20, - } -} - -/// Validators configuration that is used in tests by default. -pub fn test_validators_config() -> ValidatorsConfiguration { - ValidatorsConfiguration::Single(ValidatorsSource::List(validators_addresses(3))) -} - -/// Genesis header that is used in tests by default. -pub fn genesis() -> AuraHeader { - HeaderBuilder::genesis().sign_by(&validator(0)) -} - -/// Run test with default genesis header. -pub fn run_test(total_validators: usize, test: impl FnOnce(TestContext) -> T) -> T { - run_test_with_genesis(genesis(), total_validators, test) -} - -/// Run test with default genesis header. -pub fn run_test_with_genesis( - genesis: AuraHeader, - total_validators: usize, - test: impl FnOnce(TestContext) -> T, -) -> T { - let validators = validators(total_validators); - let addresses = validators_addresses(total_validators); - sp_io::TestExternalities::new( - CrateGenesisConfig { - initial_header: genesis.clone(), - initial_difficulty: 0.into(), - initial_validators: addresses.clone(), - } - .build_storage::() - .unwrap(), - ) - .execute_with(|| { - test(TestContext { - genesis, - total_validators, - validators, - addresses, - }) - }) -} - -/// Pruning strategy that keeps 10 headers behind best block. 
-pub struct KeepSomeHeadersBehindBest(pub u64); - -impl Default for KeepSomeHeadersBehindBest { - fn default() -> KeepSomeHeadersBehindBest { - KeepSomeHeadersBehindBest(10) - } -} - -impl PruningStrategy for KeepSomeHeadersBehindBest { - fn pruning_upper_bound(&mut self, best_number: u64, _: u64) -> u64 { - best_number.saturating_sub(self.0) - } -} - -/// Constant chain time -#[derive(Default)] -pub struct ConstChainTime; - -impl ChainTime for ConstChainTime { - fn is_timestamp_ahead(&self, timestamp: u64) -> bool { - let now = i32::max_value() as u64 / 2; - timestamp > now - } -} diff --git a/polkadot/bridges/modules/ethereum/src/test_utils.rs b/polkadot/bridges/modules/ethereum/src/test_utils.rs deleted file mode 100644 index ad4019412892da1d876d70d3c059fc7875f9b5ca..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/test_utils.rs +++ /dev/null @@ -1,321 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Utilities for testing and benchmarking the Ethereum Bridge Pallet. -//! -//! Although the name implies that it is used by tests, it shouldn't be be used _directly_ by tests. -//! Instead these utilities should be used by the Mock runtime, which in turn is used by tests. -//! -//! 
On the other hand, they may be used directly by the bechmarking module. - -// Since this is test code it's fine that not everything is used -#![allow(dead_code)] - -use crate::finality::FinalityVotes; -use crate::validators::CHANGE_EVENT_HASH; -use crate::verification::calculate_score; -use crate::{Config, HeaderToImport, Storage}; - -use bp_eth_poa::{ - rlp_encode, - signatures::{secret_to_address, sign, SignHeader}, - Address, AuraHeader, Bloom, Receipt, SealedEmptyStep, H256, U256, -}; -use secp256k1::SecretKey; -use sp_std::prelude::*; - -/// Gas limit valid in test environment. -pub const GAS_LIMIT: u64 = 0x2000; - -/// Test header builder. -pub struct HeaderBuilder { - header: AuraHeader, - parent_header: AuraHeader, -} - -impl HeaderBuilder { - /// Creates default genesis header. - pub fn genesis() -> Self { - let current_step = 0u64; - Self { - header: AuraHeader { - gas_limit: GAS_LIMIT.into(), - seal: vec![bp_eth_poa::rlp_encode(¤t_step).to_vec(), vec![]], - ..Default::default() - }, - parent_header: Default::default(), - } - } - - /// Creates default header on top of test parent with given hash. - #[cfg(test)] - pub fn with_parent_hash(parent_hash: H256) -> Self { - Self::with_parent_hash_on_runtime::(parent_hash) - } - - /// Creates default header on top of test parent with given number. First parent is selected. - #[cfg(test)] - pub fn with_parent_number(parent_number: u64) -> Self { - Self::with_parent_number_on_runtime::(parent_number) - } - - /// Creates default header on top of parent with given hash. - pub fn with_parent_hash_on_runtime, I: crate::Instance>(parent_hash: H256) -> Self { - use crate::Headers; - use frame_support::StorageMap; - - let parent_header = Headers::::get(&parent_hash).unwrap().header; - Self::with_parent(&parent_header) - } - - /// Creates default header on top of parent with given number. First parent is selected. 
- pub fn with_parent_number_on_runtime, I: crate::Instance>(parent_number: u64) -> Self { - use crate::HeadersByNumber; - use frame_support::StorageMap; - - let parent_hash = HeadersByNumber::::get(parent_number).unwrap()[0]; - Self::with_parent_hash_on_runtime::(parent_hash) - } - - /// Creates default header on top of non-existent parent. - #[cfg(test)] - pub fn with_number(number: u64) -> Self { - Self::with_parent(&AuraHeader { - number: number - 1, - seal: vec![bp_eth_poa::rlp_encode(&(number - 1)).to_vec(), vec![]], - ..Default::default() - }) - } - - /// Creates default header on top of given parent. - pub fn with_parent(parent_header: &AuraHeader) -> Self { - let parent_step = parent_header.step().unwrap(); - let current_step = parent_step + 1; - Self { - header: AuraHeader { - parent_hash: parent_header.compute_hash(), - number: parent_header.number + 1, - gas_limit: GAS_LIMIT.into(), - seal: vec![bp_eth_poa::rlp_encode(¤t_step).to_vec(), vec![]], - difficulty: calculate_score(parent_step, current_step, 0), - ..Default::default() - }, - parent_header: parent_header.clone(), - } - } - - /// Update step of this header. - pub fn step(mut self, step: u64) -> Self { - let parent_step = self.parent_header.step(); - self.header.seal[0] = rlp_encode(&step).to_vec(); - self.header.difficulty = parent_step - .map(|parent_step| calculate_score(parent_step, step, 0)) - .unwrap_or_default(); - self - } - - /// Adds empty steps to this header. 
- pub fn empty_steps(mut self, empty_steps: &[(&SecretKey, u64)]) -> Self { - let sealed_empty_steps = empty_steps - .iter() - .map(|(author, step)| { - let mut empty_step = SealedEmptyStep { - step: *step, - signature: Default::default(), - }; - let message = empty_step.message(&self.header.parent_hash); - let signature: [u8; 65] = sign(author, message).into(); - empty_step.signature = signature.into(); - empty_step - }) - .collect::>(); - - // by default in test configuration headers are generated without empty steps seal - if self.header.seal.len() < 3 { - self.header.seal.push(Vec::new()); - } - - self.header.seal[2] = SealedEmptyStep::rlp_of(&sealed_empty_steps); - self - } - - /// Update difficulty field of this header. - pub fn difficulty(mut self, difficulty: U256) -> Self { - self.header.difficulty = difficulty; - self - } - - /// Update extra data field of this header. - pub fn extra_data(mut self, extra_data: Vec) -> Self { - self.header.extra_data = extra_data; - self - } - - /// Update gas limit field of this header. - pub fn gas_limit(mut self, gas_limit: U256) -> Self { - self.header.gas_limit = gas_limit; - self - } - - /// Update gas used field of this header. - pub fn gas_used(mut self, gas_used: U256) -> Self { - self.header.gas_used = gas_used; - self - } - - /// Update log bloom field of this header. - pub fn log_bloom(mut self, log_bloom: Bloom) -> Self { - self.header.log_bloom = log_bloom; - self - } - - /// Update receipts root field of this header. - pub fn receipts_root(mut self, receipts_root: H256) -> Self { - self.header.receipts_root = receipts_root; - self - } - - /// Update timestamp field of this header. - pub fn timestamp(mut self, timestamp: u64) -> Self { - self.header.timestamp = timestamp; - self - } - - /// Update transactions root field of this header. - pub fn transactions_root(mut self, transactions_root: H256) -> Self { - self.header.transactions_root = transactions_root; - self - } - - /// Signs header by given author. 
- pub fn sign_by(self, author: &SecretKey) -> AuraHeader { - self.header.sign_by(author) - } - - /// Signs header by given authors set. - pub fn sign_by_set(self, authors: &[SecretKey]) -> AuraHeader { - self.header.sign_by_set(authors) - } -} - -/// Helper function for getting a genesis header which has been signed by an authority. -pub fn build_genesis_header(author: &SecretKey) -> AuraHeader { - let genesis = HeaderBuilder::genesis(); - genesis.header.sign_by(&author) -} - -/// Helper function for building a custom child header which has been signed by an authority. -pub fn build_custom_header(author: &SecretKey, previous: &AuraHeader, customize_header: F) -> AuraHeader -where - F: FnOnce(AuraHeader) -> AuraHeader, -{ - let new_header = HeaderBuilder::with_parent(&previous); - let custom_header = customize_header(new_header.header); - custom_header.sign_by(author) -} - -/// Insert unverified header into storage. -/// -/// This function assumes that the header is signed by validator from the current set. -pub fn insert_header(storage: &mut S, header: AuraHeader) { - let id = header.compute_id(); - let best_finalized = storage.finalized_block(); - let import_context = storage.import_context(None, &header.parent_hash).unwrap(); - let parent_finality_votes = storage.cached_finality_votes(&header.parent_id().unwrap(), &best_finalized, |_| false); - let finality_votes = crate::finality::prepare_votes( - parent_finality_votes, - best_finalized, - &import_context.validators_set().validators.iter().collect(), - id, - &header, - None, - ) - .unwrap(); - - storage.insert_header(HeaderToImport { - context: storage.import_context(None, &header.parent_hash).unwrap(), - is_best: true, - id, - header, - total_difficulty: 0.into(), - enacted_change: None, - scheduled_change: None, - finality_votes, - }); -} - -/// Insert unverified header into storage. -/// -/// No assumptions about header author are made. 
The cost is that finality votes cache -/// is filled incorrectly, so this function shall not be used if you're going to insert -/// (or import) header descendants. -pub fn insert_dummy_header(storage: &mut S, header: AuraHeader) { - storage.insert_header(HeaderToImport { - context: storage.import_context(None, &header.parent_hash).unwrap(), - is_best: true, - id: header.compute_id(), - header, - total_difficulty: 0.into(), - enacted_change: None, - scheduled_change: None, - finality_votes: FinalityVotes::default(), - }); -} - -pub fn validators_change_receipt(parent_hash: H256) -> Receipt { - use bp_eth_poa::{LogEntry, TransactionOutcome}; - - Receipt { - gas_used: 0.into(), - log_bloom: (&[0xff; 256]).into(), - outcome: TransactionOutcome::Unknown, - logs: vec![LogEntry { - address: [3; 20].into(), - topics: vec![CHANGE_EVENT_HASH.into(), parent_hash], - data: vec![ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - ], - }], - } -} - -pub mod validator_utils { - use super::*; - - /// Return key pair of given test validator. - pub fn validator(index: usize) -> SecretKey { - let mut raw_secret = [0u8; 32]; - raw_secret[..8].copy_from_slice(&(index + 1).to_le_bytes()); - SecretKey::parse(&raw_secret).unwrap() - } - - /// Return key pairs of all test validators. - pub fn validators(count: usize) -> Vec { - (0..count).map(validator).collect() - } - - /// Return address of test validator. - pub fn validator_address(index: usize) -> Address { - secret_to_address(&validator(index)) - } - - /// Return addresses of all test validators. - pub fn validators_addresses(count: usize) -> Vec
{ - (0..count).map(validator_address).collect() - } -} diff --git a/polkadot/bridges/modules/ethereum/src/validators.rs b/polkadot/bridges/modules/ethereum/src/validators.rs deleted file mode 100644 index d4ddac66b7df0610aa3f10d5404f04ccfd31ed10..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/validators.rs +++ /dev/null @@ -1,476 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::error::Error; -use crate::{ChangeToEnact, Storage}; -use bp_eth_poa::{Address, AuraHeader, HeaderId, LogEntry, Receipt, U256}; -use sp_std::prelude::*; - -/// The hash of InitiateChange event of the validators set contract. -pub(crate) const CHANGE_EVENT_HASH: &[u8; 32] = &[ - 0x55, 0x25, 0x2f, 0xa6, 0xee, 0xe4, 0x74, 0x1b, 0x4e, 0x24, 0xa7, 0x4a, 0x70, 0xe9, 0xc1, 0x1f, 0xd2, 0xc2, 0x28, - 0x1d, 0xf8, 0xd6, 0xea, 0x13, 0x12, 0x6f, 0xf8, 0x45, 0xf7, 0x82, 0x5c, 0x89, -]; - -/// Where source of validators addresses come from. This covers the chain lifetime. -pub enum ValidatorsConfiguration { - /// There's a single source for the whole chain lifetime. - Single(ValidatorsSource), - /// Validators source changes at given blocks. The blocks are ordered - /// by the block number. 
- Multi(Vec<(u64, ValidatorsSource)>), -} - -/// Where validators addresses come from. -/// -/// This source is valid within some blocks range. The blocks range could -/// cover multiple epochs - i.e. the validators that are authoring blocks -/// within this range could change, but the source itself can not. -#[cfg_attr(any(test, feature = "runtime-benchmarks"), derive(Debug, PartialEq))] -pub enum ValidatorsSource { - /// The validators addresses are hardcoded and never change. - List(Vec
), - /// The validators addresses are determined by the validators set contract - /// deployed at given address. The contract must implement the `ValidatorSet` - /// interface. Additionally, the initial validators set must be provided. - Contract(Address, Vec
), -} - -/// A short hand for optional validators change. -pub type ValidatorsChange = Option>; - -/// Validators manager. -pub struct Validators<'a> { - config: &'a ValidatorsConfiguration, -} - -impl<'a> Validators<'a> { - /// Creates new validators manager using given configuration. - pub fn new(config: &'a ValidatorsConfiguration) -> Self { - Self { config } - } - - /// Returns true if header (probabilistically) signals validators change and - /// the caller needs to provide transactions receipts to import the header. - pub fn maybe_signals_validators_change(&self, header: &AuraHeader) -> bool { - let (_, _, source) = self.source_at(header.number); - - // if we are taking validators set from the fixed list, there's always - // single epoch - // => we never require transactions receipts - let contract_address = match source { - ValidatorsSource::List(_) => return false, - ValidatorsSource::Contract(contract_address, _) => contract_address, - }; - - // else we need to check logs bloom and if it has required bits set, it means - // that the contract has (probably) emitted epoch change event - let expected_bloom = LogEntry { - address: *contract_address, - topics: vec![CHANGE_EVENT_HASH.into(), header.parent_hash], - data: Vec::new(), // irrelevant for bloom. - } - .bloom(); - - header.log_bloom.contains(&expected_bloom) - } - - /// Extracts validators change signal from the header. - /// - /// Returns tuple where first element is the change scheduled by this header - /// (i.e. this change is only applied starting from the block that has finalized - /// current block). The second element is the immediately applied change. 
- pub fn extract_validators_change( - &self, - header: &AuraHeader, - receipts: Option>, - ) -> Result<(ValidatorsChange, ValidatorsChange), Error> { - // let's first check if new source is starting from this header - let (source_index, _, source) = self.source_at(header.number); - let (next_starts_at, next_source) = self.source_at_next_header(source_index, header.number); - if next_starts_at == header.number { - match *next_source { - ValidatorsSource::List(ref new_list) => return Ok((None, Some(new_list.clone()))), - ValidatorsSource::Contract(_, ref new_list) => return Ok((Some(new_list.clone()), None)), - } - } - - // else deal with previous source - // - // if we are taking validators set from the fixed list, there's always - // single epoch - // => we never require transactions receipts - let contract_address = match source { - ValidatorsSource::List(_) => return Ok((None, None)), - ValidatorsSource::Contract(contract_address, _) => contract_address, - }; - - // else we need to check logs bloom and if it has required bits set, it means - // that the contract has (probably) emitted epoch change event - let expected_bloom = LogEntry { - address: *contract_address, - topics: vec![CHANGE_EVENT_HASH.into(), header.parent_hash], - data: Vec::new(), // irrelevant for bloom. 
- } - .bloom(); - - if !header.log_bloom.contains(&expected_bloom) { - return Ok((None, None)); - } - - let receipts = receipts.ok_or(Error::MissingTransactionsReceipts)?; - if header.check_receipts_root(&receipts).is_err() { - return Err(Error::TransactionsReceiptsMismatch); - } - - // iterate in reverse because only the _last_ change in a given - // block actually has any effect - Ok(( - receipts - .iter() - .rev() - .filter(|r| r.log_bloom.contains(&expected_bloom)) - .flat_map(|r| r.logs.iter()) - .filter(|l| { - l.address == *contract_address - && l.topics.len() == 2 && l.topics[0].as_fixed_bytes() == CHANGE_EVENT_HASH - && l.topics[1] == header.parent_hash - }) - .filter_map(|l| { - let data_len = l.data.len(); - if data_len < 64 { - return None; - } - - let new_validators_len_u256 = U256::from_big_endian(&l.data[32..64]); - let new_validators_len = new_validators_len_u256.low_u64(); - if new_validators_len_u256 != new_validators_len.into() { - return None; - } - - if (data_len - 64) as u64 != new_validators_len.saturating_mul(32) { - return None; - } - - Some( - l.data[64..] - .chunks(32) - .map(|chunk| { - let mut new_validator = Address::default(); - new_validator.as_mut().copy_from_slice(&chunk[12..32]); - new_validator - }) - .collect(), - ) - }) - .next(), - None, - )) - } - - /// Finalize changes when blocks are finalized. 
- pub fn finalize_validators_change( - &self, - storage: &S, - finalized_blocks: &[(HeaderId, Option)], - ) -> Option { - // if we haven't finalized any blocks, no changes may be finalized - let newest_finalized_id = match finalized_blocks.last().map(|(id, _)| id) { - Some(last_finalized_id) => last_finalized_id, - None => return None, - }; - let oldest_finalized_id = finalized_blocks - .first() - .map(|(id, _)| id) - .expect("finalized_blocks is not empty; qed"); - - // try to directly go to the header that has scheduled last change - // - // if we're unable to create import context for some block, it means - // that the header has already been pruned => it and its ancestors had - // no scheduled changes - // - // if we're unable to find scheduled changes for some block, it means - // that these changes have been finalized already - storage - .import_context(None, &newest_finalized_id.hash) - .and_then(|context| context.last_signal_block()) - .and_then(|signal_block| { - if signal_block.number >= oldest_finalized_id.number { - Some(signal_block) - } else { - None - } - }) - .and_then(|signal_block| { - storage - .scheduled_change(&signal_block.hash) - .map(|change| ChangeToEnact { - signal_block: Some(signal_block), - validators: change.validators, - }) - }) - } - - /// Returns source of validators that should author the header. - fn source_at(&self, header_number: u64) -> (usize, u64, &ValidatorsSource) { - match self.config { - ValidatorsConfiguration::Single(ref source) => (0, 0, source), - ValidatorsConfiguration::Multi(ref sources) => sources - .iter() - .rev() - .enumerate() - .find(|(_, &(begin, _))| begin < header_number) - .map(|(i, (begin, source))| (sources.len() - 1 - i, *begin, source)) - .expect( - "there's always entry for the initial block;\ - we do not touch any headers with number < initial block number; qed", - ), - } - } - - /// Returns source of validators that should author the next header. 
- fn source_at_next_header(&self, header_source_index: usize, header_number: u64) -> (u64, &ValidatorsSource) { - match self.config { - ValidatorsConfiguration::Single(ref source) => (0, source), - ValidatorsConfiguration::Multi(ref sources) => { - let next_source_index = header_source_index + 1; - if next_source_index < sources.len() { - let next_source = &sources[next_source_index]; - if next_source.0 < header_number + 1 { - return (next_source.0, &next_source.1); - } - } - - let source = &sources[header_source_index]; - (source.0, &source.1) - } - } - } -} - -impl ValidatorsSource { - /// Returns initial validators set. - pub fn initial_epoch_validators(&self) -> Vec
{ - match self { - ValidatorsSource::List(ref list) => list.clone(), - ValidatorsSource::Contract(_, ref list) => list.clone(), - } - } -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use crate::mock::{run_test, validators_addresses, validators_change_receipt, TestRuntime}; - use crate::DefaultInstance; - use crate::{AuraScheduledChange, BridgeStorage, Headers, ScheduledChanges, StoredHeader}; - use bp_eth_poa::compute_merkle_root; - use frame_support::StorageMap; - - const TOTAL_VALIDATORS: usize = 3; - - #[test] - fn source_at_works() { - let config = ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(vec![[1; 20].into()])), - (100, ValidatorsSource::List(vec![[2; 20].into()])), - (200, ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])), - ]); - let validators = Validators::new(&config); - - assert_eq!( - validators.source_at(99), - (0, 0, &ValidatorsSource::List(vec![[1; 20].into()])), - ); - assert_eq!( - validators.source_at_next_header(0, 99), - (0, &ValidatorsSource::List(vec![[1; 20].into()])), - ); - - assert_eq!( - validators.source_at(100), - (0, 0, &ValidatorsSource::List(vec![[1; 20].into()])), - ); - assert_eq!( - validators.source_at_next_header(0, 100), - (100, &ValidatorsSource::List(vec![[2; 20].into()])), - ); - - assert_eq!( - validators.source_at(200), - (1, 100, &ValidatorsSource::List(vec![[2; 20].into()])), - ); - assert_eq!( - validators.source_at_next_header(1, 200), - (200, &ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])), - ); - } - - #[test] - fn maybe_signals_validators_change_works() { - // when contract is active, but bloom has no required bits set - let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(Default::default(), Vec::new())); - let validators = Validators::new(&config); - let mut header = AuraHeader { - number: u64::max_value(), - ..Default::default() - }; - assert!(!validators.maybe_signals_validators_change(&header)); - - // when contract is 
active and bloom has required bits set - header.log_bloom = (&[0xff; 256]).into(); - assert!(validators.maybe_signals_validators_change(&header)); - - // when list is active and bloom has required bits set - let config = ValidatorsConfiguration::Single(ValidatorsSource::List(vec![[42; 20].into()])); - let validators = Validators::new(&config); - assert!(!validators.maybe_signals_validators_change(&header)); - } - - #[test] - fn extract_validators_change_works() { - let config = ValidatorsConfiguration::Multi(vec![ - (0, ValidatorsSource::List(vec![[1; 20].into()])), - (100, ValidatorsSource::List(vec![[2; 20].into()])), - (200, ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])), - ]); - let validators = Validators::new(&config); - let mut header = AuraHeader { - number: 100, - ..Default::default() - }; - - // when we're at the block that switches to list source - assert_eq!( - validators.extract_validators_change(&header, None), - Ok((None, Some(vec![[2; 20].into()]))), - ); - - // when we're inside list range - header.number = 150; - assert_eq!(validators.extract_validators_change(&header, None), Ok((None, None)),); - - // when we're at the block that switches to contract source - header.number = 200; - assert_eq!( - validators.extract_validators_change(&header, None), - Ok((Some(vec![[3; 20].into()]), None)), - ); - - // when we're inside contract range and logs bloom signals change - // but we have no receipts - header.number = 250; - header.log_bloom = (&[0xff; 256]).into(); - assert_eq!( - validators.extract_validators_change(&header, None), - Err(Error::MissingTransactionsReceipts), - ); - - // when we're inside contract range and logs bloom signals change - // but there's no change in receipts - header.receipts_root = "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - .parse() - .unwrap(); - assert_eq!( - validators.extract_validators_change(&header, Some(Vec::new())), - Ok((None, None)), - ); - - // when we're inside 
contract range and logs bloom signals change - // and there's change in receipts - let receipts = vec![validators_change_receipt(Default::default())]; - header.receipts_root = compute_merkle_root(receipts.iter().map(|r| r.rlp())); - assert_eq!( - validators.extract_validators_change(&header, Some(receipts)), - Ok((Some(vec![[7; 20].into()]), None)), - ); - - // when incorrect receipts root passed - assert_eq!( - validators.extract_validators_change(&header, Some(Vec::new())), - Err(Error::TransactionsReceiptsMismatch), - ); - } - - fn try_finalize_with_scheduled_change(scheduled_at: Option) -> Option { - run_test(TOTAL_VALIDATORS, |_| { - let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(Default::default(), Vec::new())); - let validators = Validators::new(&config); - let storage = BridgeStorage::::new(); - - // when we're finailizing blocks 10...100 - let id10 = HeaderId { - number: 10, - hash: [10; 32].into(), - }; - let id100 = HeaderId { - number: 100, - hash: [100; 32].into(), - }; - let finalized_blocks = vec![(id10, None), (id100, None)]; - let header100 = StoredHeader:: { - submitter: None, - header: AuraHeader { - number: 100, - ..Default::default() - }, - total_difficulty: 0.into(), - next_validators_set_id: 0, - last_signal_block: scheduled_at, - }; - let scheduled_change = AuraScheduledChange { - validators: validators_addresses(1), - prev_signal_block: None, - }; - Headers::::insert(id100.hash, header100); - if let Some(scheduled_at) = scheduled_at { - ScheduledChanges::::insert(scheduled_at.hash, scheduled_change); - } - - validators.finalize_validators_change(&storage, &finalized_blocks) - }) - } - - #[test] - fn finalize_validators_change_finalizes_scheduled_change() { - let id50 = HeaderId { - number: 50, - ..Default::default() - }; - assert_eq!( - try_finalize_with_scheduled_change(Some(id50)), - Some(ChangeToEnact { - signal_block: Some(id50), - validators: validators_addresses(1), - }), - ); - } - - #[test] - fn 
finalize_validators_change_does_not_finalize_when_changes_are_not_scheduled() { - assert_eq!(try_finalize_with_scheduled_change(None), None,); - } - - #[test] - fn finalize_validators_change_does_not_finalize_changes_when_they_are_outside_of_range() { - let id5 = HeaderId { - number: 5, - ..Default::default() - }; - assert_eq!(try_finalize_with_scheduled_change(Some(id5)), None,); - } -} diff --git a/polkadot/bridges/modules/ethereum/src/verification.rs b/polkadot/bridges/modules/ethereum/src/verification.rs deleted file mode 100644 index 3882e6b529edf76f1443c67afa8df88514983a73..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/ethereum/src/verification.rs +++ /dev/null @@ -1,945 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::error::Error; -use crate::validators::{Validators, ValidatorsConfiguration}; -use crate::{AuraConfiguration, AuraScheduledChange, ChainTime, ImportContext, PoolConfiguration, Storage}; -use bp_eth_poa::{ - public_to_address, step_validator, Address, AuraHeader, HeaderId, Receipt, SealedEmptyStep, H256, H520, U128, U256, -}; -use codec::Encode; -use sp_io::crypto::secp256k1_ecdsa_recover; -use sp_runtime::transaction_validity::TransactionTag; -use sp_std::{vec, vec::Vec}; - -/// Pre-check to see if should try and import this header. -/// Returns error if we should not try to import this block. -/// Returns ID of passed header and best finalized header. -pub fn is_importable_header(storage: &S, header: &AuraHeader) -> Result<(HeaderId, HeaderId), Error> { - // we never import any header that competes with finalized header - let finalized_id = storage.finalized_block(); - if header.number <= finalized_id.number { - return Err(Error::AncientHeader); - } - // we never import any header with known hash - let id = header.compute_id(); - if storage.header(&id.hash).is_some() { - return Err(Error::KnownHeader); - } - - Ok((id, finalized_id)) -} - -/// Try accept unsigned aura header into transaction pool. -/// -/// Returns required and provided tags. 
-pub fn accept_aura_header_into_pool( - storage: &S, - config: &AuraConfiguration, - validators_config: &ValidatorsConfiguration, - pool_config: &PoolConfiguration, - header: &AuraHeader, - chain_time: &CT, - receipts: Option<&Vec>, -) -> Result<(Vec, Vec), Error> { - // check if we can verify further - let (header_id, _) = is_importable_header(storage, header)?; - - // we can always do contextless checks - contextless_checks(config, header, chain_time)?; - - // we want to avoid having same headers twice in the pool - // => we're strict about receipts here - if we need them, we require receipts to be Some, - // otherwise we require receipts to be None - let receipts_required = Validators::new(validators_config).maybe_signals_validators_change(header); - match (receipts_required, receipts.is_some()) { - (true, false) => return Err(Error::MissingTransactionsReceipts), - (false, true) => return Err(Error::RedundantTransactionsReceipts), - _ => (), - } - - // we do not want to have all future headers in the pool at once - // => if we see header with number > maximal ever seen header number + LIMIT, - // => we consider this transaction invalid, but only at this moment (we do not want to ban it) - // => let's mark it as Unknown transaction - let (best_id, _) = storage.best_block(); - let difference = header.number.saturating_sub(best_id.number); - if difference > pool_config.max_future_number_difference { - return Err(Error::UnsignedTooFarInTheFuture); - } - - // TODO: only accept new headers when we're at the tip of PoA chain - // https://github.com/paritytech/parity-bridges-common/issues/38 - - // we want to see at most one header with given number from single authority - // => every header is providing tag (block_number + authority) - // => since only one tx in the pool can provide the same tag, they're auto-deduplicated - let provides_number_and_authority_tag = (header.number, header.author).encode(); - - // we want to see several 'future' headers in the pool at 
once, but we may not have access to - // previous headers here - // => we can at least 'verify' that headers comprise a chain by providing and requiring - // tag (header.number, header.hash) - let provides_header_number_and_hash_tag = header_id.encode(); - - // depending on whether parent header is available, we either perform full or 'shortened' check - let context = storage.import_context(None, &header.parent_hash); - let tags = match context { - Some(context) => { - let header_step = contextual_checks(config, &context, None, header)?; - validator_checks(config, &context.validators_set().validators, header, header_step)?; - - // since our parent is already in the storage, we do not require it - // to be in the transaction pool - ( - vec![], - vec![provides_number_and_authority_tag, provides_header_number_and_hash_tag], - ) - } - None => { - // we know nothing about parent header - // => the best thing we can do is to believe that there are no forks in - // PoA chain AND that the header is produced either by previous, or next - // scheduled validators set change - let header_step = header.step().ok_or(Error::MissingStep)?; - let best_context = storage.import_context(None, &best_id.hash).expect( - "import context is None only when header is missing from the storage;\ - best header is always in the storage; qed", - ); - let validators_check_result = - validator_checks(config, &best_context.validators_set().validators, header, header_step); - if let Err(error) = validators_check_result { - find_next_validators_signal(storage, &best_context) - .ok_or(error) - .and_then(|next_validators| validator_checks(config, &next_validators, header, header_step))?; - } - - // since our parent is missing from the storage, we **DO** require it - // to be in the transaction pool - // (- 1 can't underflow because there's always best block in the header) - let requires_header_number_and_hash_tag = HeaderId { - number: header.number - 1, - hash: header.parent_hash, - } - .encode(); - ( 
- vec![requires_header_number_and_hash_tag], - vec![provides_number_and_authority_tag, provides_header_number_and_hash_tag], - ) - } - }; - - // the heaviest, but rare operation - we do not want invalid receipts in the pool - if let Some(receipts) = receipts { - frame_support::debug::trace!(target: "runtime", "Got receipts! {:?}", receipts); - if header.check_receipts_root(receipts).is_err() { - return Err(Error::TransactionsReceiptsMismatch); - } - } - - Ok(tags) -} - -/// Verify header by Aura rules. -pub fn verify_aura_header( - storage: &S, - config: &AuraConfiguration, - submitter: Option, - header: &AuraHeader, - chain_time: &CT, -) -> Result, Error> { - // let's do the lightest check first - contextless_checks(config, header, chain_time)?; - - // the rest of checks requires access to the parent header - let context = storage.import_context(submitter, &header.parent_hash).ok_or_else(|| { - frame_support::debug::warn!( - target: "runtime", - "Missing parent PoA block: ({:?}, {})", - header.number.checked_sub(1), - header.parent_hash, - ); - - Error::MissingParentBlock - })?; - let header_step = contextual_checks(config, &context, None, header)?; - validator_checks(config, &context.validators_set().validators, header, header_step)?; - - Ok(context) -} - -/// Perform basic checks that only require header itself. 
-fn contextless_checks( - config: &AuraConfiguration, - header: &AuraHeader, - chain_time: &CT, -) -> Result<(), Error> { - let expected_seal_fields = expected_header_seal_fields(config, header); - if header.seal.len() != expected_seal_fields { - return Err(Error::InvalidSealArity); - } - if header.number >= u64::max_value() { - return Err(Error::RidiculousNumber); - } - if header.gas_used > header.gas_limit { - return Err(Error::TooMuchGasUsed); - } - if header.gas_limit < config.min_gas_limit { - return Err(Error::InvalidGasLimit); - } - if header.gas_limit > config.max_gas_limit { - return Err(Error::InvalidGasLimit); - } - if header.number != 0 && header.extra_data.len() as u64 > config.maximum_extra_data_size { - return Err(Error::ExtraDataOutOfBounds); - } - - // we can't detect if block is from future in runtime - // => let's only do an overflow check - if header.timestamp > i32::max_value() as u64 { - return Err(Error::TimestampOverflow); - } - - if chain_time.is_timestamp_ahead(header.timestamp) { - return Err(Error::HeaderTimestampIsAhead); - } - - Ok(()) -} - -/// Perform checks that require access to parent header. -fn contextual_checks( - config: &AuraConfiguration, - context: &ImportContext, - validators_override: Option<&[Address]>, - header: &AuraHeader, -) -> Result { - let validators = validators_override.unwrap_or_else(|| &context.validators_set().validators); - let header_step = header.step().ok_or(Error::MissingStep)?; - let parent_step = context.parent_header().step().ok_or(Error::MissingStep)?; - - // Ensure header is from the step after context. 
- if header_step == parent_step { - return Err(Error::DoubleVote); - } - #[allow(clippy::suspicious_operation_groupings)] - if header.number >= config.validate_step_transition && header_step < parent_step { - return Err(Error::DoubleVote); - } - - // If empty step messages are enabled we will validate the messages in the seal, missing messages are not - // reported as there's no way to tell whether the empty step message was never sent or simply not included. - let empty_steps_len = match header.number >= config.empty_steps_transition { - true => { - let strict_empty_steps = header.number >= config.strict_empty_steps_transition; - let empty_steps = header.empty_steps().ok_or(Error::MissingEmptySteps)?; - let empty_steps_len = empty_steps.len(); - let mut prev_empty_step = 0; - - for empty_step in empty_steps { - if empty_step.step <= parent_step || empty_step.step >= header_step { - return Err(Error::InsufficientProof); - } - - if !verify_empty_step(&header.parent_hash, &empty_step, validators) { - return Err(Error::InsufficientProof); - } - - if strict_empty_steps { - if empty_step.step <= prev_empty_step { - return Err(Error::InsufficientProof); - } - - prev_empty_step = empty_step.step; - } - } - - empty_steps_len - } - false => 0, - }; - - // Validate chain score. - if header.number >= config.validate_score_transition { - let expected_difficulty = calculate_score(parent_step, header_step, empty_steps_len as _); - if header.difficulty != expected_difficulty { - return Err(Error::InvalidDifficulty); - } - } - - Ok(header_step) -} - -/// Check that block is produced by expected validator. 
-fn validator_checks( - config: &AuraConfiguration, - validators: &[Address], - header: &AuraHeader, - header_step: u64, -) -> Result<(), Error> { - let expected_validator = *step_validator(validators, header_step); - if header.author != expected_validator { - return Err(Error::NotValidator); - } - - let validator_signature = header.signature().ok_or(Error::MissingSignature)?; - let header_seal_hash = header - .seal_hash(header.number >= config.empty_steps_transition) - .ok_or(Error::MissingEmptySteps)?; - let is_invalid_proposer = !verify_signature(&expected_validator, &validator_signature, &header_seal_hash); - if is_invalid_proposer { - return Err(Error::NotValidator); - } - - Ok(()) -} - -/// Returns expected number of seal fields in the header. -fn expected_header_seal_fields(config: &AuraConfiguration, header: &AuraHeader) -> usize { - if header.number != u64::max_value() && header.number >= config.empty_steps_transition { - 3 - } else { - 2 - } -} - -/// Verify single sealed empty step. -fn verify_empty_step(parent_hash: &H256, step: &SealedEmptyStep, validators: &[Address]) -> bool { - let expected_validator = *step_validator(validators, step.step); - let message = step.message(parent_hash); - verify_signature(&expected_validator, &step.signature, &message) -} - -/// Chain scoring: total weight is sqrt(U256::max_value())*height - step -pub(crate) fn calculate_score(parent_step: u64, current_step: u64, current_empty_steps: usize) -> U256 { - U256::from(U128::max_value()) + U256::from(parent_step) - U256::from(current_step) + U256::from(current_empty_steps) -} - -/// Verify that the signature over message has been produced by given validator. 
-fn verify_signature(expected_validator: &Address, signature: &H520, message: &H256) -> bool { - secp256k1_ecdsa_recover(signature.as_fixed_bytes(), message.as_fixed_bytes()) - .map(|public| public_to_address(&public)) - .map(|address| *expected_validator == address) - .unwrap_or(false) -} - -/// Find next unfinalized validators set change after finalized set. -fn find_next_validators_signal(storage: &S, context: &ImportContext) -> Option> { - // that's the earliest block number we may met in following loop - // it may be None if that's the first set - let best_set_signal_block = context.validators_set().signal_block; - - // if parent schedules validators set change, then it may be our set - // else we'll start with last known change - let mut current_set_signal_block = context.last_signal_block(); - let mut next_scheduled_set: Option = None; - - loop { - // if we have reached block that signals finalized change, then - // next_current_block_hash points to the block that schedules next - // change - let current_scheduled_set = match current_set_signal_block { - Some(current_set_signal_block) if Some(¤t_set_signal_block) == best_set_signal_block.as_ref() => { - return next_scheduled_set.map(|scheduled_set| scheduled_set.validators) - } - None => return next_scheduled_set.map(|scheduled_set| scheduled_set.validators), - Some(current_set_signal_block) => storage.scheduled_change(¤t_set_signal_block.hash).expect( - "header that is associated with this change is not pruned;\ - scheduled changes are only removed when header is pruned; qed", - ), - }; - - current_set_signal_block = current_scheduled_set.prev_signal_block; - next_scheduled_set = Some(current_scheduled_set); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{ - insert_header, run_test_with_genesis, test_aura_config, validator, validator_address, validators_addresses, - validators_change_receipt, AccountId, ConstChainTime, HeaderBuilder, TestRuntime, GAS_LIMIT, - }; - use 
crate::validators::ValidatorsSource; - use crate::DefaultInstance; - use crate::{ - pool_configuration, BridgeStorage, FinalizedBlock, Headers, HeadersByNumber, NextValidatorsSetId, - ScheduledChanges, ValidatorsSet, ValidatorsSets, - }; - use bp_eth_poa::{compute_merkle_root, rlp_encode, TransactionOutcome, H520, U256}; - use frame_support::{StorageMap, StorageValue}; - use hex_literal::hex; - use secp256k1::SecretKey; - use sp_runtime::transaction_validity::TransactionTag; - - const GENESIS_STEP: u64 = 42; - const TOTAL_VALIDATORS: usize = 3; - - fn genesis() -> AuraHeader { - HeaderBuilder::genesis().step(GENESIS_STEP).sign_by(&validator(0)) - } - - fn verify_with_config(config: &AuraConfiguration, header: &AuraHeader) -> Result, Error> { - run_test_with_genesis(genesis(), TOTAL_VALIDATORS, |_| { - let storage = BridgeStorage::::new(); - verify_aura_header(&storage, &config, None, header, &ConstChainTime::default()) - }) - } - - fn default_verify(header: &AuraHeader) -> Result, Error> { - verify_with_config(&test_aura_config(), header) - } - - fn default_accept_into_pool( - mut make_header: impl FnMut(&[SecretKey]) -> (AuraHeader, Option>), - ) -> Result<(Vec, Vec), Error> { - run_test_with_genesis(genesis(), TOTAL_VALIDATORS, |_| { - let validators = vec![validator(0), validator(1), validator(2)]; - let mut storage = BridgeStorage::::new(); - let block1 = HeaderBuilder::with_parent_number(0).sign_by_set(&validators); - insert_header(&mut storage, block1); - let block2 = HeaderBuilder::with_parent_number(1).sign_by_set(&validators); - let block2_id = block2.compute_id(); - insert_header(&mut storage, block2); - let block3 = HeaderBuilder::with_parent_number(2).sign_by_set(&validators); - insert_header(&mut storage, block3); - - FinalizedBlock::::put(block2_id); - - let validators_config = - ValidatorsConfiguration::Single(ValidatorsSource::Contract(Default::default(), Vec::new())); - let (header, receipts) = make_header(&validators); - 
accept_aura_header_into_pool( - &storage, - &test_aura_config(), - &validators_config, - &pool_configuration(), - &header, - &(), - receipts.as_ref(), - ) - }) - } - - fn change_validators_set_at(number: u64, finalized_set: Vec
, signalled_set: Option>) { - let set_id = NextValidatorsSetId::::get(); - NextValidatorsSetId::::put(set_id + 1); - ValidatorsSets::::insert( - set_id, - ValidatorsSet { - validators: finalized_set, - signal_block: None, - enact_block: HeaderId { - number: 0, - hash: HeadersByNumber::::get(&0).unwrap()[0], - }, - }, - ); - - let header_hash = HeadersByNumber::::get(&number).unwrap()[0]; - let mut header = Headers::::get(&header_hash).unwrap(); - header.next_validators_set_id = set_id; - if let Some(signalled_set) = signalled_set { - header.last_signal_block = Some(HeaderId { - number: header.header.number - 1, - hash: header.header.parent_hash, - }); - ScheduledChanges::::insert( - header.header.parent_hash, - AuraScheduledChange { - validators: signalled_set, - prev_signal_block: None, - }, - ); - } - - Headers::::insert(header_hash, header); - } - - #[test] - fn verifies_seal_count() { - // when there are no seals at all - let mut header = AuraHeader::default(); - assert_eq!(default_verify(&header), Err(Error::InvalidSealArity)); - - // when there's single seal (we expect 2 or 3 seals) - header.seal = vec![vec![]]; - assert_eq!(default_verify(&header), Err(Error::InvalidSealArity)); - - // when there's 3 seals (we expect 2 by default) - header.seal = vec![vec![], vec![], vec![]]; - assert_eq!(default_verify(&header), Err(Error::InvalidSealArity)); - - // when there's 2 seals - header.seal = vec![vec![], vec![]]; - assert_ne!(default_verify(&header), Err(Error::InvalidSealArity)); - } - - #[test] - fn verifies_header_number() { - // when number is u64::max_value() - let header = HeaderBuilder::with_number(u64::max_value()).sign_by(&validator(0)); - assert_eq!(default_verify(&header), Err(Error::RidiculousNumber)); - - // when header is < u64::max_value() - let header = HeaderBuilder::with_number(u64::max_value() - 1).sign_by(&validator(0)); - assert_ne!(default_verify(&header), Err(Error::RidiculousNumber)); - } - - #[test] - fn verifies_gas_used() { - // when 
gas used is larger than gas limit - let header = HeaderBuilder::with_number(1) - .gas_used((GAS_LIMIT + 1).into()) - .sign_by(&validator(0)); - assert_eq!(default_verify(&header), Err(Error::TooMuchGasUsed)); - - // when gas used is less than gas limit - let header = HeaderBuilder::with_number(1) - .gas_used((GAS_LIMIT - 1).into()) - .sign_by(&validator(0)); - assert_ne!(default_verify(&header), Err(Error::TooMuchGasUsed)); - } - - #[test] - fn verifies_gas_limit() { - let mut config = test_aura_config(); - config.min_gas_limit = 100.into(); - config.max_gas_limit = 200.into(); - - // when limit is lower than expected - let header = HeaderBuilder::with_number(1) - .gas_limit(50.into()) - .sign_by(&validator(0)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit)); - - // when limit is larger than expected - let header = HeaderBuilder::with_number(1) - .gas_limit(250.into()) - .sign_by(&validator(0)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit)); - - // when limit is within expected range - let header = HeaderBuilder::with_number(1) - .gas_limit(150.into()) - .sign_by(&validator(0)); - assert_ne!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit)); - } - - #[test] - fn verifies_extra_data_len() { - // when extra data is too large - let header = HeaderBuilder::with_number(1) - .extra_data(std::iter::repeat(42).take(1000).collect::>()) - .sign_by(&validator(0)); - assert_eq!(default_verify(&header), Err(Error::ExtraDataOutOfBounds)); - - // when extra data size is OK - let header = HeaderBuilder::with_number(1) - .extra_data(std::iter::repeat(42).take(10).collect::>()) - .sign_by(&validator(0)); - assert_ne!(default_verify(&header), Err(Error::ExtraDataOutOfBounds)); - } - - #[test] - fn verifies_timestamp() { - // when timestamp overflows i32 - let header = HeaderBuilder::with_number(1) - .timestamp(i32::max_value() as u64 + 1) - .sign_by(&validator(0)); - 
assert_eq!(default_verify(&header), Err(Error::TimestampOverflow)); - - // when timestamp doesn't overflow i32 - let header = HeaderBuilder::with_number(1) - .timestamp(i32::max_value() as u64) - .sign_by(&validator(0)); - assert_ne!(default_verify(&header), Err(Error::TimestampOverflow)); - } - - #[test] - fn verifies_chain_time() { - // expected import context after verification - let expect = ImportContext:: { - submitter: None, - parent_hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3").into(), - parent_header: genesis(), - parent_total_difficulty: U256::zero(), - parent_scheduled_change: None, - validators_set_id: 0, - validators_set: ValidatorsSet { - validators: vec![ - hex!("dc5b20847f43d67928f49cd4f85d696b5a7617b5").into(), - hex!("897df33a7b3c62ade01e22c13d48f98124b4480f").into(), - hex!("05c987b34c6ef74e0c7e69c6e641120c24164c2d").into(), - ], - signal_block: None, - enact_block: HeaderId { - number: 0, - hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3").into(), - }, - }, - last_signal_block: None, - }; - - // header is behind - let header = HeaderBuilder::with_parent(&genesis()) - .timestamp(i32::max_value() as u64 / 2 - 100) - .sign_by(&validator(1)); - assert_eq!(default_verify(&header).unwrap(), expect); - - // header is ahead - let header = HeaderBuilder::with_parent(&genesis()) - .timestamp(i32::max_value() as u64 / 2 + 100) - .sign_by(&validator(1)); - assert_eq!(default_verify(&header), Err(Error::HeaderTimestampIsAhead)); - - // header has same timestamp as ConstChainTime - let header = HeaderBuilder::with_parent(&genesis()) - .timestamp(i32::max_value() as u64 / 2) - .sign_by(&validator(1)); - assert_eq!(default_verify(&header).unwrap(), expect); - } - - #[test] - fn verifies_parent_existence() { - // when there's no parent in the storage - let header = HeaderBuilder::with_number(1).sign_by(&validator(0)); - assert_eq!(default_verify(&header), Err(Error::MissingParentBlock)); - - // when 
parent is in the storage - let header = HeaderBuilder::with_parent(&genesis()).sign_by(&validator(0)); - assert_ne!(default_verify(&header), Err(Error::MissingParentBlock)); - } - - #[test] - fn verifies_step() { - // when step is missing from seals - let mut header = AuraHeader { - seal: vec![vec![], vec![]], - gas_limit: test_aura_config().min_gas_limit, - parent_hash: genesis().compute_hash(), - ..Default::default() - }; - assert_eq!(default_verify(&header), Err(Error::MissingStep)); - - // when step is the same as for the parent block - header.seal[0] = rlp_encode(&42u64).to_vec(); - assert_eq!(default_verify(&header), Err(Error::DoubleVote)); - - // when step is OK - header.seal[0] = rlp_encode(&43u64).to_vec(); - assert_ne!(default_verify(&header), Err(Error::DoubleVote)); - - // now check with validate_step check enabled - let mut config = test_aura_config(); - config.validate_step_transition = 0; - - // when step is lesser that for the parent block - header.seal[0] = rlp_encode(&40u64).to_vec(); - header.seal = vec![vec![40], vec![]]; - assert_eq!(verify_with_config(&config, &header), Err(Error::DoubleVote)); - - // when step is OK - header.seal[0] = rlp_encode(&44u64).to_vec(); - assert_ne!(verify_with_config(&config, &header), Err(Error::DoubleVote)); - } - - #[test] - fn verifies_empty_step() { - let mut config = test_aura_config(); - config.empty_steps_transition = 0; - - // when empty step duplicates parent step - let header = HeaderBuilder::with_parent(&genesis()) - .empty_steps(&[(&validator(0), GENESIS_STEP)]) - .step(GENESIS_STEP + 3) - .sign_by(&validator(3)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); - - // when empty step signature check fails - let header = HeaderBuilder::with_parent(&genesis()) - .empty_steps(&[(&validator(100), GENESIS_STEP + 1)]) - .step(GENESIS_STEP + 3) - .sign_by(&validator(3)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); - - // when we are 
accepting strict empty steps and they come not in order - config.strict_empty_steps_transition = 0; - let header = HeaderBuilder::with_parent(&genesis()) - .empty_steps(&[(&validator(2), GENESIS_STEP + 2), (&validator(1), GENESIS_STEP + 1)]) - .step(GENESIS_STEP + 3) - .sign_by(&validator(3)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); - - // when empty steps are OK - let header = HeaderBuilder::with_parent(&genesis()) - .empty_steps(&[(&validator(1), GENESIS_STEP + 1), (&validator(2), GENESIS_STEP + 2)]) - .step(GENESIS_STEP + 3) - .sign_by(&validator(3)); - assert_ne!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); - } - - #[test] - fn verifies_chain_score() { - let mut config = test_aura_config(); - config.validate_score_transition = 0; - - // when chain score is invalid - let header = HeaderBuilder::with_parent(&genesis()) - .difficulty(100.into()) - .sign_by(&validator(0)); - assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidDifficulty)); - - // when chain score is accepted - let header = HeaderBuilder::with_parent(&genesis()).sign_by(&validator(0)); - assert_ne!(verify_with_config(&config, &header), Err(Error::InvalidDifficulty)); - } - - #[test] - fn verifies_validator() { - let good_header = HeaderBuilder::with_parent(&genesis()).sign_by(&validator(1)); - - // when header author is invalid - let mut header = good_header.clone(); - header.author = Default::default(); - assert_eq!(default_verify(&header), Err(Error::NotValidator)); - - // when header signature is invalid - let mut header = good_header.clone(); - header.seal[1] = rlp_encode(&H520::default()).to_vec(); - assert_eq!(default_verify(&header), Err(Error::NotValidator)); - - // when everything is OK - assert_eq!(default_verify(&good_header).map(|_| ()), Ok(())); - } - - #[test] - fn pool_verifies_known_blocks() { - // when header is known - assert_eq!( - default_accept_into_pool(|validators| 
(HeaderBuilder::with_parent_number(2).sign_by_set(validators), None)), - Err(Error::KnownHeader), - ); - } - - #[test] - fn pool_verifies_ancient_blocks() { - // when header number is less than finalized - assert_eq!( - default_accept_into_pool(|validators| ( - HeaderBuilder::with_parent_number(1) - .gas_limit((GAS_LIMIT + 1).into()) - .sign_by_set(validators), - None, - ),), - Err(Error::AncientHeader), - ); - } - - #[test] - fn pool_rejects_headers_without_required_receipts() { - assert_eq!( - default_accept_into_pool(|_| ( - AuraHeader { - number: 20_000_000, - seal: vec![vec![], vec![]], - gas_limit: test_aura_config().min_gas_limit, - log_bloom: (&[0xff; 256]).into(), - ..Default::default() - }, - None, - ),), - Err(Error::MissingTransactionsReceipts), - ); - } - - #[test] - fn pool_rejects_headers_with_redundant_receipts() { - assert_eq!( - default_accept_into_pool(|validators| ( - HeaderBuilder::with_parent_number(3).sign_by_set(validators), - Some(vec![Receipt { - gas_used: 1.into(), - log_bloom: (&[0xff; 256]).into(), - logs: vec![], - outcome: TransactionOutcome::Unknown, - }]), - ),), - Err(Error::RedundantTransactionsReceipts), - ); - } - - #[test] - fn pool_verifies_future_block_number() { - // when header is too far from the future - assert_eq!( - default_accept_into_pool(|validators| (HeaderBuilder::with_number(100).sign_by_set(&validators), None),), - Err(Error::UnsignedTooFarInTheFuture), - ); - } - - #[test] - fn pool_performs_full_verification_when_parent_is_known() { - // if parent is known, then we'll execute contextual_checks, which - // checks for DoubleVote - assert_eq!( - default_accept_into_pool(|validators| ( - HeaderBuilder::with_parent_number(3) - .step(GENESIS_STEP + 3) - .sign_by_set(&validators), - None, - ),), - Err(Error::DoubleVote), - ); - } - - #[test] - fn pool_performs_validators_checks_when_parent_is_unknown() { - // if parent is unknown, then we still need to check if header has required signature - // (even if header will 
be considered invalid/duplicate later, we can use this signature - // as a proof of malicious action by this validator) - assert_eq!( - default_accept_into_pool(|_| (HeaderBuilder::with_number(8).step(8).sign_by(&validator(1)), None,)), - Err(Error::NotValidator), - ); - } - - #[test] - fn pool_verifies_header_with_known_parent() { - let mut hash = None; - assert_eq!( - default_accept_into_pool(|validators| { - let header = HeaderBuilder::with_parent_number(3).sign_by_set(validators); - hash = Some(header.compute_hash()); - (header, None) - }), - Ok(( - // no tags are required - vec![], - // header provides two tags - vec![ - (4u64, validators_addresses(3)[1]).encode(), - (4u64, hash.unwrap()).encode(), - ], - )), - ); - } - - #[test] - fn pool_verifies_header_with_unknown_parent() { - let mut id = None; - let mut parent_id = None; - assert_eq!( - default_accept_into_pool(|validators| { - let header = HeaderBuilder::with_number(5) - .step(GENESIS_STEP + 5) - .sign_by_set(validators); - id = Some(header.compute_id()); - parent_id = header.parent_id(); - (header, None) - }), - Ok(( - // parent tag required - vec![parent_id.unwrap().encode()], - // header provides two tags - vec![(5u64, validator_address(2)).encode(), id.unwrap().encode(),], - )), - ); - } - - #[test] - fn pool_uses_next_validators_set_when_finalized_fails() { - assert_eq!( - default_accept_into_pool(|actual_validators| { - // change finalized set at parent header - change_validators_set_at(3, validators_addresses(1), None); - - // header is signed using wrong set - let header = HeaderBuilder::with_number(5) - .step(GENESIS_STEP + 2) - .sign_by_set(actual_validators); - - (header, None) - }), - Err(Error::NotValidator), - ); - - let mut id = None; - let mut parent_id = None; - assert_eq!( - default_accept_into_pool(|actual_validators| { - // change finalized set at parent header + signal valid set at parent block - change_validators_set_at(3, validators_addresses(10), Some(validators_addresses(3))); - 
- // header is signed using wrong set - let header = HeaderBuilder::with_number(5) - .step(GENESIS_STEP + 2) - .sign_by_set(actual_validators); - id = Some(header.compute_id()); - parent_id = header.parent_id(); - - (header, None) - }), - Ok(( - // parent tag required - vec![parent_id.unwrap().encode(),], - // header provides two tags - vec![(5u64, validator_address(2)).encode(), id.unwrap().encode(),], - )), - ); - } - - #[test] - fn pool_rejects_headers_with_invalid_receipts() { - assert_eq!( - default_accept_into_pool(|validators| { - let header = HeaderBuilder::with_parent_number(3) - .log_bloom((&[0xff; 256]).into()) - .sign_by_set(validators); - (header, Some(vec![validators_change_receipt(Default::default())])) - }), - Err(Error::TransactionsReceiptsMismatch), - ); - } - - #[test] - fn pool_accepts_headers_with_valid_receipts() { - let mut hash = None; - let receipts = vec![validators_change_receipt(Default::default())]; - let receipts_root = compute_merkle_root(receipts.iter().map(|r| r.rlp())); - - assert_eq!( - default_accept_into_pool(|validators| { - let header = HeaderBuilder::with_parent_number(3) - .log_bloom((&[0xff; 256]).into()) - .receipts_root(receipts_root) - .sign_by_set(validators); - hash = Some(header.compute_hash()); - (header, Some(receipts.clone())) - }), - Ok(( - // no tags are required - vec![], - // header provides two tags - vec![ - (4u64, validators_addresses(3)[1]).encode(), - (4u64, hash.unwrap()).encode(), - ], - )), - ); - } -} diff --git a/polkadot/bridges/modules/finality-verifier/Cargo.toml b/polkadot/bridges/modules/finality-verifier/Cargo.toml deleted file mode 100644 index 35d4d6880aa94cdcac0cb0bd47b3c5dd5fada16a..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/finality-verifier/Cargo.toml +++ /dev/null @@ -1,45 +0,0 @@ -[package] -name = "pallet-finality-verifier" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -# 
See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -finality-grandpa = { version = "0.14.0", default-features = false } -serde = { version = "1.0", optional = true } - -# Bridge Dependencies - -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[dev-dependencies] -bp-test-utils = {path = "../../primitives/test-utils" } -pallet-substrate-bridge = { path = "../../modules/substrate" } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } - - -[features] -default = ["std"] -std = [ - "bp-runtime/std", - "bp-header-chain/std", - "codec/std", - "finality-grandpa/std", - "frame-support/std", - "frame-system/std", - "serde", - "sp-runtime/std", - "sp-std/std", -] diff --git a/polkadot/bridges/modules/finality-verifier/src/lib.rs b/polkadot/bridges/modules/finality-verifier/src/lib.rs deleted file mode 100644 index d799cc27f38aafb2c83476bc31ae2a4403cc47d2..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/finality-verifier/src/lib.rs +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright 2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate Finality Verifier Pallet -//! -//! The goal of this pallet is to provide a safe interface for writing finalized headers to an -//! external pallet which tracks headers and finality proofs. By safe, we mean that only headers -//! whose finality has been verified will be written to the underlying pallet. -//! -//! By verifying the finality of headers before writing them to storage we prevent DoS vectors in -//! which unfinalized headers get written to storage even if they don't have a chance of being -//! finalized in the future (such as in the case where a different fork gets finalized). -//! -//! The underlying pallet used for storage is assumed to be a pallet which tracks headers and -//! GRANDPA authority set changes. This information is used during the verification of GRANDPA -//! finality proofs. 
- -#![cfg_attr(not(feature = "std"), no_std)] -// Runtime-generated enums -#![allow(clippy::large_enum_variant)] - -use bp_header_chain::{justification::verify_justification, AncestryChecker, HeaderChain}; -use bp_runtime::{Chain, HeaderOf}; -use finality_grandpa::voter_set::VoterSet; -use frame_support::{dispatch::DispatchError, ensure}; -use frame_system::ensure_signed; -use sp_runtime::traits::Header as HeaderT; -use sp_std::vec::Vec; - -#[cfg(test)] -mod mock; - -// Re-export in crate namespace for `construct_runtime!` -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - /// Header of the bridged chain. - pub(crate) type BridgedHeader = HeaderOf<::BridgedChain>; - - #[pallet::config] - pub trait Config: frame_system::Config { - /// The chain we are bridging to here. - type BridgedChain: Chain; - - /// The pallet which we will use as our underlying storage mechanism. - type HeaderChain: HeaderChain<::Header, DispatchError>; - - /// The type of ancestry proof used by the pallet. - /// - /// Will be used by the ancestry checker to verify that the header being finalized is - /// related to the best finalized header in storage. - type AncestryProof: Parameter; - - /// The type through which we will verify that a given header is related to the last - /// finalized header in our storage pallet. - type AncestryChecker: AncestryChecker<::Header, Self::AncestryProof>; - - /// The upper bound on the number of requests allowed by the pallet. - /// - /// Once this bound is reached the pallet will not allow any dispatchables to be called - /// until the request count has decreased. 
- #[pallet::constant] - type MaxRequests: Get; - } - - #[pallet::pallet] - pub struct Pallet(PhantomData); - - #[pallet::hooks] - impl Hooks> for Pallet { - fn on_initialize(_n: T::BlockNumber) -> frame_support::weights::Weight { - >::mutate(|count| *count = count.saturating_sub(1)); - - (0_u64) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - } - - #[pallet::call] - impl Pallet { - /// Verify a target header is finalized according to the given finality proof. - /// - /// It will use the underlying storage pallet to fetch information about the current - /// authorities and best finalized header in order to verify that the header is finalized. - /// - /// If successful in verification, it will write the target header to the underlying storage - /// pallet. - #[pallet::weight(0)] - pub fn submit_finality_proof( - origin: OriginFor, - finality_target: BridgedHeader, - justification: Vec, - ancestry_proof: T::AncestryProof, - ) -> DispatchResultWithPostInfo { - let _ = ensure_signed(origin)?; - - ensure!( - Self::request_count() < T::MaxRequests::get(), - >::TooManyRequests - ); - >::mutate(|count| *count += 1); - - frame_support::debug::trace!("Going to try and finalize header {:?}", finality_target); - - let authority_set = T::HeaderChain::authority_set(); - let voter_set = VoterSet::new(authority_set.authorities).ok_or(>::InvalidAuthoritySet)?; - let set_id = authority_set.set_id; - - let (hash, number) = (finality_target.hash(), *finality_target.number()); - verify_justification::>((hash, number), set_id, voter_set, &justification).map_err( - |e| { - frame_support::debug::error!("Received invalid justification for {:?}: {:?}", finality_target, e); - >::InvalidJustification - }, - )?; - - let best_finalized = T::HeaderChain::best_finalized(); - frame_support::debug::trace!("Checking ancestry against best finalized header: {:?}", &best_finalized); - - ensure!( - T::AncestryChecker::are_ancestors(&best_finalized, 
&finality_target, &ancestry_proof), - >::InvalidAncestryProof - ); - - T::HeaderChain::append_header(finality_target); - frame_support::debug::info!("Succesfully imported finalized header with hash {:?}!", hash); - - Ok(().into()) - } - } - - /// The current number of requests for calling dispatchables. - /// - /// If the `RequestCount` hits `MaxRequests`, no more calls will be allowed to the pallet until - /// the request capacity is increased. - /// - /// The `RequestCount` is decreased by one at the beginning of every block. This is to ensure - /// that the pallet can always make progress. - #[pallet::storage] - #[pallet::getter(fn request_count)] - pub(super) type RequestCount = StorageValue<_, u32, ValueQuery>; - - #[pallet::error] - pub enum Error { - /// The given justification is invalid for the given header. - InvalidJustification, - /// The given ancestry proof is unable to verify that the child and ancestor headers are - /// related. - InvalidAncestryProof, - /// The authority set from the underlying header chain is invalid. - InvalidAuthoritySet, - /// Failed to write a header to the underlying header chain. - FailedToWriteHeader, - /// There are too many requests for the current window to handle. 
- TooManyRequests, - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{run_test, test_header, Origin, TestRuntime}; - use bp_test_utils::{authority_list, make_justification_for_header}; - use codec::Encode; - use frame_support::{assert_err, assert_ok}; - - fn initialize_substrate_bridge() { - let genesis = test_header(0); - - let init_data = pallet_substrate_bridge::InitializationData { - header: genesis, - authority_list: authority_list(), - set_id: 1, - scheduled_change: None, - is_halted: false, - }; - - assert_ok!(pallet_substrate_bridge::Module::::initialize( - Origin::root(), - init_data - )); - } - - fn submit_finality_proof() -> frame_support::dispatch::DispatchResultWithPostInfo { - let child = test_header(1); - let header = test_header(2); - - let set_id = 1; - let grandpa_round = 1; - let justification = make_justification_for_header(&header, grandpa_round, set_id, &authority_list()).encode(); - let ancestry_proof = vec![child, header.clone()]; - - Module::::submit_finality_proof(Origin::signed(1), header, justification, ancestry_proof) - } - - fn next_block() { - use frame_support::traits::OnInitialize; - - let current_number = frame_system::Pallet::::block_number(); - frame_system::Pallet::::set_block_number(current_number + 1); - let _ = Module::::on_initialize(current_number); - } - - #[test] - fn succesfully_imports_header_with_valid_finality_and_ancestry_proofs() { - run_test(|| { - initialize_substrate_bridge(); - - assert_ok!(submit_finality_proof()); - - let header = test_header(2); - assert_eq!( - pallet_substrate_bridge::Module::::best_headers(), - vec![(*header.number(), header.hash())] - ); - - assert_eq!(pallet_substrate_bridge::Module::::best_finalized(), header); - }) - } - - #[test] - fn rejects_justification_that_skips_authority_set_transition() { - run_test(|| { - initialize_substrate_bridge(); - - let child = test_header(1); - let header = test_header(2); - - let set_id = 2; - let grandpa_round = 1; - let 
justification = - make_justification_for_header(&header, grandpa_round, set_id, &authority_list()).encode(); - let ancestry_proof = vec![child, header.clone()]; - - assert_err!( - Module::::submit_finality_proof(Origin::signed(1), header, justification, ancestry_proof,), - >::InvalidJustification - ); - }) - } - - #[test] - fn does_not_import_header_with_invalid_finality_proof() { - run_test(|| { - initialize_substrate_bridge(); - - let child = test_header(1); - let header = test_header(2); - - let justification = [1u8; 32].encode(); - let ancestry_proof = vec![child, header.clone()]; - - assert_err!( - Module::::submit_finality_proof(Origin::signed(1), header, justification, ancestry_proof,), - >::InvalidJustification - ); - }) - } - - #[test] - fn does_not_import_header_with_invalid_ancestry_proof() { - run_test(|| { - initialize_substrate_bridge(); - - let header = test_header(2); - - let set_id = 1; - let grandpa_round = 1; - let justification = - make_justification_for_header(&header, grandpa_round, set_id, &authority_list()).encode(); - - // For testing, we've made it so that an empty ancestry proof is invalid - let ancestry_proof = vec![]; - - assert_err!( - Module::::submit_finality_proof(Origin::signed(1), header, justification, ancestry_proof,), - >::InvalidAncestryProof - ); - }) - } - - #[test] - fn disallows_invalid_authority_set() { - run_test(|| { - use bp_test_utils::{alice, bob}; - - let genesis = test_header(0); - - let invalid_authority_list = vec![(alice(), u64::MAX), (bob(), u64::MAX)]; - let init_data = pallet_substrate_bridge::InitializationData { - header: genesis, - authority_list: invalid_authority_list, - set_id: 1, - scheduled_change: None, - is_halted: false, - }; - - assert_ok!(pallet_substrate_bridge::Module::::initialize( - Origin::root(), - init_data - )); - - let header = test_header(1); - let justification = [1u8; 32].encode(); - let ancestry_proof = vec![]; - - assert_err!( - Module::::submit_finality_proof(Origin::signed(1), 
header, justification, ancestry_proof,), - >::InvalidAuthoritySet - ); - }) - } - - #[test] - fn disallows_imports_once_limit_is_hit_in_single_block() { - run_test(|| { - initialize_substrate_bridge(); - assert_ok!(submit_finality_proof()); - assert_ok!(submit_finality_proof()); - assert_err!(submit_finality_proof(), >::TooManyRequests); - }) - } - - #[test] - fn allows_request_after_new_block_has_started() { - run_test(|| { - initialize_substrate_bridge(); - assert_ok!(submit_finality_proof()); - assert_ok!(submit_finality_proof()); - - next_block(); - assert_ok!(submit_finality_proof()); - }) - } - - #[test] - fn disallows_imports_once_limit_is_hit_across_different_blocks() { - run_test(|| { - initialize_substrate_bridge(); - assert_ok!(submit_finality_proof()); - assert_ok!(submit_finality_proof()); - - next_block(); - assert_ok!(submit_finality_proof()); - assert_err!(submit_finality_proof(), >::TooManyRequests); - }) - } - - #[test] - fn allows_max_requests_after_long_time_with_no_activity() { - run_test(|| { - initialize_substrate_bridge(); - assert_ok!(submit_finality_proof()); - assert_ok!(submit_finality_proof()); - - next_block(); - next_block(); - - next_block(); - assert_ok!(submit_finality_proof()); - assert_ok!(submit_finality_proof()); - }) - } -} diff --git a/polkadot/bridges/modules/finality-verifier/src/mock.rs b/polkadot/bridges/modules/finality-verifier/src/mock.rs deleted file mode 100644 index d87af925990efbc064d67a04782b7684ddb5a977..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/finality-verifier/src/mock.rs +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -// From construct_runtime macro -#![allow(clippy::from_over_into)] - -use crate::pallet::{BridgedHeader, Config}; -use bp_runtime::{BlockNumberOf, Chain}; -use frame_support::{construct_runtime, parameter_types, weights::Weight}; -use sp_runtime::{ - testing::{Header, H256}, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, -}; - -pub type AccountId = u64; -pub type TestHeader = BridgedHeader; -pub type TestNumber = BlockNumberOf<::BridgedChain>; - -type Block = frame_system::mocking::MockBlock; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - -use crate as finality_verifier; - -construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Bridge: pallet_substrate_bridge::{Pallet}, - FinalityVerifier: finality_verifier::{Pallet}, - } -} - -parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - -impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = (); - type SystemWeightInfo = (); - type DbWeight = (); - type BlockWeights = (); - type BlockLength = (); - type SS58Prefix = (); - type OnSetCode = (); -} - -impl pallet_substrate_bridge::Config for TestRuntime { - type BridgedChain = TestBridgedChain; -} - -parameter_types! { - pub const MaxRequests: u32 = 2; -} - -impl finality_verifier::Config for TestRuntime { - type BridgedChain = TestBridgedChain; - type HeaderChain = pallet_substrate_bridge::Module; - type AncestryProof = Vec<::Header>; - type AncestryChecker = Checker<::Header, Self::AncestryProof>; - type MaxRequests = MaxRequests; -} - -#[derive(Debug)] -pub struct TestBridgedChain; - -impl Chain for TestBridgedChain { - type BlockNumber = ::BlockNumber; - type Hash = ::Hash; - type Hasher = ::Hashing; - type Header = ::Header; -} - -#[derive(Debug)] -pub struct Checker(std::marker::PhantomData<(H, P)>); - -impl bp_header_chain::AncestryChecker> for Checker> { - fn are_ancestors(_ancestor: &H, _child: &H, proof: &Vec) -> bool { - !proof.is_empty() - } -} - -pub fn run_test(test: impl FnOnce() -> T) -> T { - sp_io::TestExternalities::new(Default::default()).execute_with(test) -} - -pub fn test_header(num: TestNumber) -> TestHeader { - // We wrap the call to avoid explicit type annotations in our tests - 
bp_test_utils::test_header(num) -} diff --git a/polkadot/bridges/modules/message-lane/Cargo.toml b/polkadot/bridges/modules/message-lane/Cargo.toml deleted file mode 100644 index abbfb6076124f75a96818285f0a8d6ebfb895f86..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/message-lane/Cargo.toml +++ /dev/null @@ -1,51 +0,0 @@ -[package] -name = "pallet-message-lane" -description = "Module that allows bridged chains to exchange messages using lane concept." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -num-traits = { version = "0.2", default-features = false } -serde = { version = "1.0.101", optional = true, features = ["derive"] } - -# Bridge dependencies - -bp-message-lane = { path = "../../primitives/message-lane", default-features = false } -bp-rialto = { path = "../../primitives/rialto", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[dev-dependencies] -hex-literal = "0.3" -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-balances = { git = 
"https://github.com/paritytech/substrate", branch = "master" } - -[features] -default = ["std"] -std = [ - "bp-message-lane/std", - "bp-runtime/std", - "bp-rialto/std", - "codec/std", - "frame-support/std", - "frame-system/std", - "num-traits/std", - "serde", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", -] -runtime-benchmarks = [ - "frame-benchmarking", -] diff --git a/polkadot/bridges/modules/message-lane/README.md b/polkadot/bridges/modules/message-lane/README.md deleted file mode 100644 index a732042bd0d4998c49657127a6e402f76504d1a5..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/message-lane/README.md +++ /dev/null @@ -1,391 +0,0 @@ -# Message Lane Module - -The message lane module is used to deliver messages from source chain to target chain. Message is -(almost) opaque to the module and the final goal is to hand message to the message dispatch -mechanism. - -## Contents -- [Overview](#overview) -- [Message Workflow](#message-workflow) -- [Integrating Message Lane Module into Runtime](#integrating-message-lane-module-into-runtime) -- [Non-Essential Functionality](#non-essential-functionality) -- [Weights of Module Extrinsics](#weights-of-module-extrinsics) - -## Overview - -Message lane is an unidirectional channel, where messages are sent from source chain to the target -chain. At the same time, a single instance of message lane module supports both outbound lanes and -inbound lanes. So the chain where the module is deployed (this chain), may act as a source chain for -outbound messages (heading to a bridged chain) and as a target chain for inbound messages (coming -from a bridged chain). - -Message lane module supports multiple message lanes. Every message lane is identified with a 4-byte -identifier. Messages sent through the lane are assigned unique (for this lane) increasing integer -value that is known as nonce ("number that can only be used once"). 
Messages that are sent over the -same lane are guaranteed to be delivered to the target chain in the same order they're sent from -the source chain. In other words, message with nonce `N` will be delivered right before delivering a -message with nonce `N+1`. - -Single message lane may be seen as a transport channel for single application (onchain, offchain or -mixed). At the same time the module itself never dictates any lane or message rules. In the end, it -is the runtime developer who defines what message lane and message mean for this runtime. - -## Message Workflow - -The message "appears" when its submitter calls the `send_message()` function of the module. The -submitter specifies the lane that he's willing to use, the message itself and the fee that he's -willing to pay for the message delivery and dispatch. If a message passes all checks, the nonce is -assigned and the message is stored in the module storage. The message is in an "undelivered" state -now. - -We assume that there are external, offchain actors, called relayers, that are submitting module -related transactions to both target and source chains. The pallet itself has no assumptions about -relayers incentivization scheme, but it has some callbacks for paying rewards. See -[Integrating Message Lane Module into runtime](#Integrating-Message-Lane-Module-into-runtime) -for details. - -Eventually, some relayer would notice this message in the "undelivered" state and it would decide to -deliver this message. Relayer then crafts `receive_messages_proof()` transaction (aka delivery -transaction) for the message lane module instance, deployed at the target chain. Relayer provides -his account id at the source chain, the proof of message (or several messages), the number of -messages in the transaction and their cumulative dispatch weight. Once a transaction is mined, the -message is considered "delivered". - -Once a message is delivered, the relayer may want to confirm delivery back to the source chain. 
-There are two reasons why he would want to do that. The first is that we intentionally limit number -of "delivered", but not yet "confirmed" messages at inbound lanes -(see [What about other Constants in the Message Lane Module Configuration Trait](#What-about-other-Constants-in-the-Message-Lane-Module-Configuration-Trait) for explanation). -So at some point, the target chain may stop accepting new messages until relayers confirm some of -these. The second is that if the relayer wants to be rewarded for delivery, he must prove the fact -that he has actually delivered the message. And this proof may only be generated after the delivery -transaction is mined. So relayer crafts the `receive_messages_delivery_proof()` transaction (aka -confirmation transaction) for the message lane module instance, deployed at the source chain. Once -this transaction is mined, the message is considered "confirmed". - -The "confirmed" state is the final state of the message. But there's one last thing related to the -message - the fact that it is now "confirmed" and reward has been paid to the relayer (or at least -callback for this has been called), must be confirmed to the target chain. Otherwise, we may reach -the limit of "unconfirmed" messages at the target chain and it will stop accepting new messages. So -relayer sometimes includes a nonce of the latest "confirmed" message in the next -`receive_messages_proof()` transaction, proving that some messages have been confirmed. - -## Integrating Message Lane Module into Runtime - -As it has been said above, the message lane module supports both outbound and inbound message lanes. -So if we will integrate a module in some runtime, it may act as the source chain runtime for -outbound messages and as the target chain runtime for inbound messages. In this section, we'll -sometimes refer to the chain we're currently integrating with, as this chain and the other chain as -bridged chain. 
- -Message lane module doesn't simply accept transactions that are claiming that the bridged chain has -some updated data for us. Instead of this, the module assumes that the bridged chain is able to -prove that updated data in some way. The proof is abstracted from the module and may be of any kind. -In our Substrate-to-Substrate bridge we're using runtime storage proofs. Other bridges may use -transaction proofs, Substrate header digests or anything else that may be proved. - -**IMPORTANT NOTE**: everything below in this chapter describes details of the message lane module -configuration. But if you interested in well-probed and relatively easy integration of two -Substrate-based chains, you may want to look at the -[bridge-runtime-common](../../bin/runtime-common/README.md) crate. This crate is providing a lot of -helpers for integration, which may be directly used from within your runtime. Then if you'll decide -to change something in this scheme, get back here for detailed information. - -### General Information - -The message lane module supports instances. Every module instance is supposed to bridge this chain -and some bridged chain. To bridge with another chain, using another instance is suggested (this -isn't forced anywhere in the code, though). - -Message submitters may track message progress by inspecting module events. When Message is accepted, -the `MessageAccepted` event is emitted in the `send_message()` transaction. The event contains both -message lane identifier and nonce that has been assigned to the message. When a message is delivered -to the target chain, the `MessagesDelivered` event is emitted from the -`receive_messages_delivery_proof()` transaction. The `MessagesDelivered` contains the message lane -identifier and inclusive range of delivered message nonces. - -### How to plug-in Message Lane Module to Send Messages to the Bridged Chain? 
- -The `pallet_message_lane::Config` trait has 3 main associated types that are used to work with -outbound messages. The `pallet_message_lane::Config::TargetHeaderChain` defines how we see the -bridged chain as the target for our outbound messages. It must be able to check that the bridged -chain may accept our message - like that the message has size below maximal possible transaction -size of the chain and so on. And when the relayer sends us a confirmation transaction, this -implementation must be able to parse and verify the proof of messages delivery. Normally, you would -reuse the same (configurable) type on all chains that are sending messages to the same bridged -chain. - -The `pallet_message_lane::Config::LaneMessageVerifier` defines a single callback to verify outbound -messages. The simplest callback may just accept all messages. But in this case you'll need to answer -many questions first. Who will pay for the delivery and confirmation transaction? Are we sure that -someone will ever deliver this message to the bridged chain? Are we sure that we don't bloat our -runtime storage by accepting this message? What if the message is improperly encoded or has some -fields set to invalid values? Answering all those (and similar) questions would lead to correct -implementation. - -There's another thing to consider when implementing type for use in -`pallet_message_lane::Config::LaneMessageVerifier`. It is whether we treat all message lanes -identically, or they'll have different sets of verification rules? For example, you may reserve -lane#1 for messages coming from some 'wrapped-token' pallet - then you may verify in your -implementation that the origin is associated with this pallet. Lane#2 may be reserved for 'system' -messages and you may charge zero fee for such messages. You may have some rate limiting for messages -sent over the lane#3. 
Or you may just verify the same rules set for all outbound messages - it is -all up to the `pallet_message_lane::Config::LaneMessageVerifier` implementation. - -The last type is the `pallet_message_lane::Config::MessageDeliveryAndDispatchPayment`. When all -checks are made and we have decided to accept the message, we're calling the -`pay_delivery_and_dispatch_fee()` callback, passing the corresponding argument of the `send_message` -function. Later, when message delivery is confirmed, we're calling `pay_relayers_rewards()` -callback, passing accounts of relayers and messages that they have delivered. The simplest -implementation of this trait is in the [`instant_payments.rs`](./src/instant_payments.rs) module and -simply calls `Currency::transfer()` when those callbacks are called. So `Currency` units are -transferred between submitter, 'relayers fund' and relayers accounts. Other implementations may use -more or less sophisticated techniques - the whole relayers incentivization scheme is not a part of -the message lane module. - -### I have a Message Lane Module in my Runtime, but I Want to Reject all Outbound Messages. What shall I do? - -You should be looking at the `bp_message_lane::source_chain::ForbidOutboundMessages` structure -[`bp_message_lane::source_chain`](../../primitives/message-lane/src/source_chain.rs). It implements -all required traits and will simply reject all transactions, related to outbound messages. - -### How to plug-in Message Lane Module to Receive Messages from the Bridged Chain? - -The `pallet_message_lane::Config` trait has 2 main associated types that are used to work with -inbound messages. The `pallet_message_lane::Config::SourceHeaderChain` defines how we see the -bridged chain as the source or our inbound messages. When relayer sends us a delivery transaction, -this implementation must be able to parse and verify the proof of messages wrapped in this -transaction. 
Normally, you would reuse the same (configurable) type on all chains that are sending -messages to the same bridged chain. - -The `pallet_message_lane::Config::MessageDispatch` defines a way on how to dispatch delivered -messages. Apart from actually dispatching the message, the implementation must return the correct -dispatch weight of the message before dispatch is called. - -### I have a Message Lane Module in my Runtime, but I Want to Reject all Inbound Messages. What -shall I do? - -You should be looking at the `bp_message_lane::target_chain::ForbidInboundMessages` structure from -the [`bp_message_lane::target_chain`](../../primitives/message-lane/src/target_chain.rs) module. It -implements all required traits and will simply reject all transactions, related to inbound messages. - -### What about other Constants in the Message Lane Module Configuration Trait? - -Message is being stored in the source chain storage until its delivery will be confirmed. After -that, we may safely remove the message from the storage. Lane messages are removed (pruned) when -someone sends a new message using the same lane. So the message submitter pays for that pruning. To -avoid pruning too many messages in a single transaction, there's -`pallet_message_lane::Config::MaxMessagesToPruneAtOnce` configuration parameter. We will never prune -more than this number of messages in the single transaction. That said, the value should not be too -big to avoid waste of resources when there are no messages to prune. - -To be able to reward the relayer for delivering messages, we store a map of message nonces range => -identifier of the relayer that has delivered this range at the target chain runtime storage. If a -relayer delivers multiple consequent ranges, they're merged into single entry. So there may be more -than one entry for the same relayer. Eventually, this whole map must be delivered back to the source -chain to confirm delivery and pay rewards. 
So to make sure we are able to craft this confirmation -transaction, we need to: (1) keep the size of this map below a certain limit and (2) make sure that -the weight of processing this map is below a certain limit. Both size and processing weight mostly -depend on the number of entries. The number of entries is limited with the -`pallet_message_lane::ConfigMaxUnrewardedRelayerEntriesAtInboundLane` parameter. Processing weight -also depends on the total number of messages that are being confirmed, because every confirmed -message needs to be read. So there's another -`pallet_message_lane::Config::MaxUnconfirmedMessagesAtInboundLane` parameter for that. - -When choosing values for these parameters, you must also keep in mind that if proof in your scheme -is based on finality of headers (and it is the most obvious option for Substrate-based chains with -finality notion), then choosing too small values for these parameters may cause significant delays -in message delivery. That's because there too many actors involved in this scheme: 1) authorities -that are finalizing headers of the target chain need to finalize header with non-empty map; 2) the -headers relayer then needs to submit this header and its finality proof to the source chain; 3) the -messages relayer must then send confirmation transaction (storage proof of this map) to the source -chain; 4) when the confirmation transaction will be mined at some header, source chain authorities -must finalize this header; 5) the headers relay then needs to submit this header and its finality -proof to the target chain; 6) only now the messages relayer may submit new messages from the source -to target chain and prune the entry from the map. - -Delivery transaction requires the relayer to provide both number of entries and total number of -messages in the map. 
This means that the module never charges an extra cost for delivering a map - -the relayer would need to pay exactly for the number of entries+messages it has delivered. So the -best guess for values of these parameters would be the pair that would occupy `N` percent of the -maximal transaction size and weight of the source chain. The `N` should be large enough to process -large maps, at the same time keeping reserve for future source chain upgrades. - -## Non-Essential Functionality - -Apart from the message related calls, the module exposes a set of auxiliary calls. They fall in two -groups, described in the next two paragraphs. - -There may be a special account in every runtime where the message lane module is deployed. This -account, named 'module owner', is like a module-level sudo account - he's able to halt all and -result all module operations without requiring runtime upgrade. The module may have no message -owner, but we suggest to use it at least for initial deployment. To calls that are related to this -account are: -- `fn set_owner()`: current module owner may call it to transfer "ownership" to another account; -- `fn halt_operations()`: the module owner (or sudo account) may call this function to stop all - module operations. After this call, all message-related transactions will be rejected until - further `resume_operations` call'. This call may be used when something extraordinary happens with - the bridge; -- `fn resume_operations()`: module owner may call this function to resume bridge operations. The - module will resume its regular operations after this call. - -Apart from halting and resuming the bridge, the module owner may also tune module configuration -parameters without runtime upgrades. The set of parameters needs to be designed in advance, though. -The module configuration trait has associated `Parameter` type, which may be e.g. enum and represent -a set of parameters that may be updated by the module owner. 
For example, if your bridge needs to -convert sums between different tokens, you may define a 'conversion rate' parameter and let the -module owner update this parameter when there are significant changes in the rate. The corresponding -module call is `fn update_pallet_parameter()`. - -## Weights of Module Extrinsics - -The main assumptions behind weight formulas is: -- all possible costs are paid in advance by the message submitter; -- whenever possible, relayer tries to minimize cost of its transactions. So e.g. even though sender - always pays for delivering outbound lane state proof, relayer may not include it in the delivery - transaction (unless message lane module on target chain requires that); -- weight formula should incentivize relayer to not to submit any redundant data in the extrinsics - arguments; -- the extrinsic shall never be executing slower (i.e. has larger actual weight) than defined by the - formula. - -### Weight of `send_message` call - -#### Related benchmarks - -| Benchmark | Description | -|-----------------------------------|-----------------------------------------------------| -`send_minimal_message_worst_case` | Sends 0-size message with worst possible conditions | -`send_1_kb_message_worst_case` | Sends 1KB-size message with worst possible conditions | -`send_16_kb_message_worst_case` | Sends 16KB-size message with worst possible conditions | - -#### Weight formula - -The weight formula is: -``` -Weight = BaseWeight + MessageSizeInKilobytes * MessageKiloByteSendWeight -``` - -Where: - -| Component | How it is computed? 
| Description | -|-----------------------------|------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------| -| `SendMessageOverhead` | `send_minimal_message_worst_case` | Weight of sending minimal (0 bytes) message | -| `MessageKiloByteSendWeight` | `(send_16_kb_message_worst_case - send_1_kb_message_worst_case)/15` | Weight of sending every additional kilobyte of the message | - -### Weight of `receive_messages_proof` call - -#### Related benchmarks - -| Benchmark | Description* | -|---------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------| -| `receive_single_message_proof` | Receives proof of single `EXPECTED_DEFAULT_MESSAGE_LENGTH` message | -| `receive_two_messages_proof` | Receives proof of two identical `EXPECTED_DEFAULT_MESSAGE_LENGTH` messages | -| `receive_single_message_proof_with_outbound_lane_state` | Receives proof of single `EXPECTED_DEFAULT_MESSAGE_LENGTH` message and proof of outbound lane state at the source chain | -| `receive_single_message_proof_1_kb` | Receives proof of single message. The proof has size of approximately 1KB** | -| `receive_single_message_proof_16_kb` | Receives proof of single message. The proof has size of approximately 16KB** | - -*\* - In all benchmarks all received messages are dispatched and their dispatch cost is near to zero* - -*\*\* - Trie leafs are assumed to have minimal values. The proof is derived from the minimal proof -by including more trie nodes. That's because according to `receive_message_proofs_with_large_leaf` -and `receive_message_proofs_with_extra_nodes` benchmarks, increasing proof by including more nodes -has slightly larger impact on performance than increasing values stored in leafs*. 
- -#### Weight formula - -The weight formula is: -``` -Weight = BaseWeight + OutboundStateDeliveryWeight - + MessagesCount * MessageDeliveryWeight - + MessagesDispatchWeight - + Max(0, ActualProofSize - ExpectedProofSize) * ProofByteDeliveryWeight -``` - -Where: - -| Component | How it is computed? | Description | -|-------------------------------|------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `BaseWeight` | `2*receive_single_message_proof - receive_two_messages_proof` | Weight of receiving and parsing minimal proof | -| `OutboundStateDeliveryWeight` | `receive_single_message_proof_with_outbound_lane_state - receive_single_message_proof` | Additional weight when proof includes outbound lane state | -| `MessageDeliveryWeight` | `receive_two_messages_proof - receive_single_message_proof` | Weight of of parsing and dispatching (without actual dispatch cost) of every message | -| `MessagesCount` | | Provided by relayer | -| `MessagesDispatchWeight` | | Provided by relayer | -| `ActualProofSize` | | Provided by relayer | -| `ExpectedProofSize` | `EXPECTED_DEFAULT_MESSAGE_LENGTH * MessagesCount + EXTRA_STORAGE_PROOF_SIZE` | Size of proof that we are expecting. This only includes `EXTRA_STORAGE_PROOF_SIZE` once, because we assume that intermediate nodes likely to be included in the proof only once. 
This may be wrong, but since weight of processing proof with many nodes is almost equal to processing proof with large leafs, additional cost will be covered because we're charging for extra proof bytes anyway | -| `ProofByteDeliveryWeight` | `(receive_single_message_proof_16_kb - receive_single_message_proof_1_kb) / (15 * 1024)` | Weight of processing every additional proof byte over `ExpectedProofSize` limit | - -#### Why for every message sent using `send_message` we will be able to craft `receive_messages_proof` transaction? - -We have following checks in `send_message` transaction on the source chain: -- message size should be less than or equal to `2/3` of maximal extrinsic size on the target chain; -- message dispatch weight should be less than or equal to the `1/2` of maximal extrinsic dispatch - weight on the target chain. - -Delivery transaction is an encoded delivery call and signed extensions. So we have `1/3` of maximal -extrinsic size reserved for: -- storage proof, excluding the message itself. Currently, on our test chains, the overhead is always - within `EXTRA_STORAGE_PROOF_SIZE` limits (1024 bytes); -- signed extras and other call arguments (`relayer_id: SourceChain::AccountId`, `messages_count: - u32`, `dispatch_weight: u64`). - -On Millau chain, maximal extrinsic size is `0.75 * 2MB`, so `1/3` is `512KB` (`524_288` bytes). This -should be enough to cover these extra arguments and signed extensions. - -Let's exclude message dispatch cost from single message delivery transaction weight formula: -``` -Weight = BaseWeight + OutboundStateDeliveryWeight + MessageDeliveryWeight - + Max(0, ActualProofSize - ExpectedProofSize) * ProofByteDeliveryWeight -``` - -So we have `1/2` of maximal extrinsic weight to cover these components. `BaseWeight`, -`OutboundStateDeliveryWeight` and `MessageDeliveryWeight` are determined using benchmarks and are -hardcoded into runtime. Adequate relayer would only include required trie nodes into the proof. 
So -if message size would be maximal (`2/3` of `MaximalExtrinsicSize`), then the extra proof size would -be `MaximalExtrinsicSize / 3 * 2 - EXPECTED_DEFAULT_MESSAGE_LENGTH`. - -Both conditions are verified by `pallet_message_lane::ensure_weights_are_correct` and -`pallet_message_lane::ensure_able_to_receive_messages` functions, which must be called from every -runtime's tests. - -### Weight of `receive_messages_delivery_proof` call - -#### Related benchmarks - -| Benchmark | Description | -|-------------------------------------------------------------|------------------------------------------------------------------------------------------| -| `receive_delivery_proof_for_single_message` | Receives proof of single message delivery | -| `receive_delivery_proof_for_two_messages_by_single_relayer` | Receives proof of two messages delivery. Both messages are delivered by the same relayer | -| `receive_delivery_proof_for_two_messages_by_two_relayers` | Receives proof of two messages delivery. Messages are delivered by different relayers | - -#### Weight formula - -The weight formula is: -``` -Weight = BaseWeight + MessagesCount * MessageConfirmationWeight - + RelayersCount * RelayerRewardWeight - + Max(0, ActualProofSize - ExpectedProofSize) * ProofByteDeliveryWeight -``` - -Where: - -| Component | How it is computed? 
| Description | -|---------------------------|-----------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `BaseWeight` | `2*receive_delivery_proof_for_single_message - receive_delivery_proof_for_two_messages_by_single_relayer` | Weight of receiving and parsing minimal delivery proof | -| `MessageDeliveryWeight` | `receive_delivery_proof_for_two_messages_by_single_relayer - receive_delivery_proof_for_single_message` | Weight of confirming every additional message | -| `MessagesCount` | | Provided by relayer | -| `RelayerRewardWeight` | `receive_delivery_proof_for_two_messages_by_two_relayers - receive_delivery_proof_for_two_messages_by_single_relayer` | Weight of rewarding every additional relayer | -| `RelayersCount` | | Provided by relayer | -| `ActualProofSize` | | Provided by relayer | -| `ExpectedProofSize` | `EXTRA_STORAGE_PROOF_SIZE` | Size of proof that we are expecting | -| `ProofByteDeliveryWeight` | `(receive_single_message_proof_16_kb - receive_single_message_proof_1_kb) / (15 * 1024)` | Weight of processing every additional proof byte over `ExpectedProofSize` limit. We're using the same formula, as for message delivery, because proof mechanism is assumed to be the same in both cases | - -#### Why we're always able to craft `receive_messages_delivery_proof` transaction? - -There can be at most `::MaxUnconfirmedMessagesAtInboundLane` -messages and at most -`::MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded -relayers in the single delivery confirmation transaction. - -We're checking that this transaction may be crafted in the -`pallet_message_lane::ensure_able_to_receive_confirmation` function, which must be called from every -runtime' tests. 
diff --git a/polkadot/bridges/modules/message-lane/rpc/Cargo.toml b/polkadot/bridges/modules/message-lane/rpc/Cargo.toml deleted file mode 100644 index 23dac80b407c02b7ff0ab90ca3ae1169dec90d8a..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/message-lane/rpc/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -[package] -name = "pallet-message-lane-rpc" -description = "Module that provides RPC methods specific to message-lane pallet." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -derive_more = "0.99.2" -futures = { version = "0.3.5", features = ["compat"] } -jsonrpc-core = "15.1.0" -jsonrpc-core-client = "15.1.0" -jsonrpc-derive = "15.1.0" -log = "0.4.11" - -# Bridge dependencies - -bp-runtime = { path = "../../../primitives/runtime" } -bp-message-lane = { path = "../../../primitives/message-lane" } - -# Substrate Dependencies - -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/modules/message-lane/rpc/src/error.rs b/polkadot/bridges/modules/message-lane/rpc/src/error.rs deleted file mode 100644 index 74fd829fcdb3b6f6464ecb21bfad69114916c012..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/message-lane/rpc/src/error.rs +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Possible errors and results of message-lane RPC calls. - -/// Future Result type. -pub type FutureResult = jsonrpc_core::BoxFuture; - -/// State RPC errors. -#[derive(Debug, derive_more::Display, derive_more::From)] -pub enum Error { - /// When unknown instance id is passed. - #[display(fmt = "Message lane instance is unknown")] - UnknownInstance, - /// Client error. 
- #[display(fmt = "Client error: {}", _0)] - Client(Box), -} - -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::UnknownInstance => None, - Error::Client(ref err) => Some(&**err), - } - } -} - -impl From for jsonrpc_core::Error { - fn from(e: Error) -> Self { - const UNKNOW_INSTANCE_CODE: i64 = 1; - - match e { - Error::UnknownInstance => jsonrpc_core::Error { - code: jsonrpc_core::ErrorCode::ServerError(UNKNOW_INSTANCE_CODE), - message: "Unknown instance passed".into(), - data: None, - }, - Error::Client(e) => jsonrpc_core::Error { - code: jsonrpc_core::ErrorCode::InternalError, - message: format!("Unknown error occured: {}", e), - data: Some(format!("{:?}", e).into()), - }, - } - } -} diff --git a/polkadot/bridges/modules/message-lane/rpc/src/lib.rs b/polkadot/bridges/modules/message-lane/rpc/src/lib.rs deleted file mode 100644 index 8532ed0c5d0e7708c574db2f9abeccf228c5bd5d..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/message-lane/rpc/src/lib.rs +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module that provides RPC methods specific to message-lane pallet. 
- -use crate::error::{Error, FutureResult}; - -use bp_message_lane::{LaneId, MessageNonce}; -use bp_runtime::InstanceId; -use futures::{FutureExt, TryFutureExt}; -use jsonrpc_core::futures::Future as _; -use jsonrpc_derive::rpc; -use sc_client_api::Backend as BackendT; -use sp_blockchain::{Error as BlockchainError, HeaderBackend}; -use sp_core::{storage::StorageKey, Bytes}; -use sp_runtime::{codec::Encode, generic::BlockId, traits::Block as BlockT}; -use sp_state_machine::prove_read; -use sp_trie::StorageProof; -use std::sync::Arc; - -mod error; - -/// Trie-based storage proof that the message(s) with given key(s) have been sent by the bridged chain. -/// SCALE-encoded trie nodes array `Vec>`. -pub type MessagesProof = Bytes; - -/// Trie-based storage proof that the message(s) with given key(s) have been received by the bridged chain. -/// SCALE-encoded trie nodes array `Vec>`. -pub type MessagesDeliveryProof = Bytes; - -/// Runtime adapter. -pub trait Runtime: Send + Sync + 'static { - /// Return runtime storage key for given message. May return None if instance is unknown. - fn message_key(&self, instance: &InstanceId, lane: &LaneId, nonce: MessageNonce) -> Option; - /// Return runtime storage key for outbound lane state. May return None if instance is unknown. - fn outbound_lane_data_key(&self, instance: &InstanceId, lane: &LaneId) -> Option; - /// Return runtime storage key for inbound lane state. May return None if instance is unknown. - fn inbound_lane_data_key(&self, instance: &InstanceId, lane: &LaneId) -> Option; -} - -/// Provides RPC methods for interacting with message-lane pallet. -#[rpc] -pub trait MessageLaneApi { - /// Returns storage proof of messages in given inclusive range. The state of outbound - /// lane is included in the proof if `include_outbound_lane_state` is true. 
- #[rpc(name = "messageLane_proveMessages")] - fn prove_messages( - &self, - instance: InstanceId, - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - include_outbound_lane_state: bool, - block: Option, - ) -> FutureResult; - - /// Returns proof-of-message(s) delivery. - #[rpc(name = "messageLane_proveMessagesDelivery")] - fn prove_messages_delivery( - &self, - instance: InstanceId, - lane: LaneId, - block: Option, - ) -> FutureResult; -} - -/// Implements the MessageLaneApi trait for interacting with message lanes. -pub struct MessageLaneRpcHandler { - backend: Arc, - runtime: Arc, - _phantom: std::marker::PhantomData, -} - -impl MessageLaneRpcHandler { - /// Creates new mesage lane RPC handler. - pub fn new(backend: Arc, runtime: Arc) -> Self { - Self { - backend, - runtime, - _phantom: Default::default(), - } - } -} - -impl MessageLaneApi for MessageLaneRpcHandler -where - Block: BlockT, - Backend: BackendT + 'static, - R: Runtime, -{ - fn prove_messages( - &self, - instance: InstanceId, - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - include_outbound_lane_state: bool, - block: Option, - ) -> FutureResult { - let runtime = self.runtime.clone(); - let outbound_lane_data_key = if include_outbound_lane_state { - Some(runtime.outbound_lane_data_key(&instance, &lane)) - } else { - None - }; - let messages_count = if end >= begin { end - begin + 1 } else { 0 }; - Box::new( - prove_keys_read( - self.backend.clone(), - block, - (begin..=end) - .map(move |nonce| runtime.message_key(&instance, &lane, nonce)) - .chain(outbound_lane_data_key.into_iter()), - ) - .boxed() - .compat() - .map(move |proof| { - let serialized_proof = serialize_storage_proof(proof); - log::trace!( - "Generated proof of {} messages. 
Size: {}", - messages_count, - serialized_proof.len() - ); - serialized_proof - }) - .map_err(Into::into), - ) - } - - fn prove_messages_delivery( - &self, - instance: InstanceId, - lane: LaneId, - block: Option, - ) -> FutureResult { - Box::new( - prove_keys_read( - self.backend.clone(), - block, - vec![self.runtime.inbound_lane_data_key(&instance, &lane)], - ) - .boxed() - .compat() - .map(|proof| { - let serialized_proof = serialize_storage_proof(proof); - log::trace!("Generated message delivery proof. Size: {}", serialized_proof.len()); - serialized_proof - }) - .map_err(Into::into), - ) - } -} - -async fn prove_keys_read( - backend: Arc, - block: Option, - keys: impl IntoIterator>, -) -> Result -where - Block: BlockT, - Backend: BackendT + 'static, -{ - let block = unwrap_or_best(&*backend, block); - let state = backend.state_at(BlockId::Hash(block)).map_err(blockchain_err)?; - let keys = keys - .into_iter() - .map(|key| key.ok_or(Error::UnknownInstance).map(|key| key.0)) - .collect::, _>>()?; - let storage_proof = prove_read(state, keys) - .map_err(BlockchainError::Execution) - .map_err(blockchain_err)?; - Ok(storage_proof) -} - -fn serialize_storage_proof(proof: StorageProof) -> Bytes { - let raw_nodes: Vec> = proof.iter_nodes().map(Into::into).collect(); - raw_nodes.encode().into() -} - -fn unwrap_or_best(backend: &impl BackendT, block: Option) -> Block::Hash { - match block { - Some(block) => block, - None => backend.blockchain().info().best_hash, - } -} - -fn blockchain_err(err: BlockchainError) -> Error { - Error::Client(Box::new(err)) -} diff --git a/polkadot/bridges/modules/message-lane/src/benchmarking.rs b/polkadot/bridges/modules/message-lane/src/benchmarking.rs deleted file mode 100644 index cd59d1347f2699655b889e1b77d2b5cfd9f336aa..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/message-lane/src/benchmarking.rs +++ /dev/null @@ -1,830 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. 
-// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Message lane pallet benchmarking. - -use crate::weights_ext::EXPECTED_DEFAULT_MESSAGE_LENGTH; -use crate::{inbound_lane::InboundLaneStorage, inbound_lane_storage, outbound_lane, Call, Instance}; - -use bp_message_lane::{ - source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, InboundLaneData, LaneId, MessageData, - MessageNonce, OutboundLaneData, UnrewardedRelayersState, -}; -use frame_benchmarking::{account, benchmarks_instance}; -use frame_support::{traits::Get, weights::Weight}; -use frame_system::RawOrigin; -use sp_std::{collections::btree_map::BTreeMap, convert::TryInto, ops::RangeInclusive, prelude::*}; - -/// Fee paid by submitter for single message delivery. -pub const MESSAGE_FEE: u64 = 10_000_000_000; - -const SEED: u32 = 0; - -/// Module we're benchmarking here. -pub struct Module, I: crate::Instance>(crate::Module); - -/// Proof size requirements. -pub enum ProofSize { - /// The proof is expected to be minimal. If value size may be changed, then it is expected to - /// have given size. - Minimal(u32), - /// The proof is expected to have at least given size and grow by increasing number of trie nodes - /// included in the proof. 
- HasExtraNodes(u32), - /// The proof is expected to have at least given size and grow by increasing value that is stored - /// in the trie. - HasLargeLeaf(u32), -} - -/// Benchmark-specific message parameters. -pub struct MessageParams { - /// Size of the message payload. - pub size: u32, - /// Message sender account. - pub sender_account: ThisAccountId, -} - -/// Benchmark-specific message proof parameters. -pub struct MessageProofParams { - /// Id of the lane. - pub lane: LaneId, - /// Range of messages to include in the proof. - pub message_nonces: RangeInclusive, - /// If `Some`, the proof needs to include this outbound lane data. - pub outbound_lane_data: Option, - /// Proof size requirements. - pub size: ProofSize, -} - -/// Benchmark-specific message delivery proof parameters. -pub struct MessageDeliveryProofParams { - /// Id of the lane. - pub lane: LaneId, - /// The proof needs to include this inbound lane data. - pub inbound_lane_data: InboundLaneData, - /// Proof size requirements. - pub size: ProofSize, -} - -/// Trait that must be implemented by runtime. -pub trait Config: crate::Config { - /// Lane id to use in benchmarks. - fn bench_lane_id() -> LaneId { - Default::default() - } - /// Get maximal size of the message payload. - fn maximal_message_size() -> u32; - /// Return id of relayer account at the bridged chain. - fn bridged_relayer_id() -> Self::InboundRelayer; - /// Return balance of given account. - fn account_balance(account: &Self::AccountId) -> Self::OutboundMessageFee; - /// Create given account and give it enough balance for test purposes. - fn endow_account(account: &Self::AccountId); - /// Prepare message to send over lane. - fn prepare_outbound_message( - params: MessageParams, - ) -> (Self::OutboundPayload, Self::OutboundMessageFee); - /// Prepare messages proof to receive by the module. 
- fn prepare_message_proof( - params: MessageProofParams, - ) -> ( - >::MessagesProof, - Weight, - ); - /// Prepare messages delivery proof to receive by the module. - fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams, - ) -> >::MessagesDeliveryProof; -} - -benchmarks_instance! { - // - // Benchmarks that are used directly by the runtime. - // - - // Benchmark `send_message` extrinsic with the worst possible conditions: - // * outbound lane already has state, so it needs to be read and decoded; - // * relayers fund account does not exists (in practice it needs to exist in production environment); - // * maximal number of messages is being pruned during the call; - // * message size is minimal for the target chain. - // - // Result of this benchmark is used as a base weight for `send_message` call. Then the 'message weight' - // (estimated using `send_half_maximal_message_worst_case` and `send_maximal_message_worst_case`) is - // added. - send_minimal_message_worst_case { - let lane_id = T::bench_lane_id(); - let sender = account("sender", 0, SEED); - T::endow_account(&sender); - - // 'send' messages that are to be pruned when our message is sent - for _nonce in 1..=T::MaxMessagesToPruneAtOnce::get() { - send_regular_message::(); - } - confirm_message_delivery::(T::MaxMessagesToPruneAtOnce::get()); - - let (payload, fee) = T::prepare_outbound_message(MessageParams { - size: 0, - sender_account: sender.clone(), - }); - }: send_message(RawOrigin::Signed(sender), lane_id, payload, fee) - verify { - assert_eq!( - crate::Module::::outbound_latest_generated_nonce(T::bench_lane_id()), - T::MaxMessagesToPruneAtOnce::get() + 1, - ); - } - - // Benchmark `send_message` extrinsic with the worst possible conditions: - // * outbound lane already has state, so it needs to be read and decoded; - // * relayers fund account does not exists (in practice it needs to exist in production environment); - // * maximal number of messages is being pruned during the 
call; - // * message size is 1KB. - // - // With single KB of message size, the weight of the call is increased (roughly) by - // `(send_16_kb_message_worst_case - send_1_kb_message_worst_case) / 15`. - send_1_kb_message_worst_case { - let lane_id = T::bench_lane_id(); - let sender = account("sender", 0, SEED); - T::endow_account(&sender); - - // 'send' messages that are to be pruned when our message is sent - for _nonce in 1..=T::MaxMessagesToPruneAtOnce::get() { - send_regular_message::(); - } - confirm_message_delivery::(T::MaxMessagesToPruneAtOnce::get()); - - let size = 1024; - assert!( - T::maximal_message_size() > size, - "This benchmark can only be used with runtime that accepts 1KB messages", - ); - - let (payload, fee) = T::prepare_outbound_message(MessageParams { - size, - sender_account: sender.clone(), - }); - }: send_message(RawOrigin::Signed(sender), lane_id, payload, fee) - verify { - assert_eq!( - crate::Module::::outbound_latest_generated_nonce(T::bench_lane_id()), - T::MaxMessagesToPruneAtOnce::get() + 1, - ); - } - - // Benchmark `send_message` extrinsic with the worst possible conditions: - // * outbound lane already has state, so it needs to be read and decoded; - // * relayers fund account does not exists (in practice it needs to exist in production environment); - // * maximal number of messages is being pruned during the call; - // * message size is 16KB. - // - // With single KB of message size, the weight of the call is increased (roughly) by - // `(send_16_kb_message_worst_case - send_1_kb_message_worst_case) / 15`. 
- send_16_kb_message_worst_case { - let lane_id = T::bench_lane_id(); - let sender = account("sender", 0, SEED); - T::endow_account(&sender); - - // 'send' messages that are to be pruned when our message is sent - for _nonce in 1..=T::MaxMessagesToPruneAtOnce::get() { - send_regular_message::(); - } - confirm_message_delivery::(T::MaxMessagesToPruneAtOnce::get()); - - let size = 16 * 1024; - assert!( - T::maximal_message_size() > size, - "This benchmark can only be used with runtime that accepts 16KB messages", - ); - - let (payload, fee) = T::prepare_outbound_message(MessageParams { - size, - sender_account: sender.clone(), - }); - }: send_message(RawOrigin::Signed(sender), lane_id, payload, fee) - verify { - assert_eq!( - crate::Module::::outbound_latest_generated_nonce(T::bench_lane_id()), - T::MaxMessagesToPruneAtOnce::get() + 1, - ); - } - - // Benchmark `increase_message_fee` with following conditions: - // * message has maximal message; - // * submitter account is killed because its balance is less than ED after payment. - increase_message_fee { - let sender = account("sender", 42, SEED); - T::endow_account(&sender); - - let additional_fee = T::account_balance(&sender); - let lane_id = T::bench_lane_id(); - let nonce = 1; - - send_regular_message_with_payload::(vec![42u8; T::maximal_message_size() as _]); - }: increase_message_fee(RawOrigin::Signed(sender.clone()), lane_id, nonce, additional_fee) - verify { - assert_eq!(T::account_balance(&sender), 0.into()); - } - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher. - // - // This is base benchmark for all other message delivery benchmarks. 
- receive_single_message_proof { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: None, - size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), - 21, - ); - } - - // Benchmark `receive_messages_proof` extrinsic with two minimal-weight messages and following conditions: - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher. - // - // The weight of single message delivery could be approximated as - // `weight(receive_two_messages_proof) - weight(receive_single_message_proof)`. - // This won't be super-accurate if message has non-zero dispatch weight, but estimation should - // be close enough to real weight. 
- receive_two_messages_proof { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=22, - outbound_lane_data: None, - size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 2, dispatch_weight) - verify { - assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), - 22, - ); - } - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * proof includes outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher. - // - // The weight of outbound lane state delivery would be - // `weight(receive_single_message_proof_with_outbound_lane_state) - weight(receive_single_message_proof)`. - // This won't be super-accurate if message has non-zero dispatch weight, but estimation should - // be close enough to real weight. 
- receive_single_message_proof_with_outbound_lane_state { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: Some(OutboundLaneData { - oldest_unpruned_nonce: 21, - latest_received_nonce: 20, - latest_generated_nonce: 21, - }), - size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), - 21, - ); - assert_eq!( - crate::Module::::inbound_latest_confirmed_nonce(T::bench_lane_id()), - 20, - ); - } - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * the proof has many redundand trie nodes with total size of approximately 1KB; - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher. - // - // With single KB of messages proof, the weight of the call is increased (roughly) by - // `(receive_single_message_proof_16KB - receive_single_message_proof_1_kb) / 15`. 
- receive_single_message_proof_1_kb { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: None, - size: ProofSize::HasExtraNodes(1024), - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), - 21, - ); - } - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * the proof has many redundand trie nodes with total size of approximately 16KB; - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher. - // - // Size of proof grows because it contains extra trie nodes in it. - // - // With single KB of messages proof, the weight of the call is increased (roughly) by - // `(receive_single_message_proof_16KB - receive_single_message_proof) / 15`. 
- receive_single_message_proof_16_kb { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: None, - size: ProofSize::HasExtraNodes(16 * 1024), - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), - 21, - ); - } - - // Benchmark `receive_messages_delivery_proof` extrinsic with following conditions: - // * single relayer is rewarded for relaying single message; - // * relayer account does not exist (in practice it needs to exist in production environment). - // - // This is base benchmark for all other confirmations delivery benchmarks. - receive_delivery_proof_for_single_message { - let relayers_fund_id = crate::Module::::relayer_fund_account_id(); - let relayer_id: T::AccountId = account("relayer", 0, SEED); - let relayer_balance = T::account_balance(&relayer_id); - T::endow_account(&relayers_fund_id); - - // send message that we're going to confirm - send_regular_message::(); - - let relayers_state = UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - }; - let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { - lane: T::bench_lane_id(), - inbound_lane_data: InboundLaneData { - relayers: vec![(1, 1, relayer_id.clone())].into_iter().collect(), - last_confirmed_nonce: 0, - }, - size: ProofSize::Minimal(0), - }); - }: receive_messages_delivery_proof(RawOrigin::Signed(relayer_id.clone()), proof, relayers_state) - verify { - assert_eq!( - T::account_balance(&relayer_id), - relayer_balance + MESSAGE_FEE.into(), - ); - } - - // Benchmark 
`receive_messages_delivery_proof` extrinsic with following conditions: - // * single relayer is rewarded for relaying two messages; - // * relayer account does not exist (in practice it needs to exist in production environment). - // - // Additional weight for paying single-message reward to the same relayer could be computed - // as `weight(receive_delivery_proof_for_two_messages_by_single_relayer) - // - weight(receive_delivery_proof_for_single_message)`. - receive_delivery_proof_for_two_messages_by_single_relayer { - let relayers_fund_id = crate::Module::::relayer_fund_account_id(); - let relayer_id: T::AccountId = account("relayer", 0, SEED); - let relayer_balance = T::account_balance(&relayer_id); - T::endow_account(&relayers_fund_id); - - // send message that we're going to confirm - send_regular_message::(); - send_regular_message::(); - - let relayers_state = UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 2, - total_messages: 2, - }; - let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { - lane: T::bench_lane_id(), - inbound_lane_data: InboundLaneData { - relayers: vec![(1, 2, relayer_id.clone())].into_iter().collect(), - last_confirmed_nonce: 0, - }, - size: ProofSize::Minimal(0), - }); - }: receive_messages_delivery_proof(RawOrigin::Signed(relayer_id.clone()), proof, relayers_state) - verify { - ensure_relayer_rewarded::(&relayer_id, &relayer_balance); - } - - // Benchmark `receive_messages_delivery_proof` extrinsic with following conditions: - // * two relayers are rewarded for relaying single message each; - // * relayer account does not exist (in practice it needs to exist in production environment). - // - // Additional weight for paying reward to the next relayer could be computed - // as `weight(receive_delivery_proof_for_two_messages_by_two_relayers) - // - weight(receive_delivery_proof_for_two_messages_by_single_relayer)`. 
- receive_delivery_proof_for_two_messages_by_two_relayers { - let relayers_fund_id = crate::Module::::relayer_fund_account_id(); - let relayer1_id: T::AccountId = account("relayer1", 1, SEED); - let relayer1_balance = T::account_balance(&relayer1_id); - let relayer2_id: T::AccountId = account("relayer2", 2, SEED); - let relayer2_balance = T::account_balance(&relayer2_id); - T::endow_account(&relayers_fund_id); - - // send message that we're going to confirm - send_regular_message::(); - send_regular_message::(); - - let relayers_state = UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 1, - total_messages: 2, - }; - let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { - lane: T::bench_lane_id(), - inbound_lane_data: InboundLaneData { - relayers: vec![ - (1, 1, relayer1_id.clone()), - (2, 2, relayer2_id.clone()), - ].into_iter().collect(), - last_confirmed_nonce: 0, - }, - size: ProofSize::Minimal(0), - }); - }: receive_messages_delivery_proof(RawOrigin::Signed(relayer1_id.clone()), proof, relayers_state) - verify { - ensure_relayer_rewarded::(&relayer1_id, &relayer1_balance); - ensure_relayer_rewarded::(&relayer2_id, &relayer2_balance); - } - - // - // Benchmarks for manual checks. - // - - // Benchmark `send_message` extrinsic with following conditions: - // * outbound lane already has state, so it needs to be read and decoded; - // * relayers fund account does not exists (in practice it needs to exist in production environment); - // * maximal number of messages is being pruned during the call; - // * message size varies from minimal to maximal for the target chain. - // - // Results of this benchmark may be used to check how message size affects `send_message` performance. 
- send_messages_of_various_lengths { - let i in 0..T::maximal_message_size().try_into().unwrap_or_default(); - - let lane_id = T::bench_lane_id(); - let sender = account("sender", 0, SEED); - T::endow_account(&sender); - - // 'send' messages that are to be pruned when our message is sent - for _nonce in 1..=T::MaxMessagesToPruneAtOnce::get() { - send_regular_message::(); - } - confirm_message_delivery::(T::MaxMessagesToPruneAtOnce::get()); - - let (payload, fee) = T::prepare_outbound_message(MessageParams { - size: i as _, - sender_account: sender.clone(), - }); - }: send_message(RawOrigin::Signed(sender), lane_id, payload, fee) - verify { - assert_eq!( - crate::Module::::outbound_latest_generated_nonce(T::bench_lane_id()), - T::MaxMessagesToPruneAtOnce::get() + 1, - ); - } - - // Benchmark `receive_messages_proof` extrinsic with multiple minimal-weight messages and following conditions: - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher. - // - // This benchmarks gives us an approximation of single message delivery weight. It is similar to the - // `weight(receive_two_messages_proof) - weight(receive_single_message_proof)`. So it may be used - // to verify that the other approximation is correct. 
- receive_multiple_messages_proof { - let i in 1..64; - - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - let messages_count = i as _; - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=(20 + i as MessageNonce), - outbound_lane_data: None, - size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), - }); - }: receive_messages_proof( - RawOrigin::Signed(relayer_id_on_target), - relayer_id_on_source, - proof, - messages_count, - dispatch_weight - ) - verify { - assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), - 20 + i as MessageNonce, - ); - } - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher. - // - // Results of this benchmark may be used to check how proof size affects `receive_message_proof` performance. 
- receive_message_proofs_with_extra_nodes { - let i in 0..T::maximal_message_size(); - - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - let messages_count = 1u32; - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: None, - size: ProofSize::HasExtraNodes(i as _), - }); - }: receive_messages_proof( - RawOrigin::Signed(relayer_id_on_target), - relayer_id_on_source, - proof, - messages_count, - dispatch_weight - ) - verify { - assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), - 21, - ); - } - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher. - // - // Results of this benchmark may be used to check how message size affects `receive_message_proof` performance. 
- receive_message_proofs_with_large_leaf { - let i in 0..T::maximal_message_size(); - - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - let messages_count = 1u32; - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: None, - size: ProofSize::HasLargeLeaf(i as _), - }); - }: receive_messages_proof( - RawOrigin::Signed(relayer_id_on_target), - relayer_id_on_source, - proof, - messages_count, - dispatch_weight - ) - verify { - assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), - 21, - ); - } - - // Benchmark `receive_messages_proof` extrinsic with multiple minimal-weight messages and following conditions: - // * proof includes outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher. - // - // This benchmarks gives us an approximation of outbound lane state delivery weight. It is similar to the - // `weight(receive_single_message_proof_with_outbound_lane_state) - weight(receive_single_message_proof)`. - // So it may be used to verify that the other approximation is correct. 
- receive_multiple_messages_proof_with_outbound_lane_state { - let i in 1..128; - - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - let messages_count = i as _; - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=20 + i as MessageNonce, - outbound_lane_data: Some(OutboundLaneData { - oldest_unpruned_nonce: 21, - latest_received_nonce: 20, - latest_generated_nonce: 21, - }), - size: ProofSize::Minimal(0), - }); - }: receive_messages_proof( - RawOrigin::Signed(relayer_id_on_target), - relayer_id_on_source, - proof, - messages_count, - dispatch_weight - ) - verify { - assert_eq!( - crate::Module::::inbound_latest_received_nonce(T::bench_lane_id()), - 20 + i as MessageNonce, - ); - assert_eq!( - crate::Module::::inbound_latest_confirmed_nonce(T::bench_lane_id()), - 20, - ); - } - - // Benchmark `receive_messages_delivery_proof` extrinsic where single relayer delivers multiple messages. 
- receive_delivery_proof_for_multiple_messages_by_single_relayer { - // there actually should be used value of `MaxUnrewardedRelayerEntriesAtInboundLane` from the bridged - // chain, but we're more interested in additional weight/message than in max weight - let i in 1..T::MaxUnrewardedRelayerEntriesAtInboundLane::get() - .try_into() - .expect("Value of MaxUnrewardedRelayerEntriesAtInboundLane is too large"); - - let relayers_fund_id = crate::Module::::relayer_fund_account_id(); - let relayer_id: T::AccountId = account("relayer", 0, SEED); - let relayer_balance = T::account_balance(&relayer_id); - T::endow_account(&relayers_fund_id); - - // send messages that we're going to confirm - for _ in 1..=i { - send_regular_message::(); - } - - let relayers_state = UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: i as MessageNonce, - }; - let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { - lane: T::bench_lane_id(), - inbound_lane_data: InboundLaneData { - relayers: vec![(1, i as MessageNonce, relayer_id.clone())].into_iter().collect(), - last_confirmed_nonce: 0, - }, - size: ProofSize::Minimal(0), - }); - }: receive_messages_delivery_proof(RawOrigin::Signed(relayer_id.clone()), proof, relayers_state) - verify { - ensure_relayer_rewarded::(&relayer_id, &relayer_balance); - } - - // Benchmark `receive_messages_delivery_proof` extrinsic where every relayer delivers single messages. 
- receive_delivery_proof_for_multiple_messages_by_multiple_relayers { - // there actually should be used value of `MaxUnconfirmedMessagesAtInboundLane` from the bridged - // chain, but we're more interested in additional weight/message than in max weight - let i in 1..T::MaxUnconfirmedMessagesAtInboundLane::get() - .try_into() - .expect("Value of MaxUnconfirmedMessagesAtInboundLane is too large "); - - let relayers_fund_id = crate::Module::::relayer_fund_account_id(); - let confirmation_relayer_id = account("relayer", 0, SEED); - let relayers: BTreeMap = (1..=i) - .map(|j| { - let relayer_id = account("relayer", j + 1, SEED); - let relayer_balance = T::account_balance(&relayer_id); - (relayer_id, relayer_balance) - }) - .collect(); - T::endow_account(&relayers_fund_id); - - // send messages that we're going to confirm - for _ in 1..=i { - send_regular_message::(); - } - - let relayers_state = UnrewardedRelayersState { - unrewarded_relayer_entries: i as MessageNonce, - messages_in_oldest_entry: 1, - total_messages: i as MessageNonce, - }; - let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { - lane: T::bench_lane_id(), - inbound_lane_data: InboundLaneData { - relayers: relayers - .keys() - .enumerate() - .map(|(j, relayer_id)| (j as MessageNonce + 1, j as MessageNonce + 1, relayer_id.clone())) - .collect(), - last_confirmed_nonce: 0, - }, - size: ProofSize::Minimal(0), - }); - }: receive_messages_delivery_proof(RawOrigin::Signed(confirmation_relayer_id), proof, relayers_state) - verify { - for (relayer_id, prev_balance) in relayers { - ensure_relayer_rewarded::(&relayer_id, &prev_balance); - } - } -} - -fn send_regular_message, I: Instance>() { - let mut outbound_lane = outbound_lane::(T::bench_lane_id()); - outbound_lane.send_message(MessageData { - payload: vec![], - fee: MESSAGE_FEE.into(), - }); -} - -fn send_regular_message_with_payload, I: Instance>(payload: Vec) { - let mut outbound_lane = outbound_lane::(T::bench_lane_id()); - 
outbound_lane.send_message(MessageData { - payload, - fee: MESSAGE_FEE.into(), - }); -} - -fn confirm_message_delivery, I: Instance>(nonce: MessageNonce) { - let mut outbound_lane = outbound_lane::(T::bench_lane_id()); - assert!(outbound_lane.confirm_delivery(nonce).is_some()); -} - -fn receive_messages, I: Instance>(nonce: MessageNonce) { - let mut inbound_lane_storage = inbound_lane_storage::(T::bench_lane_id()); - inbound_lane_storage.set_data(InboundLaneData { - relayers: vec![(1, nonce, T::bridged_relayer_id())].into_iter().collect(), - last_confirmed_nonce: 0, - }); -} - -fn ensure_relayer_rewarded, I: Instance>(relayer_id: &T::AccountId, old_balance: &T::OutboundMessageFee) { - let new_balance = T::account_balance(relayer_id); - assert!( - new_balance > *old_balance, - "Relayer haven't received reward for relaying message: old balance = {:?}, new balance = {:?}", - old_balance, - new_balance, - ); -} diff --git a/polkadot/bridges/modules/message-lane/src/inbound_lane.rs b/polkadot/bridges/modules/message-lane/src/inbound_lane.rs deleted file mode 100644 index 7359aa4ed188a6781f19ab5f12cfa84f45e31677..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/message-lane/src/inbound_lane.rs +++ /dev/null @@ -1,397 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Everything about incoming messages receival. - -use bp_message_lane::{ - target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, - InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, -}; -use sp_std::prelude::PartialEq; - -/// Inbound lane storage. -pub trait InboundLaneStorage { - /// Delivery and dispatch fee type on source chain. - type MessageFee; - /// Id of relayer on source chain. - type Relayer: PartialEq; - - /// Lane id. - fn id(&self) -> LaneId; - /// Return maximal number of unrewarded relayer entries in inbound lane. - fn max_unrewarded_relayer_entries(&self) -> MessageNonce; - /// Return maximal number of unconfirmed messages in inbound lane. - fn max_unconfirmed_messages(&self) -> MessageNonce; - /// Get lane data from the storage. - fn data(&self) -> InboundLaneData; - /// Update lane data in the storage. - fn set_data(&mut self, data: InboundLaneData); -} - -/// Inbound messages lane. -pub struct InboundLane { - storage: S, -} - -impl InboundLane { - /// Create new inbound lane backed by given storage. - pub fn new(storage: S) -> Self { - InboundLane { storage } - } - - /// Receive state of the corresponding outbound lane. 
- pub fn receive_state_update(&mut self, outbound_lane_data: OutboundLaneData) -> Option { - let mut data = self.storage.data(); - let last_delivered_nonce = data.last_delivered_nonce(); - - if outbound_lane_data.latest_received_nonce > last_delivered_nonce { - // this is something that should never happen if proofs are correct - return None; - } - if outbound_lane_data.latest_received_nonce <= data.last_confirmed_nonce { - return None; - } - - let new_confirmed_nonce = outbound_lane_data.latest_received_nonce; - data.last_confirmed_nonce = new_confirmed_nonce; - // Firstly, remove all of the records where higher nonce <= new confirmed nonce - while data - .relayers - .front() - .map(|(_, nonce_high, _)| *nonce_high <= new_confirmed_nonce) - .unwrap_or(false) - { - data.relayers.pop_front(); - } - // Secondly, update the next record with lower nonce equal to new confirmed nonce if needed. - // Note: There will be max. 1 record to update as we don't allow messages from relayers to overlap. - match data.relayers.front_mut() { - Some((nonce_low, _, _)) if *nonce_low < new_confirmed_nonce => { - *nonce_low = new_confirmed_nonce + 1; - } - _ => {} - } - - self.storage.set_data(data); - Some(outbound_lane_data.latest_received_nonce) - } - - /// Receive new message. 
- pub fn receive_message>( - &mut self, - relayer: S::Relayer, - nonce: MessageNonce, - message_data: DispatchMessageData, - ) -> bool { - let mut data = self.storage.data(); - let is_correct_message = nonce == data.last_delivered_nonce() + 1; - if !is_correct_message { - return false; - } - - // if there are more unrewarded relayer entries than we may accept, reject this message - if data.relayers.len() as MessageNonce >= self.storage.max_unrewarded_relayer_entries() { - return false; - } - - // if there are more unconfirmed messages than we may accept, reject this message - let unconfirmed_messages_count = nonce.saturating_sub(data.last_confirmed_nonce); - if unconfirmed_messages_count > self.storage.max_unconfirmed_messages() { - return false; - } - - let push_new = match data.relayers.back_mut() { - Some((_, nonce_high, last_relayer)) if last_relayer == &relayer => { - *nonce_high = nonce; - false - } - _ => true, - }; - if push_new { - data.relayers.push_back((nonce, nonce, relayer)); - } - - self.storage.set_data(data); - - P::dispatch(DispatchMessage { - key: MessageKey { - lane_id: self.storage.id(), - nonce, - }, - data: message_data, - }); - - true - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - inbound_lane, - mock::{ - message_data, run_test, TestMessageDispatch, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, - TEST_RELAYER_B, TEST_RELAYER_C, - }, - DefaultInstance, RuntimeInboundLaneStorage, - }; - - fn receive_regular_message( - lane: &mut InboundLane>, - nonce: MessageNonce, - ) { - assert!(lane.receive_message::( - TEST_RELAYER_A, - nonce, - message_data(REGULAR_PAYLOAD).into() - )); - } - - #[test] - fn receive_status_update_ignores_status_from_the_future() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - receive_regular_message(&mut lane, 1); - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 10, - ..Default::default() - }), - None, - ); - - 
assert_eq!(lane.storage.data().last_confirmed_nonce, 0); - }); - } - - #[test] - fn receive_status_update_ignores_obsolete_status() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - receive_regular_message(&mut lane, 1); - receive_regular_message(&mut lane, 2); - receive_regular_message(&mut lane, 3); - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 3, - ..Default::default() - }), - Some(3), - ); - assert_eq!(lane.storage.data().last_confirmed_nonce, 3); - - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 3, - ..Default::default() - }), - None, - ); - assert_eq!(lane.storage.data().last_confirmed_nonce, 3); - }); - } - - #[test] - fn receive_status_update_works() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - receive_regular_message(&mut lane, 1); - receive_regular_message(&mut lane, 2); - receive_regular_message(&mut lane, 3); - assert_eq!(lane.storage.data().last_confirmed_nonce, 0); - assert_eq!(lane.storage.data().relayers, vec![(1, 3, TEST_RELAYER_A)]); - - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 2, - ..Default::default() - }), - Some(2), - ); - assert_eq!(lane.storage.data().last_confirmed_nonce, 2); - assert_eq!(lane.storage.data().relayers, vec![(3, 3, TEST_RELAYER_A)]); - - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 3, - ..Default::default() - }), - Some(3), - ); - assert_eq!(lane.storage.data().last_confirmed_nonce, 3); - assert_eq!(lane.storage.data().relayers, vec![]); - }); - } - - #[test] - fn receive_status_update_works_with_batches_from_relayers() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - let mut seed_storage_data = lane.storage.data(); - // Prepare data - seed_storage_data.last_confirmed_nonce = 0; - seed_storage_data.relayers.push_back((1, 1, TEST_RELAYER_A)); - // Simulate messages batch (2, 3, 4) from relayer #2 - 
seed_storage_data.relayers.push_back((2, 4, TEST_RELAYER_B)); - seed_storage_data.relayers.push_back((5, 5, TEST_RELAYER_C)); - lane.storage.set_data(seed_storage_data); - // Check - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 3, - ..Default::default() - }), - Some(3), - ); - assert_eq!(lane.storage.data().last_confirmed_nonce, 3); - assert_eq!( - lane.storage.data().relayers, - vec![(4, 4, TEST_RELAYER_B), (5, 5, TEST_RELAYER_C)] - ); - }); - } - - #[test] - fn fails_to_receive_message_with_incorrect_nonce() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - assert!(!lane.receive_message::( - TEST_RELAYER_A, - 10, - message_data(REGULAR_PAYLOAD).into() - )); - assert_eq!(lane.storage.data().last_delivered_nonce(), 0); - }); - } - - #[test] - fn fails_to_receive_messages_above_unrewarded_relayer_entries_limit_per_lane() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - let max_nonce = ::MaxUnrewardedRelayerEntriesAtInboundLane::get(); - for current_nonce in 1..max_nonce + 1 { - assert!(lane.receive_message::( - TEST_RELAYER_A + current_nonce, - current_nonce, - message_data(REGULAR_PAYLOAD).into() - )); - } - // Fails to dispatch new message from different than latest relayer. - assert_eq!( - false, - lane.receive_message::( - TEST_RELAYER_A + max_nonce + 1, - max_nonce + 1, - message_data(REGULAR_PAYLOAD).into() - ) - ); - // Fails to dispatch new messages from latest relayer. Prevents griefing attacks. 
- assert_eq!( - false, - lane.receive_message::( - TEST_RELAYER_A + max_nonce, - max_nonce + 1, - message_data(REGULAR_PAYLOAD).into() - ) - ); - }); - } - - #[test] - fn fails_to_receive_messages_above_unconfirmed_messages_limit_per_lane() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - let max_nonce = ::MaxUnconfirmedMessagesAtInboundLane::get(); - for current_nonce in 1..=max_nonce { - assert!(lane.receive_message::( - TEST_RELAYER_A, - current_nonce, - message_data(REGULAR_PAYLOAD).into() - )); - } - // Fails to dispatch new message from different than latest relayer. - assert_eq!( - false, - lane.receive_message::( - TEST_RELAYER_B, - max_nonce + 1, - message_data(REGULAR_PAYLOAD).into() - ) - ); - // Fails to dispatch new messages from latest relayer. - assert_eq!( - false, - lane.receive_message::( - TEST_RELAYER_A, - max_nonce + 1, - message_data(REGULAR_PAYLOAD).into() - ) - ); - }); - } - - #[test] - fn correctly_receives_following_messages_from_two_relayers_alternately() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - assert!(lane.receive_message::( - TEST_RELAYER_A, - 1, - message_data(REGULAR_PAYLOAD).into() - )); - assert!(lane.receive_message::( - TEST_RELAYER_B, - 2, - message_data(REGULAR_PAYLOAD).into() - )); - assert!(lane.receive_message::( - TEST_RELAYER_A, - 3, - message_data(REGULAR_PAYLOAD).into() - )); - assert_eq!( - lane.storage.data().relayers, - vec![(1, 1, TEST_RELAYER_A), (2, 2, TEST_RELAYER_B), (3, 3, TEST_RELAYER_A)] - ); - }); - } - - #[test] - fn rejects_same_message_from_two_different_relayers() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - assert!(lane.receive_message::( - TEST_RELAYER_A, - 1, - message_data(REGULAR_PAYLOAD).into() - )); - assert_eq!( - false, - lane.receive_message::(TEST_RELAYER_B, 1, message_data(REGULAR_PAYLOAD).into()) - ); - }); - } - - #[test] - fn correct_message_is_processed_instantly() { - run_test(|| { - let mut lane = 
inbound_lane::(TEST_LANE_ID); - receive_regular_message(&mut lane, 1); - assert_eq!(lane.storage.data().last_delivered_nonce(), 1); - }); - } -} diff --git a/polkadot/bridges/modules/message-lane/src/instant_payments.rs b/polkadot/bridges/modules/message-lane/src/instant_payments.rs deleted file mode 100644 index af5d2cdc410f4b625e8aff32b0a9279560a92e40..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/message-lane/src/instant_payments.rs +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Implementation of `MessageDeliveryAndDispatchPayment` trait on top of `Currency` trait. -//! -//! The payment is first transferred to a special `relayers-fund` account and only transferred -//! to the actual relayer in case confirmation is received. - -use bp_message_lane::{ - source_chain::{MessageDeliveryAndDispatchPayment, RelayersRewards, Sender}, - MessageNonce, -}; -use codec::Encode; -use frame_support::traits::{Currency as CurrencyT, ExistenceRequirement, Get}; -use num_traits::Zero; -use sp_runtime::traits::Saturating; -use sp_std::fmt::Debug; - -/// Instant message payments made in given currency. 
-/// -/// The balance is initally reserved in a special `relayers-fund` account, and transferred -/// to the relayer when message delivery is confirmed. -/// -/// Additionaly, confirmation transaction submitter (`confirmation_relayer`) is reimbursed -/// with the confirmation rewards (part of message fee, reserved to pay for delivery confirmation). -/// -/// NOTE The `relayers-fund` account must always exist i.e. be over Existential Deposit (ED; the -/// pallet enforces that) to make sure that even if the message cost is below ED it is still payed -/// to the relayer account. -/// NOTE It's within relayer's interest to keep their balance above ED as well, to make sure they -/// can receive the payment. -pub struct InstantCurrencyPayments { - _phantom: sp_std::marker::PhantomData<(T, Currency, GetConfirmationFee, RootAccount)>, -} - -impl MessageDeliveryAndDispatchPayment - for InstantCurrencyPayments -where - T: frame_system::Config, - Currency: CurrencyT, - Currency::Balance: From, - GetConfirmationFee: Get, - RootAccount: Get>, -{ - type Error = &'static str; - - fn initialize(relayer_fund_account: &T::AccountId) -> usize { - assert!( - frame_system::Pallet::::account_exists(relayer_fund_account), - "The relayer fund account ({:?}) must exist for the message lanes pallet to work correctly.", - relayer_fund_account, - ); - 1 - } - - fn pay_delivery_and_dispatch_fee( - submitter: &Sender, - fee: &Currency::Balance, - relayer_fund_account: &T::AccountId, - ) -> Result<(), Self::Error> { - let root_account = RootAccount::get(); - let account = match submitter { - Sender::Signed(submitter) => submitter, - Sender::Root | Sender::None => root_account - .as_ref() - .ok_or("Sending messages using Root or None origin is disallowed.")?, - }; - - Currency::transfer( - account, - relayer_fund_account, - *fee, - // it's fine for the submitter to go below Existential Deposit and die. 
- ExistenceRequirement::AllowDeath, - ) - .map_err(Into::into) - } - - fn pay_relayers_rewards( - confirmation_relayer: &T::AccountId, - relayers_rewards: RelayersRewards, - relayer_fund_account: &T::AccountId, - ) { - pay_relayers_rewards::( - confirmation_relayer, - relayers_rewards, - relayer_fund_account, - GetConfirmationFee::get(), - ); - } -} - -/// Pay rewards to given relayers, optionally rewarding confirmation relayer. -fn pay_relayers_rewards( - confirmation_relayer: &AccountId, - relayers_rewards: RelayersRewards, - relayer_fund_account: &AccountId, - confirmation_fee: Currency::Balance, -) where - AccountId: Debug + Default + Encode + PartialEq, - Currency: CurrencyT, - Currency::Balance: From, -{ - // reward every relayer except `confirmation_relayer` - let mut confirmation_relayer_reward = Currency::Balance::zero(); - for (relayer, reward) in relayers_rewards { - let mut relayer_reward = reward.reward; - - if relayer != *confirmation_relayer { - // If delivery confirmation is submitted by other relayer, let's deduct confirmation fee - // from relayer reward. - // - // If confirmation fee has been increased (or if it was the only component of message fee), - // then messages relayer may receive zero reward. - let mut confirmation_reward = confirmation_fee.saturating_mul(reward.messages.into()); - if confirmation_reward > relayer_reward { - confirmation_reward = relayer_reward; - } - relayer_reward = relayer_reward.saturating_sub(confirmation_reward); - confirmation_relayer_reward = confirmation_relayer_reward.saturating_add(confirmation_reward); - } else { - // If delivery confirmation is submitted by this relayer, let's add confirmation fee - // from other relayers to this relayer reward. 
- confirmation_relayer_reward = confirmation_relayer_reward.saturating_add(reward.reward); - continue; - } - - pay_relayer_reward::(relayer_fund_account, &relayer, relayer_reward); - } - - // finally - pay reward to confirmation relayer - pay_relayer_reward::(relayer_fund_account, confirmation_relayer, confirmation_relayer_reward); -} - -/// Transfer funds from relayers fund account to given relayer. -fn pay_relayer_reward( - relayer_fund_account: &AccountId, - relayer_account: &AccountId, - reward: Currency::Balance, -) where - AccountId: Debug, - Currency: CurrencyT, -{ - if reward.is_zero() { - return; - } - - let pay_result = Currency::transfer( - relayer_fund_account, - relayer_account, - reward, - // the relayer fund account must stay above ED (needs to be pre-funded) - ExistenceRequirement::KeepAlive, - ); - - match pay_result { - Ok(_) => frame_support::debug::trace!( - target: "runtime", - "Rewarded relayer {:?} with {:?}", - relayer_account, - reward, - ), - Err(error) => frame_support::debug::trace!( - target: "runtime", - "Failed to pay relayer {:?} reward {:?}: {:?}", - relayer_account, - reward, - error, - ), - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{run_test, AccountId as TestAccountId, Balance as TestBalance, TestRuntime}; - use bp_message_lane::source_chain::RelayerRewards; - - type Balances = pallet_balances::Pallet; - - const RELAYER_1: TestAccountId = 1; - const RELAYER_2: TestAccountId = 2; - const RELAYER_3: TestAccountId = 3; - const RELAYERS_FUND_ACCOUNT: TestAccountId = crate::mock::ENDOWED_ACCOUNT; - - fn relayers_rewards() -> RelayersRewards { - vec![ - ( - RELAYER_1, - RelayerRewards { - reward: 100, - messages: 2, - }, - ), - ( - RELAYER_2, - RelayerRewards { - reward: 100, - messages: 3, - }, - ), - ] - .into_iter() - .collect() - } - - #[test] - fn confirmation_relayer_is_rewarded_if_it_has_also_delivered_messages() { - run_test(|| { - pay_relayers_rewards::(&RELAYER_2, relayers_rewards(), 
&RELAYERS_FUND_ACCOUNT, 10); - - assert_eq!(Balances::free_balance(&RELAYER_1), 80); - assert_eq!(Balances::free_balance(&RELAYER_2), 120); - }); - } - - #[test] - fn confirmation_relayer_is_rewarded_if_it_has_not_delivered_any_delivered_messages() { - run_test(|| { - pay_relayers_rewards::(&RELAYER_3, relayers_rewards(), &RELAYERS_FUND_ACCOUNT, 10); - - assert_eq!(Balances::free_balance(&RELAYER_1), 80); - assert_eq!(Balances::free_balance(&RELAYER_2), 70); - assert_eq!(Balances::free_balance(&RELAYER_3), 50); - }); - } - - #[test] - fn only_confirmation_relayer_is_rewarded_if_confirmation_fee_has_significantly_increased() { - run_test(|| { - pay_relayers_rewards::(&RELAYER_3, relayers_rewards(), &RELAYERS_FUND_ACCOUNT, 1000); - - assert_eq!(Balances::free_balance(&RELAYER_1), 0); - assert_eq!(Balances::free_balance(&RELAYER_2), 0); - assert_eq!(Balances::free_balance(&RELAYER_3), 200); - }); - } -} diff --git a/polkadot/bridges/modules/message-lane/src/lib.rs b/polkadot/bridges/modules/message-lane/src/lib.rs deleted file mode 100644 index 45da09eba0ff49b594a97e4b401ae21d6cf50cd4..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/message-lane/src/lib.rs +++ /dev/null @@ -1,1575 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! 
Runtime module that allows sending and receiving messages using lane concept: -//! -//! 1) the message is sent using `send_message()` call; -//! 2) every outbound message is assigned nonce; -//! 3) the messages are stored in the storage; -//! 4) external component (relay) delivers messages to bridged chain; -//! 5) messages are processed in order (ordered by assigned nonce); -//! 6) relay may send proof-of-delivery back to this chain. -//! -//! Once message is sent, its progress can be tracked by looking at module events. -//! The assigned nonce is reported using `MessageAccepted` event. When message is -//! delivered to the the bridged chain, it is reported using `MessagesDelivered` event. -//! -//! **IMPORTANT NOTE**: after generating weights (custom `WeighInfo` implementation) for -//! your runtime (where this module is plugged to), please add test for these weights. -//! The test should call the `ensure_weights_are_correct` function from this module. -//! If this test fails with your weights, then either weights are computed incorrectly, -//! or some benchmarks assumptions are broken for your runtime. 
- -#![cfg_attr(not(feature = "std"), no_std)] - -pub use crate::weights_ext::{ - ensure_able_to_receive_confirmation, ensure_able_to_receive_message, ensure_weights_are_correct, WeightInfoExt, - EXPECTED_DEFAULT_MESSAGE_LENGTH, -}; - -use crate::inbound_lane::{InboundLane, InboundLaneStorage}; -use crate::outbound_lane::{OutboundLane, OutboundLaneStorage}; -use crate::weights::WeightInfo; - -use bp_message_lane::{ - source_chain::{LaneMessageVerifier, MessageDeliveryAndDispatchPayment, RelayersRewards, TargetHeaderChain}, - target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain}, - total_unrewarded_messages, InboundLaneData, LaneId, MessageData, MessageKey, MessageNonce, MessagePayload, - OutboundLaneData, Parameter as MessageLaneParameter, UnrewardedRelayersState, -}; -use bp_runtime::Size; -use codec::{Decode, Encode}; -use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, ensure, - traits::Get, - weights::{DispatchClass, Weight}, - Parameter, StorageMap, -}; -use frame_system::{ensure_signed, RawOrigin}; -use num_traits::{SaturatingAdd, Zero}; -use sp_runtime::{traits::BadOrigin, DispatchResult}; -use sp_std::{cell::RefCell, cmp::PartialOrd, marker::PhantomData, prelude::*}; - -mod inbound_lane; -mod outbound_lane; -mod weights_ext; - -pub mod instant_payments; -pub mod weights; - -#[cfg(feature = "runtime-benchmarks")] -pub mod benchmarking; - -#[cfg(test)] -mod mock; - -/// The module configuration trait -pub trait Config: frame_system::Config { - // General types - - /// They overarching event type. - type Event: From> + Into<::Event>; - /// Benchmarks results from runtime we're plugged into. - type WeightInfo: WeightInfoExt; - /// Pallet parameter that is opaque to the pallet itself, but may be used by the runtime - /// for integrating the pallet. - /// - /// All pallet parameters may only be updated either by the root, or by the pallet owner. 
- type Parameter: MessageLaneParameter; - - /// Maximal number of messages that may be pruned during maintenance. Maintenance occurs - /// whenever new message is sent. The reason is that if you want to use lane, you should - /// be ready to pay for its maintenance. - type MaxMessagesToPruneAtOnce: Get; - /// Maximal number of unrewarded relayer entries at inbound lane. Unrewarded means that the - /// relayer has delivered messages, but either confirmations haven't been delivered back to the - /// source chain, or we haven't received reward confirmations yet. - /// - /// This constant limits maximal number of entries in the `InboundLaneData::relayers`. Keep - /// in mind that the same relayer account may take several (non-consecutive) entries in this - /// set. - type MaxUnrewardedRelayerEntriesAtInboundLane: Get; - /// Maximal number of unconfirmed messages at inbound lane. Unconfirmed means that the - /// message has been delivered, but either confirmations haven't been delivered back to the - /// source chain, or we haven't received reward confirmations for these messages yet. - /// - /// This constant limits difference between last message from last entry of the - /// `InboundLaneData::relayers` and first message at the first entry. - /// - /// There is no point of making this parameter lesser than MaxUnrewardedRelayerEntriesAtInboundLane, - /// because then maximal number of relayer entries will be limited by maximal number of messages. - /// - /// This value also represents maximal number of messages in single delivery transaction. Transaction - /// that is declaring more messages than this value, will be rejected. Even if these messages are - /// from different lanes. - type MaxUnconfirmedMessagesAtInboundLane: Get; - - /// Payload type of outbound messages. This payload is dispatched on the bridged chain. - type OutboundPayload: Parameter + Size; - /// Message fee type of outbound messages. This fee is paid on this chain. 
- type OutboundMessageFee: Default + From + PartialOrd + Parameter + SaturatingAdd + Zero; - - /// Payload type of inbound messages. This payload is dispatched on this chain. - type InboundPayload: Decode; - /// Message fee type of inbound messages. This fee is paid on the bridged chain. - type InboundMessageFee: Decode; - /// Identifier of relayer that deliver messages to this chain. Relayer reward is paid on the bridged chain. - type InboundRelayer: Parameter; - - /// A type which can be turned into an AccountId from a 256-bit hash. - /// - /// Used when deriving the shared relayer fund account. - type AccountIdConverter: sp_runtime::traits::Convert; - - // Types that are used by outbound_lane (on source chain). - - /// Target header chain. - type TargetHeaderChain: TargetHeaderChain; - /// Message payload verifier. - type LaneMessageVerifier: LaneMessageVerifier; - /// Message delivery payment. - type MessageDeliveryAndDispatchPayment: MessageDeliveryAndDispatchPayment; - - // Types that are used by inbound_lane (on target chain). - - /// Source header chain, as it is represented on target chain. - type SourceHeaderChain: SourceHeaderChain; - /// Message dispatch. - type MessageDispatch: MessageDispatch; -} - -/// Shortcut to messages proof type for Config. -type MessagesProofOf = - <>::SourceHeaderChain as SourceHeaderChain<>::InboundMessageFee>>::MessagesProof; -/// Shortcut to messages delivery proof type for Config. -type MessagesDeliveryProofOf = <>::TargetHeaderChain as TargetHeaderChain< - >::OutboundPayload, - ::AccountId, ->>::MessagesDeliveryProof; - -decl_error! { - pub enum Error for Module, I: Instance> { - /// All pallet operations are halted. - Halted, - /// Message has been treated as invalid by chain verifier. - MessageRejectedByChainVerifier, - /// Message has been treated as invalid by lane verifier. - MessageRejectedByLaneVerifier, - /// Submitter has failed to pay fee for delivering and dispatching messages. 
- FailedToWithdrawMessageFee, - /// The transaction brings too many messages. - TooManyMessagesInTheProof, - /// Invalid messages has been submitted. - InvalidMessagesProof, - /// Invalid messages dispatch weight has been declared by the relayer. - InvalidMessagesDispatchWeight, - /// Invalid messages delivery proof has been submitted. - InvalidMessagesDeliveryProof, - /// The relayer has declared invalid unrewarded relayers state in the `receive_messages_delivery_proof` call. - InvalidUnrewardedRelayersState, - /// The message someone is trying to work with (i.e. increase fee) is already-delivered. - MessageIsAlreadyDelivered, - /// The message someone is trying to work with (i.e. increase fee) is not yet sent. - MessageIsNotYetSent - } -} - -decl_storage! { - trait Store for Module, I: Instance = DefaultInstance> as MessageLane { - /// Optional pallet owner. - /// - /// Pallet owner has a right to halt all pallet operations and then resume it. If it is - /// `None`, then there are no direct ways to halt/resume pallet operations, but other - /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt - /// flag directly or call the `halt_operations`). - pub ModuleOwner get(fn module_owner): Option; - /// If true, all pallet transactions are failed immediately. - pub IsHalted get(fn is_halted) config(): bool; - /// Map of lane id => inbound lane data. - pub InboundLanes: map hasher(blake2_128_concat) LaneId => InboundLaneData; - /// Map of lane id => outbound lane data. - pub OutboundLanes: map hasher(blake2_128_concat) LaneId => OutboundLaneData; - /// All queued outbound messages. 
- pub OutboundMessages: map hasher(blake2_128_concat) MessageKey => Option>; - } - add_extra_genesis { - config(phantom): sp_std::marker::PhantomData; - config(owner): Option; - build(|config| { - if let Some(ref owner) = config.owner { - >::put(owner); - } - }) - } -} - -decl_event!( - pub enum Event - where - AccountId = ::AccountId, - Parameter = >::Parameter, - { - /// Pallet parameter has been updated. - ParameterUpdated(Parameter), - /// Message has been accepted and is waiting to be delivered. - MessageAccepted(LaneId, MessageNonce), - /// Messages in the inclusive range have been delivered and processed by the bridged chain. - MessagesDelivered(LaneId, MessageNonce, MessageNonce), - /// Phantom member, never used. - Dummy(PhantomData<(AccountId, I)>), - } -); - -decl_module! { - pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { - /// Deposit one of this module's events by using the default implementation. - fn deposit_event() = default; - - /// Ensure runtime invariants. - fn on_runtime_upgrade() -> Weight { - let reads = T::MessageDeliveryAndDispatchPayment::initialize( - &Self::relayer_fund_account_id() - ); - T::DbWeight::get().reads(reads as u64) - } - - /// Change `ModuleOwner`. - /// - /// May only be called either by root, or by `ModuleOwner`. - #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] - pub fn set_owner(origin, new_owner: Option) { - ensure_owner_or_root::(origin)?; - match new_owner { - Some(new_owner) => { - ModuleOwner::::put(&new_owner); - frame_support::debug::info!("Setting pallet Owner to: {:?}", new_owner); - }, - None => { - ModuleOwner::::kill(); - frame_support::debug::info!("Removed Owner of pallet."); - }, - } - } - - /// Halt all pallet operations. Operations may be resumed using `resume_operations` call. - /// - /// May only be called either by root, or by `ModuleOwner`. 
- #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] - pub fn halt_operations(origin) { - ensure_owner_or_root::(origin)?; - IsHalted::::put(true); - frame_support::debug::warn!("Stopping pallet operations."); - } - - /// Resume all pallet operations. May be called even if pallet is halted. - /// - /// May only be called either by root, or by `ModuleOwner`. - #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] - pub fn resume_operations(origin) { - ensure_owner_or_root::(origin)?; - IsHalted::::put(false); - frame_support::debug::info!("Resuming pallet operations."); - } - - /// Update pallet parameter. - /// - /// May only be called either by root, or by `ModuleOwner`. - /// - /// The weight is: single read for permissions check + 2 writes for parameter value and event. - #[weight = (T::DbWeight::get().reads_writes(1, 2), DispatchClass::Operational)] - pub fn update_pallet_parameter(origin, parameter: T::Parameter) { - ensure_owner_or_root::(origin)?; - parameter.save(); - Self::deposit_event(RawEvent::ParameterUpdated(parameter)); - } - - /// Send message over lane. 
- #[weight = T::WeightInfo::send_message_weight(payload)] - pub fn send_message( - origin, - lane_id: LaneId, - payload: T::OutboundPayload, - delivery_and_dispatch_fee: T::OutboundMessageFee, - ) -> DispatchResult { - ensure_operational::()?; - let submitter = origin.into().map_err(|_| BadOrigin)?; - - // let's first check if message can be delivered to target chain - T::TargetHeaderChain::verify_message(&payload) - .map_err(|err| { - frame_support::debug::trace!( - "Message to lane {:?} is rejected by target chain: {:?}", - lane_id, - err, - ); - - Error::::MessageRejectedByChainVerifier - })?; - - // now let's enforce any additional lane rules - let mut lane = outbound_lane::(lane_id); - T::LaneMessageVerifier::verify_message( - &submitter, - &delivery_and_dispatch_fee, - &lane_id, - &lane.data(), - &payload, - ).map_err(|err| { - frame_support::debug::trace!( - "Message to lane {:?} is rejected by lane verifier: {:?}", - lane_id, - err, - ); - - Error::::MessageRejectedByLaneVerifier - })?; - - // let's withdraw delivery and dispatch fee from submitter - T::MessageDeliveryAndDispatchPayment::pay_delivery_and_dispatch_fee( - &submitter, - &delivery_and_dispatch_fee, - &Self::relayer_fund_account_id(), - ).map_err(|err| { - frame_support::debug::trace!( - "Message to lane {:?} is rejected because submitter {:?} is unable to pay fee {:?}: {:?}", - lane_id, - submitter, - delivery_and_dispatch_fee, - err, - ); - - Error::::FailedToWithdrawMessageFee - })?; - - // finally, save message in outbound storage and emit event - let encoded_payload = payload.encode(); - let encoded_payload_len = encoded_payload.len(); - let nonce = lane.send_message(MessageData { - payload: encoded_payload, - fee: delivery_and_dispatch_fee, - }); - lane.prune_messages(T::MaxMessagesToPruneAtOnce::get()); - - frame_support::debug::trace!( - "Accepted message {} to lane {:?}. 
Message size: {:?}", - nonce, - lane_id, - encoded_payload_len, - ); - - Self::deposit_event(RawEvent::MessageAccepted(lane_id, nonce)); - - Ok(()) - } - - /// Pay additional fee for the message. - #[weight = T::WeightInfo::increase_message_fee()] - pub fn increase_message_fee( - origin, - lane_id: LaneId, - nonce: MessageNonce, - additional_fee: T::OutboundMessageFee, - ) -> DispatchResult { - // if someone tries to pay for already-delivered message, we're rejecting this intention - // (otherwise this additional fee will be locked forever in relayers fund) - // - // if someone tries to pay for not-yet-sent message, we're rejeting this intention, or - // we're risking to have mess in the storage - let lane = outbound_lane::(lane_id); - ensure!(nonce > lane.data().latest_received_nonce, Error::::MessageIsAlreadyDelivered); - ensure!(nonce <= lane.data().latest_generated_nonce, Error::::MessageIsNotYetSent); - - // withdraw additional fee from submitter - let submitter = origin.into().map_err(|_| BadOrigin)?; - T::MessageDeliveryAndDispatchPayment::pay_delivery_and_dispatch_fee( - &submitter, - &additional_fee, - &Self::relayer_fund_account_id(), - ).map_err(|err| { - frame_support::debug::trace!( - "Submitter {:?} can't pay additional fee {:?} for the message {:?}/{:?}: {:?}", - submitter, - additional_fee, - lane_id, - nonce, - err, - ); - - Error::::FailedToWithdrawMessageFee - })?; - - // and finally update fee in the storage - let message_key = MessageKey { lane_id, nonce }; - OutboundMessages::::mutate(message_key, |message_data| { - // saturating_add is fine here - overflow here means that someone controls all - // chain funds, which shouldn't ever happen + `pay_delivery_and_dispatch_fee` - // above will fail before we reach here - let message_data = message_data - .as_mut() - .expect("the message is sent and not yet delivered; so it is in the storage; qed"); - message_data.fee = message_data.fee.saturating_add(&additional_fee); - }); - - Ok(()) - } - - /// 
Receive messages proof from bridged chain. - /// - /// The weight of the call assumes that the transaction always brings outbound lane - /// state update. Because of that, the submitter (relayer) has no benefit of not including - /// this data in the transaction, so reward confirmations lags should be minimal. - #[weight = T::WeightInfo::receive_messages_proof_weight(proof, *messages_count, *dispatch_weight)] - pub fn receive_messages_proof( - origin, - relayer_id: T::InboundRelayer, - proof: MessagesProofOf, - messages_count: u32, - dispatch_weight: Weight, - ) -> DispatchResult { - ensure_operational::()?; - let _ = ensure_signed(origin)?; - - // reject transactions that are declaring too many messages - ensure!( - MessageNonce::from(messages_count) <= T::MaxUnconfirmedMessagesAtInboundLane::get(), - Error::::TooManyMessagesInTheProof - ); - - // verify messages proof && convert proof into messages - let messages = verify_and_decode_messages_proof::< - T::SourceHeaderChain, - T::InboundMessageFee, - T::InboundPayload, - >(proof, messages_count) - .map_err(|err| { - frame_support::debug::trace!( - "Rejecting invalid messages proof: {:?}", - err, - ); - - Error::::InvalidMessagesProof - })?; - - // verify that relayer is paying actual dispatch weight - let actual_dispatch_weight: Weight = messages - .values() - .map(|lane_messages| lane_messages - .messages - .iter() - .map(T::MessageDispatch::dispatch_weight) - .fold(0, |sum, weight| sum.saturating_add(&weight)) - ) - .fold(0, |sum, weight| sum.saturating_add(weight)); - if dispatch_weight < actual_dispatch_weight { - frame_support::debug::trace!( - "Rejecting messages proof because of dispatch weight mismatch: declared={}, expected={}", - dispatch_weight, - actual_dispatch_weight, - ); - - return Err(Error::::InvalidMessagesDispatchWeight.into()); - } - - // dispatch messages and (optionally) update lane(s) state(s) - let mut total_messages = 0; - let mut valid_messages = 0; - for (lane_id, lane_data) in messages 
{ - let mut lane = inbound_lane::(lane_id); - - if let Some(lane_state) = lane_data.lane_state { - let updated_latest_confirmed_nonce = lane.receive_state_update(lane_state); - if let Some(updated_latest_confirmed_nonce) = updated_latest_confirmed_nonce { - frame_support::debug::trace!( - "Received lane {:?} state update: latest_confirmed_nonce={}", - lane_id, - updated_latest_confirmed_nonce, - ); - } - } - - for message in lane_data.messages { - debug_assert_eq!(message.key.lane_id, lane_id); - - total_messages += 1; - if lane.receive_message::(relayer_id.clone(), message.key.nonce, message.data) { - valid_messages += 1; - } - } - } - - frame_support::debug::trace!( - "Received messages: total={}, valid={}", - total_messages, - valid_messages, - ); - - Ok(()) - } - - /// Receive messages delivery proof from bridged chain. - #[weight = T::WeightInfo::receive_messages_delivery_proof_weight(proof, relayers_state)] - pub fn receive_messages_delivery_proof( - origin, - proof: MessagesDeliveryProofOf, - relayers_state: UnrewardedRelayersState, - ) -> DispatchResult { - ensure_operational::()?; - - let confirmation_relayer = ensure_signed(origin)?; - let (lane_id, lane_data) = T::TargetHeaderChain::verify_messages_delivery_proof(proof).map_err(|err| { - frame_support::debug::trace!( - "Rejecting invalid messages delivery proof: {:?}", - err, - ); - - Error::::InvalidMessagesDeliveryProof - })?; - - // verify that the relayer has declared correct `lane_data::relayers` state - // (we only care about total number of entries and messages, because this affects call weight) - ensure!( - total_unrewarded_messages(&lane_data.relayers) - .unwrap_or(MessageNonce::MAX) == relayers_state.total_messages - && lane_data.relayers.len() as MessageNonce == relayers_state.unrewarded_relayer_entries, - Error::::InvalidUnrewardedRelayersState - ); - - // mark messages as delivered - let mut lane = outbound_lane::(lane_id); - let mut relayers_rewards: RelayersRewards<_, 
T::OutboundMessageFee> = RelayersRewards::new(); - let last_delivered_nonce = lane_data.last_delivered_nonce(); - let received_range = lane.confirm_delivery(last_delivered_nonce); - if let Some(received_range) = received_range { - Self::deposit_event(RawEvent::MessagesDelivered(lane_id, received_range.0, received_range.1)); - - // remember to reward relayers that have delivered messages - // this loop is bounded by `T::MaxUnrewardedRelayerEntriesAtInboundLane` on the bridged chain - for (nonce_low, nonce_high, relayer) in lane_data.relayers { - let nonce_begin = sp_std::cmp::max(nonce_low, received_range.0); - let nonce_end = sp_std::cmp::min(nonce_high, received_range.1); - - // loop won't proceed if current entry is ahead of received range (begin > end). - // this loop is bound by `T::MaxUnconfirmedMessagesAtInboundLane` on the bridged chain - let mut relayer_reward = relayers_rewards.entry(relayer).or_default(); - for nonce in nonce_begin..nonce_end + 1 { - let message_data = OutboundMessages::::get(MessageKey { - lane_id, - nonce, - }).expect("message was just confirmed; we never prune unconfirmed messages; qed"); - relayer_reward.reward = relayer_reward.reward.saturating_add(&message_data.fee); - relayer_reward.messages += 1; - } - } - } - - // if some new messages have been confirmed, reward relayers - if !relayers_rewards.is_empty() { - let relayer_fund_account = Self::relayer_fund_account_id(); - >::MessageDeliveryAndDispatchPayment::pay_relayers_rewards( - &confirmation_relayer, - relayers_rewards, - &relayer_fund_account, - ); - } - - frame_support::debug::trace!( - "Received messages delivery proof up to (and including) {} at lane {:?}", - last_delivered_nonce, - lane_id, - ); - - Ok(()) - } - } -} - -impl, I: Instance> Module { - /// Get payload of given outbound message. 
- pub fn outbound_message_payload(lane: LaneId, nonce: MessageNonce) -> Option { - OutboundMessages::::get(MessageKey { lane_id: lane, nonce }).map(|message_data| message_data.payload) - } - - /// Get nonce of latest generated message at given outbound lane. - pub fn outbound_latest_generated_nonce(lane: LaneId) -> MessageNonce { - OutboundLanes::::get(&lane).latest_generated_nonce - } - - /// Get nonce of latest confirmed message at given outbound lane. - pub fn outbound_latest_received_nonce(lane: LaneId) -> MessageNonce { - OutboundLanes::::get(&lane).latest_received_nonce - } - - /// Get nonce of latest received message at given inbound lane. - pub fn inbound_latest_received_nonce(lane: LaneId) -> MessageNonce { - InboundLanes::::get(&lane).last_delivered_nonce() - } - - /// Get nonce of latest confirmed message at given inbound lane. - pub fn inbound_latest_confirmed_nonce(lane: LaneId) -> MessageNonce { - InboundLanes::::get(&lane).last_confirmed_nonce - } - - /// Get state of unrewarded relayers set. - pub fn inbound_unrewarded_relayers_state( - lane: bp_message_lane::LaneId, - ) -> bp_message_lane::UnrewardedRelayersState { - let relayers = InboundLanes::::get(&lane).relayers; - bp_message_lane::UnrewardedRelayersState { - unrewarded_relayer_entries: relayers.len() as _, - messages_in_oldest_entry: relayers.front().map(|(begin, end, _)| 1 + end - begin).unwrap_or(0), - total_messages: total_unrewarded_messages(&relayers).unwrap_or(MessageNonce::MAX), - } - } - - /// AccountId of the shared relayer fund account. - /// - /// This account is passed to `MessageDeliveryAndDispatchPayment` trait, and depending - /// on the implementation it can be used to store relayers rewards. - /// See [InstantCurrencyPayments] for a concrete implementation. 
- pub fn relayer_fund_account_id() -> T::AccountId { - use sp_runtime::traits::Convert; - let encoded_id = bp_runtime::derive_relayer_fund_account_id(bp_runtime::NO_INSTANCE_ID); - T::AccountIdConverter::convert(encoded_id) - } -} - -/// Getting storage keys for messages and lanes states. These keys are normally used when building -/// messages and lanes states proofs. -/// -/// Keep in mind that all functions in this module are **NOT** using passed `T` argument, so any -/// runtime can be passed. E.g. if you're verifying proof from Runtime1 in Runtime2, you only have -/// access to Runtime2 and you may pass it to the functions, where required. This is because our -/// maps are not using any Runtime-specific data in the keys. -/// -/// On the other side, passing correct instance is required. So if proof has been crafted by the -/// Instance1, you should verify it using Instance1. This is inconvenient if you're using different -/// instances on different sides of the bridge. I.e. in Runtime1 it is Instance2, but on Runtime2 -/// it is Instance42. But there's no other way, but to craft this key manually (which is what I'm -/// trying to avoid here) - by using strings like "Instance2", "OutboundMessages", etc. -pub mod storage_keys { - use super::*; - use frame_support::storage::generator::StorageMap; - use sp_core::storage::StorageKey; - - /// Storage key of the outbound message in the runtime storage. - pub fn message_key, I: Instance>(lane: &LaneId, nonce: MessageNonce) -> StorageKey { - let message_key = MessageKey { lane_id: *lane, nonce }; - let raw_storage_key = OutboundMessages::::storage_map_final_key(message_key); - StorageKey(raw_storage_key) - } - - /// Storage key of the outbound message lane state in the runtime storage. - pub fn outbound_lane_data_key(lane: &LaneId) -> StorageKey { - StorageKey(OutboundLanes::::storage_map_final_key(*lane)) - } - - /// Storage key of the inbound message lane state in the runtime storage. 
- pub fn inbound_lane_data_key, I: Instance>(lane: &LaneId) -> StorageKey { - StorageKey(InboundLanes::::storage_map_final_key(*lane)) - } -} - -/// Ensure that the origin is either root, or `ModuleOwner`. -fn ensure_owner_or_root, I: Instance>(origin: T::Origin) -> Result<(), BadOrigin> { - match origin.into() { - Ok(RawOrigin::Root) => Ok(()), - Ok(RawOrigin::Signed(ref signer)) if Some(signer) == Module::::module_owner().as_ref() => Ok(()), - _ => Err(BadOrigin), - } -} - -/// Ensure that the pallet is in operational mode (not halted). -fn ensure_operational, I: Instance>() -> Result<(), Error> { - if IsHalted::::get() { - Err(Error::::Halted) - } else { - Ok(()) - } -} - -/// Creates new inbound lane object, backed by runtime storage. -fn inbound_lane, I: Instance>(lane_id: LaneId) -> InboundLane> { - InboundLane::new(inbound_lane_storage::(lane_id)) -} - -/// Creates new runtime inbound lane storage. -fn inbound_lane_storage, I: Instance>(lane_id: LaneId) -> RuntimeInboundLaneStorage { - RuntimeInboundLaneStorage { - lane_id, - cached_data: RefCell::new(None), - _phantom: Default::default(), - } -} - -/// Creates new outbound lane object, backed by runtime storage. -fn outbound_lane, I: Instance>(lane_id: LaneId) -> OutboundLane> { - OutboundLane::new(RuntimeOutboundLaneStorage { - lane_id, - _phantom: Default::default(), - }) -} - -/// Runtime inbound lane storage. 
-struct RuntimeInboundLaneStorage, I = DefaultInstance> { - lane_id: LaneId, - cached_data: RefCell>>, - _phantom: PhantomData, -} - -impl, I: Instance> InboundLaneStorage for RuntimeInboundLaneStorage { - type MessageFee = T::InboundMessageFee; - type Relayer = T::InboundRelayer; - - fn id(&self) -> LaneId { - self.lane_id - } - - fn max_unrewarded_relayer_entries(&self) -> MessageNonce { - T::MaxUnrewardedRelayerEntriesAtInboundLane::get() - } - - fn max_unconfirmed_messages(&self) -> MessageNonce { - T::MaxUnconfirmedMessagesAtInboundLane::get() - } - - fn data(&self) -> InboundLaneData { - match self.cached_data.clone().into_inner() { - Some(data) => data, - None => { - let data = InboundLanes::::get(&self.lane_id); - *self.cached_data.try_borrow_mut().expect( - "we're in the single-threaded environment;\ - we have no recursive borrows; qed", - ) = Some(data.clone()); - data - } - } - } - - fn set_data(&mut self, data: InboundLaneData) { - *self.cached_data.try_borrow_mut().expect( - "we're in the single-threaded environment;\ - we have no recursive borrows; qed", - ) = Some(data.clone()); - InboundLanes::::insert(&self.lane_id, data) - } -} - -/// Runtime outbound lane storage. 
-struct RuntimeOutboundLaneStorage { - lane_id: LaneId, - _phantom: PhantomData<(T, I)>, -} - -impl, I: Instance> OutboundLaneStorage for RuntimeOutboundLaneStorage { - type MessageFee = T::OutboundMessageFee; - - fn id(&self) -> LaneId { - self.lane_id - } - - fn data(&self) -> OutboundLaneData { - OutboundLanes::::get(&self.lane_id) - } - - fn set_data(&mut self, data: OutboundLaneData) { - OutboundLanes::::insert(&self.lane_id, data) - } - - #[cfg(test)] - fn message(&self, nonce: &MessageNonce) -> Option> { - OutboundMessages::::get(MessageKey { - lane_id: self.lane_id, - nonce: *nonce, - }) - } - - fn save_message(&mut self, nonce: MessageNonce, mesage_data: MessageData) { - OutboundMessages::::insert( - MessageKey { - lane_id: self.lane_id, - nonce, - }, - mesage_data, - ); - } - - fn remove_message(&mut self, nonce: &MessageNonce) { - OutboundMessages::::remove(MessageKey { - lane_id: self.lane_id, - nonce: *nonce, - }); - } -} - -/// Verify messages proof and return proved messages with decoded payload. -fn verify_and_decode_messages_proof, Fee, DispatchPayload: Decode>( - proof: Chain::MessagesProof, - messages_count: u32, -) -> Result>, Chain::Error> { - // `receive_messages_proof` weight formula and `MaxUnconfirmedMessagesAtInboundLane` check - // guarantees that the `message_count` is sane and Vec may be allocated. 
- // (tx with too many messages will either be rejected from the pool, or will fail earlier) - Chain::verify_messages_proof(proof, messages_count).map(|messages_by_lane| { - messages_by_lane - .into_iter() - .map(|(lane, lane_data)| { - ( - lane, - ProvedLaneMessages { - lane_state: lane_data.lane_state, - messages: lane_data.messages.into_iter().map(Into::into).collect(), - }, - ) - }) - .collect() - }) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{ - message, run_test, Event as TestEvent, Origin, TestMessageDeliveryAndDispatchPayment, TestMessageLaneParameter, - TestMessagesDeliveryProof, TestMessagesProof, TestPayload, TestRuntime, TokenConversionRate, - PAYLOAD_REJECTED_BY_TARGET_CHAIN, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B, - }; - use bp_message_lane::UnrewardedRelayersState; - use frame_support::{assert_noop, assert_ok}; - use frame_system::{EventRecord, Module as System, Phase}; - use hex_literal::hex; - use sp_runtime::DispatchError; - - fn get_ready_for_events() { - System::::set_block_number(1); - System::::reset_events(); - } - - fn send_regular_message() { - get_ready_for_events(); - - assert_ok!(Module::::send_message( - Origin::signed(1), - TEST_LANE_ID, - REGULAR_PAYLOAD, - REGULAR_PAYLOAD.1, - )); - - // check event with assigned nonce - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::pallet_message_lane(RawEvent::MessageAccepted(TEST_LANE_ID, 1)), - topics: vec![], - }], - ); - - // check that fee has been withdrawn from submitter - assert!(TestMessageDeliveryAndDispatchPayment::is_fee_paid(1, REGULAR_PAYLOAD.1)); - } - - fn receive_messages_delivery_proof() { - System::::set_block_number(1); - System::::reset_events(); - - assert_ok!(Module::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - ..Default::default() - }, - ))), - 
Default::default(), - )); - - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::pallet_message_lane(RawEvent::MessagesDelivered(TEST_LANE_ID, 1, 1)), - topics: vec![], - }], - ); - } - - #[test] - fn pallet_owner_may_change_owner() { - run_test(|| { - ModuleOwner::::put(2); - - assert_ok!(Module::::set_owner(Origin::root(), Some(1))); - assert_noop!( - Module::::halt_operations(Origin::signed(2)), - DispatchError::BadOrigin, - ); - assert_ok!(Module::::halt_operations(Origin::root())); - - assert_ok!(Module::::set_owner(Origin::signed(1), None)); - assert_noop!( - Module::::resume_operations(Origin::signed(1)), - DispatchError::BadOrigin, - ); - assert_noop!( - Module::::resume_operations(Origin::signed(2)), - DispatchError::BadOrigin, - ); - assert_ok!(Module::::resume_operations(Origin::root())); - }); - } - - #[test] - fn pallet_may_be_halted_by_root() { - run_test(|| { - assert_ok!(Module::::halt_operations(Origin::root())); - assert_ok!(Module::::resume_operations(Origin::root())); - }); - } - - #[test] - fn pallet_may_be_halted_by_owner() { - run_test(|| { - ModuleOwner::::put(2); - - assert_ok!(Module::::halt_operations(Origin::signed(2))); - assert_ok!(Module::::resume_operations(Origin::signed(2))); - - assert_noop!( - Module::::halt_operations(Origin::signed(1)), - DispatchError::BadOrigin, - ); - assert_noop!( - Module::::resume_operations(Origin::signed(1)), - DispatchError::BadOrigin, - ); - - assert_ok!(Module::::halt_operations(Origin::signed(2))); - assert_noop!( - Module::::resume_operations(Origin::signed(1)), - DispatchError::BadOrigin, - ); - }); - } - - #[test] - fn pallet_parameter_may_be_updated_by_root() { - run_test(|| { - get_ready_for_events(); - - let parameter = TestMessageLaneParameter::TokenConversionRate(10.into()); - assert_ok!(Module::::update_pallet_parameter( - Origin::root(), - parameter.clone(), - )); - - assert_eq!(TokenConversionRate::get(), 10.into()); - assert_eq!( - 
System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::pallet_message_lane(RawEvent::ParameterUpdated(parameter)), - topics: vec![], - }], - ); - }); - } - - #[test] - fn pallet_parameter_may_be_updated_by_owner() { - run_test(|| { - ModuleOwner::::put(2); - get_ready_for_events(); - - let parameter = TestMessageLaneParameter::TokenConversionRate(10.into()); - assert_ok!(Module::::update_pallet_parameter( - Origin::signed(2), - parameter.clone(), - )); - - assert_eq!(TokenConversionRate::get(), 10.into()); - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::pallet_message_lane(RawEvent::ParameterUpdated(parameter)), - topics: vec![], - }], - ); - }); - } - - #[test] - fn pallet_parameter_cant_be_updated_by_arbitrary_submitter() { - run_test(|| { - assert_noop!( - Module::::update_pallet_parameter( - Origin::signed(2), - TestMessageLaneParameter::TokenConversionRate(10.into()), - ), - DispatchError::BadOrigin, - ); - - ModuleOwner::::put(2); - - assert_noop!( - Module::::update_pallet_parameter( - Origin::signed(1), - TestMessageLaneParameter::TokenConversionRate(10.into()), - ), - DispatchError::BadOrigin, - ); - }); - } - - #[test] - fn fixed_u128_works_as_i_think() { - // this test is here just to be sure that conversion rate may be represented with FixedU128 - run_test(|| { - use sp_runtime::{FixedPointNumber, FixedU128}; - - // 1:1 conversion that we use by default for testnets - let rialto_token = 1u64; - let rialto_token_in_millau_tokens = TokenConversionRate::get().saturating_mul_int(rialto_token); - assert_eq!(rialto_token_in_millau_tokens, 1); - - // let's say conversion rate is 1:1.7 - let conversion_rate = FixedU128::saturating_from_rational(170, 100); - let rialto_tokens = 100u64; - let rialto_tokens_in_millau_tokens = conversion_rate.saturating_mul_int(rialto_tokens); - assert_eq!(rialto_tokens_in_millau_tokens, 170); - - // let's say conversion rate is 
1:0.25 - let conversion_rate = FixedU128::saturating_from_rational(25, 100); - let rialto_tokens = 100u64; - let rialto_tokens_in_millau_tokens = conversion_rate.saturating_mul_int(rialto_tokens); - assert_eq!(rialto_tokens_in_millau_tokens, 25); - }); - } - - #[test] - fn pallet_rejects_transactions_if_halted() { - run_test(|| { - // send message first to be able to check that delivery_proof fails later - send_regular_message(); - - IsHalted::::put(true); - - assert_noop!( - Module::::send_message( - Origin::signed(1), - TEST_LANE_ID, - REGULAR_PAYLOAD, - REGULAR_PAYLOAD.1, - ), - Error::::Halted, - ); - - assert_noop!( - Module::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(2, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.1, - ), - Error::::Halted, - ); - - assert_noop!( - Module::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - ..Default::default() - }, - ))), - Default::default(), - ), - Error::::Halted, - ); - }); - } - - #[test] - fn send_message_works() { - run_test(|| { - send_regular_message(); - }); - } - - #[test] - fn chain_verifier_rejects_invalid_message_in_send_message() { - run_test(|| { - // messages with this payload are rejected by target chain verifier - assert_noop!( - Module::::send_message( - Origin::signed(1), - TEST_LANE_ID, - PAYLOAD_REJECTED_BY_TARGET_CHAIN, - PAYLOAD_REJECTED_BY_TARGET_CHAIN.1 - ), - Error::::MessageRejectedByChainVerifier, - ); - }); - } - - #[test] - fn lane_verifier_rejects_invalid_message_in_send_message() { - run_test(|| { - // messages with zero fee are rejected by lane verifier - assert_noop!( - Module::::send_message(Origin::signed(1), TEST_LANE_ID, REGULAR_PAYLOAD, 0), - Error::::MessageRejectedByLaneVerifier, - ); - }); - } - - #[test] - fn message_send_fails_if_submitter_cant_pay_message_fee() { - run_test(|| { - 
TestMessageDeliveryAndDispatchPayment::reject_payments(); - assert_noop!( - Module::::send_message( - Origin::signed(1), - TEST_LANE_ID, - REGULAR_PAYLOAD, - REGULAR_PAYLOAD.1 - ), - Error::::FailedToWithdrawMessageFee, - ); - }); - } - - #[test] - fn receive_messages_proof_works() { - run_test(|| { - assert_ok!(Module::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.1, - )); - - assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 1); - }); - } - - #[test] - fn receive_messages_proof_updates_confirmed_message_nonce() { - run_test(|| { - // say we have received 10 messages && last confirmed message is 8 - InboundLanes::::insert( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 8, - relayers: vec![(9, 9, TEST_RELAYER_A), (10, 10, TEST_RELAYER_B)] - .into_iter() - .collect(), - }, - ); - assert_eq!( - Module::::inbound_unrewarded_relayers_state(TEST_LANE_ID), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 1, - total_messages: 2, - }, - ); - - // message proof includes outbound lane state with latest confirmed message updated to 9 - let mut message_proof: TestMessagesProof = Ok(vec![message(11, REGULAR_PAYLOAD)]).into(); - message_proof.result.as_mut().unwrap()[0].1.lane_state = Some(OutboundLaneData { - latest_received_nonce: 9, - ..Default::default() - }); - - assert_ok!(Module::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - message_proof, - 1, - REGULAR_PAYLOAD.1, - )); - - assert_eq!( - InboundLanes::::get(TEST_LANE_ID), - InboundLaneData { - last_confirmed_nonce: 9, - relayers: vec![(10, 10, TEST_RELAYER_B), (11, 11, TEST_RELAYER_A)] - .into_iter() - .collect(), - }, - ); - assert_eq!( - Module::::inbound_unrewarded_relayers_state(TEST_LANE_ID), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 1, - total_messages: 2, - }, - ); - }); - } - - #[test] - 
fn receive_messages_proof_rejects_invalid_dispatch_weight() { - run_test(|| { - assert_noop!( - Module::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.1 - 1, - ), - Error::::InvalidMessagesDispatchWeight, - ); - }); - } - - #[test] - fn receive_messages_proof_rejects_invalid_proof() { - run_test(|| { - assert_noop!( - Module::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - Err(()).into(), - 1, - 0, - ), - Error::::InvalidMessagesProof, - ); - }); - } - - #[test] - fn receive_messages_proof_rejects_proof_with_too_many_messages() { - run_test(|| { - assert_noop!( - Module::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - u32::MAX, - 0, - ), - Error::::TooManyMessagesInTheProof, - ); - }); - } - - #[test] - fn receive_messages_delivery_proof_works() { - run_test(|| { - send_regular_message(); - receive_messages_delivery_proof(); - - assert_eq!( - OutboundLanes::::get(&TEST_LANE_ID).latest_received_nonce, - 1, - ); - }); - } - - #[test] - fn receive_messages_delivery_proof_rewards_relayers() { - run_test(|| { - assert_ok!(Module::::send_message( - Origin::signed(1), - TEST_LANE_ID, - REGULAR_PAYLOAD, - 1000, - )); - assert_ok!(Module::::send_message( - Origin::signed(1), - TEST_LANE_ID, - REGULAR_PAYLOAD, - 2000, - )); - - // this reports delivery of message 1 => reward is paid to TEST_RELAYER_A - assert_ok!(Module::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![(1, 1, TEST_RELAYER_A)].into_iter().collect(), - ..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 1, - ..Default::default() - }, - )); - assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid( - TEST_RELAYER_A, - 1000 - )); - 
assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid( - TEST_RELAYER_B, - 2000 - )); - - // this reports delivery of both message 1 and message 2 => reward is paid only to TEST_RELAYER_B - assert_ok!(Module::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![(1, 1, TEST_RELAYER_A), (2, 2, TEST_RELAYER_B)] - .into_iter() - .collect(), - ..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - total_messages: 2, - ..Default::default() - }, - )); - assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid( - TEST_RELAYER_A, - 1000 - )); - assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid( - TEST_RELAYER_B, - 2000 - )); - }); - } - - #[test] - fn receive_messages_delivery_proof_rejects_invalid_proof() { - run_test(|| { - assert_noop!( - Module::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Err(())), - Default::default(), - ), - Error::::InvalidMessagesDeliveryProof, - ); - }); - } - - #[test] - fn receive_messages_delivery_proof_rejects_proof_if_declared_relayers_state_is_invalid() { - run_test(|| { - // when number of relayers entires is invalid - assert_noop!( - Module::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![(1, 1, TEST_RELAYER_A), (2, 2, TEST_RELAYER_B)] - .into_iter() - .collect(), - ..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 2, - ..Default::default() - }, - ), - Error::::InvalidUnrewardedRelayersState, - ); - - // when number of messages is invalid - assert_noop!( - Module::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![(1, 1, TEST_RELAYER_A), (2, 2, TEST_RELAYER_B)] - .into_iter() - .collect(), - 
..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - total_messages: 1, - ..Default::default() - }, - ), - Error::::InvalidUnrewardedRelayersState, - ); - }); - } - - #[test] - fn receive_messages_accepts_single_message_with_invalid_payload() { - run_test(|| { - let mut invalid_message = message(1, REGULAR_PAYLOAD); - invalid_message.data.payload = Vec::new(); - - assert_ok!(Module::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - Ok(vec![invalid_message]).into(), - 1, - 0, // weight may be zero in this case (all messages are improperly encoded) - ),); - - assert_eq!( - InboundLanes::::get(&TEST_LANE_ID).last_delivered_nonce(), - 1, - ); - }); - } - - #[test] - fn receive_messages_accepts_batch_with_message_with_invalid_payload() { - run_test(|| { - let mut invalid_message = message(2, REGULAR_PAYLOAD); - invalid_message.data.payload = Vec::new(); - - assert_ok!(Module::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - Ok(vec![ - message(1, REGULAR_PAYLOAD), - invalid_message, - message(3, REGULAR_PAYLOAD), - ]) - .into(), - 3, - REGULAR_PAYLOAD.1 + REGULAR_PAYLOAD.1, - ),); - - assert_eq!( - InboundLanes::::get(&TEST_LANE_ID).last_delivered_nonce(), - 3, - ); - }); - } - - #[test] - fn storage_message_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking all - // previously crafted messages proofs. - assert_eq!( - storage_keys::message_key::(&*b"test", 42).0, - hex!("87f1ffe31b52878f09495ca7482df1a48a395e6242c6813b196ca31ed0547ea79446af0e09063bd4a7874aef8a997cec746573742a00000000000000").to_vec(), - ); - } - - #[test] - fn outbound_lane_data_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking all - // previously crafted outbound lane state proofs. 
- assert_eq!( - storage_keys::outbound_lane_data_key::(&*b"test").0, - hex!("87f1ffe31b52878f09495ca7482df1a496c246acb9b55077390e3ca723a0ca1f44a8995dd50b6657a037a7839304535b74657374").to_vec(), - ); - } - - #[test] - fn inbound_lane_data_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking all - // previously crafted inbound lane state proofs. - assert_eq!( - storage_keys::inbound_lane_data_key::(&*b"test").0, - hex!("87f1ffe31b52878f09495ca7482df1a4e5f83cf83f2127eb47afdc35d6e43fab44a8995dd50b6657a037a7839304535b74657374").to_vec(), - ); - } - - #[test] - fn actual_dispatch_weight_does_not_overlow() { - run_test(|| { - let message1 = message(1, TestPayload(0, Weight::MAX / 2)); - let message2 = message(2, TestPayload(0, Weight::MAX / 2)); - let message3 = message(2, TestPayload(0, Weight::MAX / 2)); - - assert_noop!( - Module::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - // this may cause overflow if source chain storage is invalid - Ok(vec![message1, message2, message3]).into(), - 3, - 100, - ), - Error::::InvalidMessagesDispatchWeight, - ); - }); - } - - #[test] - fn increase_message_fee_fails_if_message_is_already_delivered() { - run_test(|| { - send_regular_message(); - receive_messages_delivery_proof(); - - assert_noop!( - Module::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,), - Error::::MessageIsAlreadyDelivered, - ); - }); - } - - #[test] - fn increase_message_fee_fails_if_message_is_not_yet_sent() { - run_test(|| { - assert_noop!( - Module::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,), - Error::::MessageIsNotYetSent, - ); - }); - } - - #[test] - fn increase_message_fee_fails_if_submitter_cant_pay_additional_fee() { - run_test(|| { - send_regular_message(); - - TestMessageDeliveryAndDispatchPayment::reject_payments(); - - assert_noop!( - Module::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,), - 
Error::::FailedToWithdrawMessageFee, - ); - }); - } - - #[test] - fn increase_message_fee_succeeds() { - run_test(|| { - send_regular_message(); - - assert_ok!(Module::::increase_message_fee( - Origin::signed(1), - TEST_LANE_ID, - 1, - 100, - ),); - assert!(TestMessageDeliveryAndDispatchPayment::is_fee_paid(1, 100)); - }); - } -} diff --git a/polkadot/bridges/modules/message-lane/src/mock.rs b/polkadot/bridges/modules/message-lane/src/mock.rs deleted file mode 100644 index 3fa10beef476052e747052ad62c6ad02c72a167e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/message-lane/src/mock.rs +++ /dev/null @@ -1,406 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -// From construct_runtime macro -#![allow(clippy::from_over_into)] - -use crate::Config; - -use bp_message_lane::{ - source_chain::{ - LaneMessageVerifier, MessageDeliveryAndDispatchPayment, RelayersRewards, Sender, TargetHeaderChain, - }, - target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain}, - InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, OutboundLaneData, - Parameter as MessageLaneParameter, -}; -use bp_runtime::Size; -use codec::{Decode, Encode}; -use frame_support::{parameter_types, weights::Weight}; -use sp_core::H256; -use sp_runtime::{ - testing::Header as SubstrateHeader, - traits::{BlakeTwo256, IdentityLookup}, - FixedU128, Perbill, -}; -use std::collections::BTreeMap; - -pub type AccountId = u64; -pub type Balance = u64; -#[derive(Decode, Encode, Clone, Debug, PartialEq, Eq)] -pub struct TestPayload(pub u64, pub Weight); -pub type TestMessageFee = u64; -pub type TestRelayer = u64; - -pub struct AccountIdConverter; - -impl sp_runtime::traits::Convert for AccountIdConverter { - fn convert(hash: H256) -> AccountId { - hash.to_low_u64_ne() - } -} - -type Block = frame_system::mocking::MockBlock; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - -use crate as pallet_message_lane; - -frame_support::construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Balances: pallet_balances::{Pallet, Call, Event}, - MessageLane: pallet_message_lane::{Pallet, Call, Event}, - } -} - -parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - -impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = SubstrateHeader; - type Event = Event; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = (); - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); -} - -parameter_types! { - pub const ExistentialDeposit: u64 = 1; -} - -impl pallet_balances::Config for TestRuntime { - type MaxLocks = (); - type Balance = Balance; - type DustRemoval = (); - type Event = Event; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = frame_system::Pallet; - type WeightInfo = (); -} - -parameter_types! 
{ - pub const MaxMessagesToPruneAtOnce: u64 = 10; - pub const MaxUnrewardedRelayerEntriesAtInboundLane: u64 = 16; - pub const MaxUnconfirmedMessagesAtInboundLane: u64 = 32; - pub storage TokenConversionRate: FixedU128 = 1.into(); -} - -#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] -pub enum TestMessageLaneParameter { - TokenConversionRate(FixedU128), -} - -impl MessageLaneParameter for TestMessageLaneParameter { - fn save(&self) { - match *self { - TestMessageLaneParameter::TokenConversionRate(conversion_rate) => { - TokenConversionRate::set(&conversion_rate) - } - } - } -} - -impl Config for TestRuntime { - type Event = Event; - type WeightInfo = (); - type Parameter = TestMessageLaneParameter; - type MaxMessagesToPruneAtOnce = MaxMessagesToPruneAtOnce; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - - type OutboundPayload = TestPayload; - type OutboundMessageFee = TestMessageFee; - - type InboundPayload = TestPayload; - type InboundMessageFee = TestMessageFee; - type InboundRelayer = TestRelayer; - - type AccountIdConverter = AccountIdConverter; - - type TargetHeaderChain = TestTargetHeaderChain; - type LaneMessageVerifier = TestLaneMessageVerifier; - type MessageDeliveryAndDispatchPayment = TestMessageDeliveryAndDispatchPayment; - - type SourceHeaderChain = TestSourceHeaderChain; - type MessageDispatch = TestMessageDispatch; -} - -impl Size for TestPayload { - fn size_hint(&self) -> u32 { - 16 - } -} - -/// Account that has balance to use in tests. -pub const ENDOWED_ACCOUNT: AccountId = 0xDEAD; - -/// Account id of test relayer. -pub const TEST_RELAYER_A: AccountId = 100; - -/// Account id of additional test relayer - B. -pub const TEST_RELAYER_B: AccountId = 101; - -/// Account id of additional test relayer - C. -pub const TEST_RELAYER_C: AccountId = 102; - -/// Error that is returned by all test implementations. 
-pub const TEST_ERROR: &str = "Test error"; - -/// Lane that we're using in tests. -pub const TEST_LANE_ID: LaneId = [0, 0, 0, 1]; - -/// Regular message payload. -pub const REGULAR_PAYLOAD: TestPayload = TestPayload(0, 50); - -/// Payload that is rejected by `TestTargetHeaderChain`. -pub const PAYLOAD_REJECTED_BY_TARGET_CHAIN: TestPayload = TestPayload(1, 50); - -/// Vec of proved messages, grouped by lane. -pub type MessagesByLaneVec = Vec<(LaneId, ProvedLaneMessages>)>; - -/// Test messages proof. -#[derive(Debug, Encode, Decode, Clone, PartialEq, Eq)] -pub struct TestMessagesProof { - pub result: Result, -} - -impl Size for TestMessagesProof { - fn size_hint(&self) -> u32 { - 0 - } -} - -impl From>, ()>> for TestMessagesProof { - fn from(result: Result>, ()>) -> Self { - Self { - result: result.map(|messages| { - let mut messages_by_lane: BTreeMap>> = - BTreeMap::new(); - for message in messages { - messages_by_lane - .entry(message.key.lane_id) - .or_default() - .messages - .push(message); - } - messages_by_lane.into_iter().collect() - }), - } - } -} - -/// Messages delivery proof used in tests. -#[derive(Debug, Encode, Decode, Eq, Clone, PartialEq)] -pub struct TestMessagesDeliveryProof(pub Result<(LaneId, InboundLaneData), ()>); - -impl Size for TestMessagesDeliveryProof { - fn size_hint(&self) -> u32 { - 0 - } -} - -/// Target header chain that is used in tests. -#[derive(Debug, Default)] -pub struct TestTargetHeaderChain; - -impl TargetHeaderChain for TestTargetHeaderChain { - type Error = &'static str; - - type MessagesDeliveryProof = TestMessagesDeliveryProof; - - fn verify_message(payload: &TestPayload) -> Result<(), Self::Error> { - if *payload == PAYLOAD_REJECTED_BY_TARGET_CHAIN { - Err(TEST_ERROR) - } else { - Ok(()) - } - } - - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), Self::Error> { - proof.0.map_err(|_| TEST_ERROR) - } -} - -/// Lane message verifier that is used in tests. 
-#[derive(Debug, Default)] -pub struct TestLaneMessageVerifier; - -impl LaneMessageVerifier for TestLaneMessageVerifier { - type Error = &'static str; - - fn verify_message( - _submitter: &Sender, - delivery_and_dispatch_fee: &TestMessageFee, - _lane: &LaneId, - _lane_outbound_data: &OutboundLaneData, - _payload: &TestPayload, - ) -> Result<(), Self::Error> { - if *delivery_and_dispatch_fee != 0 { - Ok(()) - } else { - Err(TEST_ERROR) - } - } -} - -/// Message fee payment system that is used in tests. -#[derive(Debug, Default)] -pub struct TestMessageDeliveryAndDispatchPayment; - -impl TestMessageDeliveryAndDispatchPayment { - /// Reject all payments. - pub fn reject_payments() { - frame_support::storage::unhashed::put(b":reject-message-fee:", &true); - } - - /// Returns true if given fee has been paid by given submitter. - pub fn is_fee_paid(submitter: AccountId, fee: TestMessageFee) -> bool { - frame_support::storage::unhashed::get(b":message-fee:") == Some((Sender::Signed(submitter), fee)) - } - - /// Returns true if given relayer has been rewarded with given balance. The reward-paid flag is - /// cleared after the call. 
- pub fn is_reward_paid(relayer: AccountId, fee: TestMessageFee) -> bool { - let key = (b":relayer-reward:", relayer, fee).encode(); - frame_support::storage::unhashed::take::(&key).is_some() - } -} - -impl MessageDeliveryAndDispatchPayment for TestMessageDeliveryAndDispatchPayment { - type Error = &'static str; - - fn pay_delivery_and_dispatch_fee( - submitter: &Sender, - fee: &TestMessageFee, - _relayer_fund_account: &AccountId, - ) -> Result<(), Self::Error> { - if frame_support::storage::unhashed::get(b":reject-message-fee:") == Some(true) { - return Err(TEST_ERROR); - } - - frame_support::storage::unhashed::put(b":message-fee:", &(submitter, fee)); - Ok(()) - } - - fn pay_relayers_rewards( - _confirmation_relayer: &AccountId, - relayers_rewards: RelayersRewards, - _relayer_fund_account: &AccountId, - ) { - for (relayer, reward) in relayers_rewards { - let key = (b":relayer-reward:", relayer, reward.reward).encode(); - frame_support::storage::unhashed::put(&key, &true); - } - } -} - -/// Source header chain that is used in tests. -#[derive(Debug)] -pub struct TestSourceHeaderChain; - -impl SourceHeaderChain for TestSourceHeaderChain { - type Error = &'static str; - - type MessagesProof = TestMessagesProof; - - fn verify_messages_proof( - proof: Self::MessagesProof, - _messages_count: u32, - ) -> Result>, Self::Error> { - proof - .result - .map(|proof| proof.into_iter().collect()) - .map_err(|_| TEST_ERROR) - } -} - -/// Source header chain that is used in tests. -#[derive(Debug)] -pub struct TestMessageDispatch; - -impl MessageDispatch for TestMessageDispatch { - type DispatchPayload = TestPayload; - - fn dispatch_weight(message: &DispatchMessage) -> Weight { - match message.data.payload.as_ref() { - Ok(payload) => payload.1, - Err(_) => 0, - } - } - - fn dispatch(_message: DispatchMessage) {} -} - -/// Return test lane message with given nonce and payload. 
-pub fn message(nonce: MessageNonce, payload: TestPayload) -> Message { - Message { - key: MessageKey { - lane_id: TEST_LANE_ID, - nonce, - }, - data: message_data(payload), - } -} - -/// Return message data with valid fee for given payload. -pub fn message_data(payload: TestPayload) -> MessageData { - MessageData { - payload: payload.encode(), - fee: 1, - } -} - -/// Run message lane test. -pub fn run_test(test: impl FnOnce() -> T) -> T { - let mut t = frame_system::GenesisConfig::default() - .build_storage::() - .unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![(ENDOWED_ACCOUNT, 1_000_000)], - } - .assimilate_storage(&mut t) - .unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(test) -} diff --git a/polkadot/bridges/modules/message-lane/src/outbound_lane.rs b/polkadot/bridges/modules/message-lane/src/outbound_lane.rs deleted file mode 100644 index 8496d7f8c026d25f4b379538f81d3c398f4aa748..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/message-lane/src/outbound_lane.rs +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Everything about outgoing messages sending. - -use bp_message_lane::{LaneId, MessageData, MessageNonce, OutboundLaneData}; - -/// Outbound lane storage. 
-pub trait OutboundLaneStorage { - /// Delivery and dispatch fee type on source chain. - type MessageFee; - - /// Lane id. - fn id(&self) -> LaneId; - /// Get lane data from the storage. - fn data(&self) -> OutboundLaneData; - /// Update lane data in the storage. - fn set_data(&mut self, data: OutboundLaneData); - /// Returns saved outbound message payload. - #[cfg(test)] - fn message(&self, nonce: &MessageNonce) -> Option>; - /// Save outbound message in the storage. - fn save_message(&mut self, nonce: MessageNonce, message_data: MessageData); - /// Remove outbound message from the storage. - fn remove_message(&mut self, nonce: &MessageNonce); -} - -/// Outbound messages lane. -pub struct OutboundLane { - storage: S, -} - -impl OutboundLane { - /// Create new inbound lane backed by given storage. - pub fn new(storage: S) -> Self { - OutboundLane { storage } - } - - /// Get this lane data. - pub fn data(&self) -> OutboundLaneData { - self.storage.data() - } - - /// Send message over lane. - /// - /// Returns new message nonce. - pub fn send_message(&mut self, message_data: MessageData) -> MessageNonce { - let mut data = self.storage.data(); - let nonce = data.latest_generated_nonce + 1; - data.latest_generated_nonce = nonce; - - self.storage.save_message(nonce, message_data); - self.storage.set_data(data); - - nonce - } - - /// Confirm messages delivery. - /// - /// Returns `None` if confirmation is wrong/duplicate. - /// Returns `Some` with inclusive ranges of message nonces that have been received. 
- pub fn confirm_delivery(&mut self, latest_received_nonce: MessageNonce) -> Option<(MessageNonce, MessageNonce)> { - let mut data = self.storage.data(); - if latest_received_nonce <= data.latest_received_nonce || latest_received_nonce > data.latest_generated_nonce { - return None; - } - - let prev_latest_received_nonce = data.latest_received_nonce; - data.latest_received_nonce = latest_received_nonce; - self.storage.set_data(data); - - Some((prev_latest_received_nonce + 1, latest_received_nonce)) - } - - /// Prune at most `max_messages_to_prune` already received messages. - /// - /// Returns number of pruned messages. - pub fn prune_messages(&mut self, max_messages_to_prune: MessageNonce) -> MessageNonce { - let mut pruned_messages = 0; - let mut anything_changed = false; - let mut data = self.storage.data(); - while pruned_messages < max_messages_to_prune && data.oldest_unpruned_nonce <= data.latest_received_nonce { - self.storage.remove_message(&data.oldest_unpruned_nonce); - - anything_changed = true; - pruned_messages += 1; - data.oldest_unpruned_nonce += 1; - } - - if anything_changed { - self.storage.set_data(data); - } - - pruned_messages - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - mock::{message_data, run_test, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID}, - outbound_lane, - }; - - #[test] - fn send_message_works() { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - assert_eq!(lane.storage.data().latest_generated_nonce, 0); - assert_eq!(lane.send_message(message_data(REGULAR_PAYLOAD)), 1); - assert!(lane.storage.message(&1).is_some()); - assert_eq!(lane.storage.data().latest_generated_nonce, 1); - }); - } - - #[test] - fn confirm_delivery_works() { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - assert_eq!(lane.send_message(message_data(REGULAR_PAYLOAD)), 1); - assert_eq!(lane.send_message(message_data(REGULAR_PAYLOAD)), 2); - assert_eq!(lane.send_message(message_data(REGULAR_PAYLOAD)), 3); - 
assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 0); - assert_eq!(lane.confirm_delivery(3), Some((1, 3))); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 3); - }); - } - - #[test] - fn confirm_delivery_rejects_nonce_lesser_than_latest_received() { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - lane.send_message(message_data(REGULAR_PAYLOAD)); - lane.send_message(message_data(REGULAR_PAYLOAD)); - lane.send_message(message_data(REGULAR_PAYLOAD)); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 0); - assert_eq!(lane.confirm_delivery(3), Some((1, 3))); - assert_eq!(lane.confirm_delivery(3), None); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 3); - - assert_eq!(lane.confirm_delivery(2), None); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 3); - }); - } - - #[test] - fn confirm_delivery_rejects_nonce_larger_than_last_generated() { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - lane.send_message(message_data(REGULAR_PAYLOAD)); - lane.send_message(message_data(REGULAR_PAYLOAD)); - lane.send_message(message_data(REGULAR_PAYLOAD)); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 0); - assert_eq!(lane.confirm_delivery(10), None); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 0); - }); - } - - #[test] - fn prune_messages_works() { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - // when lane is empty, nothing is pruned - assert_eq!(lane.prune_messages(100), 0); - assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); - // when nothing is 
confirmed, nothing is pruned - lane.send_message(message_data(REGULAR_PAYLOAD)); - lane.send_message(message_data(REGULAR_PAYLOAD)); - lane.send_message(message_data(REGULAR_PAYLOAD)); - assert_eq!(lane.prune_messages(100), 0); - assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); - // after confirmation, some messages are received - assert_eq!(lane.confirm_delivery(2), Some((1, 2))); - assert_eq!(lane.prune_messages(100), 2); - assert_eq!(lane.storage.data().oldest_unpruned_nonce, 3); - // after last message is confirmed, everything is pruned - assert_eq!(lane.confirm_delivery(3), Some((3, 3))); - assert_eq!(lane.prune_messages(100), 1); - assert_eq!(lane.storage.data().oldest_unpruned_nonce, 4); - }); - } -} diff --git a/polkadot/bridges/modules/message-lane/src/weights.rs b/polkadot/bridges/modules/message-lane/src/weights.rs deleted file mode 100644 index b0ec6522b2c6edc2ef2ea4a644918df4fbcd7a4b..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/message-lane/src/weights.rs +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Autogenerated weights for pallet_message_lane -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.1 -//! DATE: 2021-02-11, STEPS: [50, ], REPEAT: 20 -//! 
LOW RANGE: [], HIGH RANGE: [] -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled -//! CHAIN: Some("local"), DB CACHE: 128 - -// Executed Command: -// target/release/rialto-bridge-node -// benchmark -// --chain=local -// --steps=50 -// --repeat=20 -// --pallet=pallet_message_lane -// --extrinsic=* -// --execution=wasm -// --wasm-execution=Compiled -// --heap-pages=4096 -// --output=./modules/message-lane/src/weights.rs -// --template=./.maintain/rialto-weight-template.hbs - -#![allow(clippy::all)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{ - traits::Get, - weights::{constants::RocksDbWeight, Weight}, -}; -use sp_std::marker::PhantomData; - -/// Weight functions needed for pallet_message_lane. -pub trait WeightInfo { - fn send_minimal_message_worst_case() -> Weight; - fn send_1_kb_message_worst_case() -> Weight; - fn send_16_kb_message_worst_case() -> Weight; - fn increase_message_fee() -> Weight; - fn receive_single_message_proof() -> Weight; - fn receive_two_messages_proof() -> Weight; - fn receive_single_message_proof_with_outbound_lane_state() -> Weight; - fn receive_single_message_proof_1_kb() -> Weight; - fn receive_single_message_proof_16_kb() -> Weight; - fn receive_delivery_proof_for_single_message() -> Weight; - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight; - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight; - fn send_messages_of_various_lengths(i: u32) -> Weight; - fn receive_multiple_messages_proof(i: u32) -> Weight; - fn receive_message_proofs_with_extra_nodes(i: u32) -> Weight; - fn receive_message_proofs_with_large_leaf(i: u32) -> Weight; - fn receive_multiple_messages_proof_with_outbound_lane_state(i: u32) -> Weight; - fn receive_delivery_proof_for_multiple_messages_by_single_relayer(i: u32) -> Weight; - fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight; -} - -/// Weights for pallet_message_lane using the Rialto node and 
recommended hardware. -pub struct RialtoWeight(PhantomData); -impl WeightInfo for RialtoWeight { - fn send_minimal_message_worst_case() -> Weight { - (140_645_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(12 as Weight)) - } - fn send_1_kb_message_worst_case() -> Weight { - (146_434_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(12 as Weight)) - } - fn send_16_kb_message_worst_case() -> Weight { - (214_721_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(12 as Weight)) - } - fn increase_message_fee() -> Weight { - (8_395_221_000 as Weight) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn receive_single_message_proof() -> Weight { - (156_390_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn receive_two_messages_proof() -> Weight { - (269_316_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - (174_342_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn receive_single_message_proof_1_kb() -> Weight { - (186_621_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn receive_single_message_proof_16_kb() -> Weight { - (487_028_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn receive_delivery_proof_for_single_message() -> Weight { - (144_893_000 as Weight) - 
.saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - (151_134_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - (212_650_000 as Weight) - .saturating_add(T::DbWeight::get().reads(8 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - } - fn send_messages_of_various_lengths(i: u32) -> Weight { - (88_670_000 as Weight) - .saturating_add((5_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads(4 as Weight)) - .saturating_add(T::DbWeight::get().writes(12 as Weight)) - } - fn receive_multiple_messages_proof(i: u32) -> Weight { - (0 as Weight) - .saturating_add((125_956_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn receive_message_proofs_with_extra_nodes(i: u32) -> Weight { - (462_389_000 as Weight) - .saturating_add((11_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn receive_message_proofs_with_large_leaf(i: u32) -> Weight { - (120_744_000 as Weight) - .saturating_add((8_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn receive_multiple_messages_proof_with_outbound_lane_state(i: u32) -> Weight { - (0 as Weight) - .saturating_add((130_087_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn receive_delivery_proof_for_multiple_messages_by_single_relayer(i: 
u32) -> Weight { - (126_833_000 as Weight) - .saturating_add((7_793_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight { - (71_269_000 as Weight) - .saturating_add((72_377_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(i as Weight))) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) - } -} - -// For backwards compatibility and tests -impl WeightInfo for () { - fn send_minimal_message_worst_case() -> Weight { - (140_645_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(12 as Weight)) - } - fn send_1_kb_message_worst_case() -> Weight { - (146_434_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(12 as Weight)) - } - fn send_16_kb_message_worst_case() -> Weight { - (214_721_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(12 as Weight)) - } - fn increase_message_fee() -> Weight { - (8_395_221_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - fn receive_single_message_proof() -> Weight { - (156_390_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - fn receive_two_messages_proof() -> Weight { - (269_316_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - 
.saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - (174_342_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - fn receive_single_message_proof_1_kb() -> Weight { - (186_621_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - fn receive_single_message_proof_16_kb() -> Weight { - (487_028_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - fn receive_delivery_proof_for_single_message() -> Weight { - (144_893_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - (151_134_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - (212_650_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(8 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - } - fn send_messages_of_various_lengths(i: u32) -> Weight { - (88_670_000 as Weight) - .saturating_add((5_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads(4 as Weight)) - .saturating_add(RocksDbWeight::get().writes(12 as Weight)) - } - fn receive_multiple_messages_proof(i: u32) -> Weight { - (0 as Weight) - .saturating_add((125_956_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - fn receive_message_proofs_with_extra_nodes(i: u32) -> Weight { - (462_389_000 as Weight) - 
.saturating_add((11_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - fn receive_message_proofs_with_large_leaf(i: u32) -> Weight { - (120_744_000 as Weight) - .saturating_add((8_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - fn receive_multiple_messages_proof_with_outbound_lane_state(i: u32) -> Weight { - (0 as Weight) - .saturating_add((130_087_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - fn receive_delivery_proof_for_multiple_messages_by_single_relayer(i: u32) -> Weight { - (126_833_000 as Weight) - .saturating_add((7_793_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight { - (71_269_000 as Weight) - .saturating_add((72_377_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(i as Weight))) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) - } -} diff --git a/polkadot/bridges/modules/message-lane/src/weights_ext.rs b/polkadot/bridges/modules/message-lane/src/weights_ext.rs deleted file mode 100644 index d99a20007dac1ae967dd38beea2d4d70e8c48bc4..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/message-lane/src/weights_ext.rs +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 
2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Weight-related utilities. - -use crate::weights::WeightInfo; - -use bp_message_lane::{MessageNonce, UnrewardedRelayersState}; -use bp_runtime::{PreComputedSize, Size}; -use frame_support::weights::Weight; - -/// Size of the message being delivered in benchmarks. -pub const EXPECTED_DEFAULT_MESSAGE_LENGTH: u32 = 128; - -/// We assume that size of signed extensions on all our chains and size of all 'small' arguments of calls -/// we're checking here would fit 1KB. -const SIGNED_EXTENSIONS_SIZE: u32 = 1024; - -/// Ensure that weights from `WeightInfoExt` implementation are looking correct. 
-pub fn ensure_weights_are_correct( - expected_default_message_delivery_tx_weight: Weight, - expected_additional_byte_delivery_weight: Weight, - expected_messages_delivery_confirmation_tx_weight: Weight, -) { - // verify `send_message` weight components - assert_ne!(W::send_message_overhead(), 0); - assert_ne!(W::send_message_size_overhead(0), 0); - - // verify `receive_messages_proof` weight components - assert_ne!(W::receive_messages_proof_overhead(), 0); - assert_ne!(W::receive_messages_proof_messages_overhead(1), 0); - assert_ne!(W::receive_messages_proof_outbound_lane_state_overhead(), 0); - assert_ne!(W::storage_proof_size_overhead(1), 0); - - // verify that the hardcoded value covers `receive_messages_proof` weight - let actual_single_regular_message_delivery_tx_weight = W::receive_messages_proof_weight( - &PreComputedSize((EXPECTED_DEFAULT_MESSAGE_LENGTH + W::expected_extra_storage_proof_size()) as usize), - 1, - 0, - ); - assert!( - actual_single_regular_message_delivery_tx_weight <= expected_default_message_delivery_tx_weight, - "Default message delivery transaction weight {} is larger than expected weight {}", - actual_single_regular_message_delivery_tx_weight, - expected_default_message_delivery_tx_weight, - ); - - // verify that hardcoded value covers additional byte length of `receive_messages_proof` weight - let actual_additional_byte_delivery_weight = W::storage_proof_size_overhead(1); - assert!( - actual_additional_byte_delivery_weight <= expected_additional_byte_delivery_weight, - "Single additional byte delivery weight {} is larger than expected weight {}", - actual_additional_byte_delivery_weight, - expected_additional_byte_delivery_weight, - ); - - // verify `receive_messages_delivery_proof` weight components - assert_ne!(W::receive_messages_delivery_proof_overhead(), 0); - assert_ne!(W::receive_messages_delivery_proof_messages_overhead(1), 0); - assert_ne!(W::receive_messages_delivery_proof_relayers_overhead(1), 0); - 
assert_ne!(W::storage_proof_size_overhead(1), 0); - - // verify that the hardcoded value covers `receive_messages_delivery_proof` weight - let actual_messages_delivery_confirmation_tx_weight = W::receive_messages_delivery_proof_weight( - &PreComputedSize(W::expected_extra_storage_proof_size() as usize), - &UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 1, - ..Default::default() - }, - ); - assert!( - actual_messages_delivery_confirmation_tx_weight <= expected_messages_delivery_confirmation_tx_weight, - "Messages delivery confirmation transaction weight {} is larger than expected weight {}", - actual_messages_delivery_confirmation_tx_weight, - expected_messages_delivery_confirmation_tx_weight, - ); -} - -/// Ensure that we're able to receive maximal (by-size and by-weight) message from other chain. -pub fn ensure_able_to_receive_message( - max_extrinsic_size: u32, - max_extrinsic_weight: Weight, - max_incoming_message_proof_size: u32, - // This is a base weight (which includes cost of tx itself, per-byte cost, adjusted per-byte cost) of single - // message delivery transaction that brings `max_incoming_message_proof_size` proof. 
- max_incoming_message_proof_base_weight: Weight, - max_incoming_message_dispatch_weight: Weight, -) { - // verify that we're able to receive proof of maximal-size message - let max_delivery_transaction_size = max_incoming_message_proof_size.saturating_add(SIGNED_EXTENSIONS_SIZE); - assert!( - max_delivery_transaction_size <= max_extrinsic_size, - "Size of maximal message delivery transaction {} + {} is larger than maximal possible transaction size {}", - max_incoming_message_proof_size, - SIGNED_EXTENSIONS_SIZE, - max_extrinsic_size, - ); - - // verify that we're able to receive proof of maximal-size message with maximal dispatch weight - let max_delivery_transaction_dispatch_weight = W::receive_messages_proof_weight( - &PreComputedSize((max_incoming_message_proof_size + W::expected_extra_storage_proof_size()) as usize), - 1, - max_incoming_message_dispatch_weight, - ); - let max_delivery_transaction_weight = - max_incoming_message_proof_base_weight.saturating_add(max_delivery_transaction_dispatch_weight); - assert!( - max_delivery_transaction_weight <= max_extrinsic_weight, - "Weight of maximal message delivery transaction {} + {} is larger than maximal possible transaction weight {}", - max_delivery_transaction_weight, - max_delivery_transaction_dispatch_weight, - max_extrinsic_weight, - ); -} - -/// Ensure that we're able to receive maximal confirmation from other chain. -pub fn ensure_able_to_receive_confirmation( - max_extrinsic_size: u32, - max_extrinsic_weight: Weight, - max_inbound_lane_data_proof_size_from_peer_chain: u32, - max_unrewarded_relayer_entries_at_peer_inbound_lane: MessageNonce, - max_unconfirmed_messages_at_inbound_lane: MessageNonce, - // This is a base weight (which includes cost of tx itself, per-byte cost, adjusted per-byte cost) of single - // confirmation transaction that brings `max_inbound_lane_data_proof_size_from_peer_chain` proof. 
- max_incoming_delivery_proof_base_weight: Weight, -) { - // verify that we're able to receive confirmation of maximal-size - let max_confirmation_transaction_size = - max_inbound_lane_data_proof_size_from_peer_chain.saturating_add(SIGNED_EXTENSIONS_SIZE); - assert!( - max_confirmation_transaction_size <= max_extrinsic_size, - "Size of maximal message delivery confirmation transaction {} + {} is larger than maximal possible transaction size {}", - max_inbound_lane_data_proof_size_from_peer_chain, - SIGNED_EXTENSIONS_SIZE, - max_extrinsic_size, - ); - - // verify that we're able to reward maximal number of relayers that have delivered maximal number of messages - let max_confirmation_transaction_dispatch_weight = W::receive_messages_delivery_proof_weight( - &PreComputedSize(max_inbound_lane_data_proof_size_from_peer_chain as usize), - &UnrewardedRelayersState { - unrewarded_relayer_entries: max_unrewarded_relayer_entries_at_peer_inbound_lane, - total_messages: max_unconfirmed_messages_at_inbound_lane, - ..Default::default() - }, - ); - let max_confirmation_transaction_weight = - max_incoming_delivery_proof_base_weight.saturating_add(max_confirmation_transaction_dispatch_weight); - assert!( - max_confirmation_transaction_weight <= max_extrinsic_weight, - "Weight of maximal confirmation transaction {} + {} is larger than maximal possible transaction weight {}", - max_incoming_delivery_proof_base_weight, - max_confirmation_transaction_dispatch_weight, - max_extrinsic_weight, - ); -} - -/// Extended weight info. -pub trait WeightInfoExt: WeightInfo { - /// Size of proof that is already included in the single message delivery weight. - /// - /// The message submitter (at source chain) has already covered this cost. But there are two - /// factors that may increase proof size: (1) the message size may be larger than predefined - /// and (2) relayer may add extra trie nodes to the proof. 
So if proof size is larger than - /// this value, we're going to charge relayer for that. - fn expected_extra_storage_proof_size() -> u32; - - // Functions that are directly mapped to extrinsics weights. - - /// Weight of message send extrinsic. - fn send_message_weight(message: &impl Size) -> Weight { - let transaction_overhead = Self::send_message_overhead(); - let message_size_overhead = Self::send_message_size_overhead(message.size_hint()); - - transaction_overhead.saturating_add(message_size_overhead) - } - - /// Weight of message delivery extrinsic. - fn receive_messages_proof_weight(proof: &impl Size, messages_count: u32, dispatch_weight: Weight) -> Weight { - // basic components of extrinsic weight - let transaction_overhead = Self::receive_messages_proof_overhead(); - let outbound_state_delivery_weight = Self::receive_messages_proof_outbound_lane_state_overhead(); - let messages_delivery_weight = - Self::receive_messages_proof_messages_overhead(MessageNonce::from(messages_count)); - let messages_dispatch_weight = dispatch_weight; - - // proof size overhead weight - let expected_proof_size = EXPECTED_DEFAULT_MESSAGE_LENGTH - .saturating_mul(messages_count.saturating_sub(1)) - .saturating_add(Self::expected_extra_storage_proof_size()); - let actual_proof_size = proof.size_hint(); - let proof_size_overhead = - Self::storage_proof_size_overhead(actual_proof_size.saturating_sub(expected_proof_size)); - - transaction_overhead - .saturating_add(outbound_state_delivery_weight) - .saturating_add(messages_delivery_weight) - .saturating_add(messages_dispatch_weight) - .saturating_add(proof_size_overhead) - } - - /// Weight of confirmation delivery extrinsic. 
- fn receive_messages_delivery_proof_weight(proof: &impl Size, relayers_state: &UnrewardedRelayersState) -> Weight { - // basic components of extrinsic weight - let transaction_overhead = Self::receive_messages_delivery_proof_overhead(); - let messages_overhead = Self::receive_messages_delivery_proof_messages_overhead(relayers_state.total_messages); - let relayers_overhead = - Self::receive_messages_delivery_proof_relayers_overhead(relayers_state.unrewarded_relayer_entries); - - // proof size overhead weight - let expected_proof_size = Self::expected_extra_storage_proof_size(); - let actual_proof_size = proof.size_hint(); - let proof_size_overhead = - Self::storage_proof_size_overhead(actual_proof_size.saturating_sub(expected_proof_size)); - - transaction_overhead - .saturating_add(messages_overhead) - .saturating_add(relayers_overhead) - .saturating_add(proof_size_overhead) - } - - // Functions that are used by extrinsics weights formulas. - - /// Returns weight of message send transaction (`send_message`). - fn send_message_overhead() -> Weight { - Self::send_minimal_message_worst_case() - } - - /// Returns weight that needs to be accounted when message of given size is sent (`send_message`). - fn send_message_size_overhead(message_size: u32) -> Weight { - let message_size_in_kb = (1024u64 + message_size as u64) / 1024; - let single_kb_weight = (Self::send_16_kb_message_worst_case() - Self::send_1_kb_message_worst_case()) / 15; - message_size_in_kb * single_kb_weight - } - - /// Returns weight overhead of message delivery transaction (`receive_messages_proof`). 
- fn receive_messages_proof_overhead() -> Weight { - let weight_of_two_messages_and_two_tx_overheads = Self::receive_single_message_proof().saturating_mul(2); - let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof(); - weight_of_two_messages_and_two_tx_overheads.saturating_sub(weight_of_two_messages_and_single_tx_overhead) - } - - /// Returns weight that needs to be accounted when receiving given number of messages with message - /// delivery transaction (`receive_messages_proof`). - fn receive_messages_proof_messages_overhead(messages: MessageNonce) -> Weight { - let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof(); - let weight_of_single_message_and_single_tx_overhead = Self::receive_single_message_proof(); - weight_of_two_messages_and_single_tx_overhead - .saturating_sub(weight_of_single_message_and_single_tx_overhead) - .saturating_mul(messages as Weight) - } - - /// Returns weight that needs to be accounted when message delivery transaction (`receive_messages_proof`) - /// is carrying outbound lane state proof. - fn receive_messages_proof_outbound_lane_state_overhead() -> Weight { - let weight_of_single_message_and_lane_state = Self::receive_single_message_proof_with_outbound_lane_state(); - let weight_of_single_message = Self::receive_single_message_proof(); - weight_of_single_message_and_lane_state.saturating_sub(weight_of_single_message) - } - - /// Returns weight overhead of delivery confirmation transaction (`receive_messages_delivery_proof`). 
- fn receive_messages_delivery_proof_overhead() -> Weight { - let weight_of_two_messages_and_two_tx_overheads = - Self::receive_delivery_proof_for_single_message().saturating_mul(2); - let weight_of_two_messages_and_single_tx_overhead = - Self::receive_delivery_proof_for_two_messages_by_single_relayer(); - weight_of_two_messages_and_two_tx_overheads.saturating_sub(weight_of_two_messages_and_single_tx_overhead) - } - - /// Returns weight that needs to be accounted when receiving confirmations for given number of - /// messages with delivery confirmation transaction (`receive_messages_delivery_proof`). - fn receive_messages_delivery_proof_messages_overhead(messages: MessageNonce) -> Weight { - let weight_of_two_messages = Self::receive_delivery_proof_for_two_messages_by_single_relayer(); - let weight_of_single_message = Self::receive_delivery_proof_for_single_message(); - weight_of_two_messages - .saturating_sub(weight_of_single_message) - .saturating_mul(messages as Weight) - } - - /// Returns weight that needs to be accounted when receiving confirmations for given number of - /// relayers entries with delivery confirmation transaction (`receive_messages_delivery_proof`). - fn receive_messages_delivery_proof_relayers_overhead(relayers: MessageNonce) -> Weight { - let weight_of_two_messages_by_two_relayers = Self::receive_delivery_proof_for_two_messages_by_two_relayers(); - let weight_of_two_messages_by_single_relayer = - Self::receive_delivery_proof_for_two_messages_by_single_relayer(); - weight_of_two_messages_by_two_relayers - .saturating_sub(weight_of_two_messages_by_single_relayer) - .saturating_mul(relayers as Weight) - } - - /// Returns weight that needs to be accounted when storage proof of given size is recieved (either in - /// `receive_messages_proof` or `receive_messages_delivery_proof`). - /// - /// **IMPORTANT**: this overhead is already included in the 'base' transaction cost - e.g. 
proof - /// size depends on messages count or number of entries in the unrewarded relayers set. So this - /// shouldn't be added to cost of transaction, but instead should act as a minimal cost that the - /// relayer must pay when it relays proof of given size (even if cost based on other parameters - /// is less than that cost). - fn storage_proof_size_overhead(proof_size: u32) -> Weight { - let proof_size_in_bytes = proof_size as Weight; - let byte_weight = - (Self::receive_single_message_proof_16_kb() - Self::receive_single_message_proof_1_kb()) / (15 * 1024); - proof_size_in_bytes * byte_weight - } -} - -impl WeightInfoExt for () { - fn expected_extra_storage_proof_size() -> u32 { - bp_rialto::EXTRA_STORAGE_PROOF_SIZE - } -} - -impl WeightInfoExt for crate::weights::RialtoWeight { - fn expected_extra_storage_proof_size() -> u32 { - bp_rialto::EXTRA_STORAGE_PROOF_SIZE - } -} diff --git a/polkadot/bridges/modules/shift-session-manager/Cargo.toml b/polkadot/bridges/modules/shift-session-manager/Cargo.toml deleted file mode 100644 index 6dac97ddde601eff0ddef5cb64dde0bae9ed5b17..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/shift-session-manager/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "pallet-shift-session-manager" -description = "A Substrate Runtime module that selects 2/3 of initial validators for every session" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } 
-sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -serde = "1.0" - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-support/std", - "frame-system/std", - "pallet-session/std", - "sp-staking/std", - "sp-std/std", -] diff --git a/polkadot/bridges/modules/shift-session-manager/src/lib.rs b/polkadot/bridges/modules/shift-session-manager/src/lib.rs deleted file mode 100644 index a463d868b19199bf6b5a30caa3d967297c4a400c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/shift-session-manager/src/lib.rs +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate session manager that selects 2/3 validators from initial set, -//! starting from session 2. - -#![cfg_attr(not(feature = "std"), no_std)] - -use frame_support::{decl_module, decl_storage}; -use sp_std::prelude::*; - -/// The module configuration trait. 
-pub trait Config: pallet_session::Config {} - -decl_module! { - /// Shift session manager pallet. - pub struct Module for enum Call where origin: T::Origin {} -} - -decl_storage! { - trait Store for Module as ShiftSessionManager { - /// Validators of first two sessions. - InitialValidators: Option>; - } -} - -impl pallet_session::SessionManager for Module { - fn end_session(_: sp_staking::SessionIndex) {} - fn start_session(_: sp_staking::SessionIndex) {} - fn new_session(session_index: sp_staking::SessionIndex) -> Option> { - // we don't want to add even more fields to genesis config => just return None - if session_index == 0 || session_index == 1 { - return None; - } - - // the idea that on first call (i.e. when session 1 ends) we're reading current - // set of validators from session module (they are initial validators) and save - // in our 'local storage'. - // then for every session we select (deterministically) 2/3 of these initial - // validators to serve validators of new session - let available_validators = InitialValidators::::get().unwrap_or_else(|| { - let validators = >::validators(); - InitialValidators::::put(validators.clone()); - validators - }); - - Some(Self::select_validators(session_index, &available_validators)) - } -} - -impl Module { - /// Select validators for session. - fn select_validators( - session_index: sp_staking::SessionIndex, - available_validators: &[T::ValidatorId], - ) -> Vec { - let available_validators_count = available_validators.len(); - let count = sp_std::cmp::max(1, 2 * available_validators_count / 3); - let offset = session_index as usize % available_validators_count; - let end = offset + count; - let session_validators = match end.overflowing_sub(available_validators_count) { - (wrapped_end, false) if wrapped_end != 0 => available_validators[offset..] 
- .iter() - .chain(available_validators[..wrapped_end].iter()) - .cloned() - .collect(), - _ => available_validators[offset..end].to_vec(), - }; - - session_validators - } -} - -#[cfg(test)] -mod tests { - // From construct_runtime macro - #![allow(clippy::from_over_into)] - - use super::*; - use frame_support::sp_io::TestExternalities; - use frame_support::sp_runtime::{ - testing::{Header, UintAuthorityId}, - traits::{BlakeTwo256, ConvertInto, IdentityLookup}, - Perbill, RuntimeAppPublic, - }; - use frame_support::{parameter_types, weights::Weight, BasicExternalities}; - use sp_core::H256; - - type AccountId = u64; - - type Block = frame_system::mocking::MockBlock; - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - - frame_support::construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Session: pallet_session::{Pallet}, - } - } - - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - - impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = (); - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); - } - - parameter_types! 
{ - pub const Period: u64 = 1; - pub const Offset: u64 = 0; - } - - impl pallet_session::Config for TestRuntime { - type Event = (); - type ValidatorId = ::AccountId; - type ValidatorIdOf = ConvertInto; - type ShouldEndSession = pallet_session::PeriodicSessions; - type NextSessionRotation = pallet_session::PeriodicSessions; - type SessionManager = (); - type SessionHandler = TestSessionHandler; - type Keys = UintAuthorityId; - type DisabledValidatorsThreshold = (); - type WeightInfo = (); - } - - impl Config for TestRuntime {} - - pub struct TestSessionHandler; - impl pallet_session::SessionHandler for TestSessionHandler { - const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID]; - - fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} - - fn on_new_session(_: bool, _: &[(AccountId, Ks)], _: &[(AccountId, Ks)]) {} - - fn on_disabled(_: usize) {} - } - - fn new_test_ext() -> TestExternalities { - let mut t = frame_system::GenesisConfig::default() - .build_storage::() - .unwrap(); - - let keys = vec![ - (1, 1, UintAuthorityId(1)), - (2, 2, UintAuthorityId(2)), - (3, 3, UintAuthorityId(3)), - (4, 4, UintAuthorityId(4)), - (5, 5, UintAuthorityId(5)), - ]; - - BasicExternalities::execute_with_storage(&mut t, || { - for (ref k, ..) 
in &keys { - frame_system::Pallet::::inc_providers(k); - } - }); - - pallet_session::GenesisConfig:: { keys } - .assimilate_storage(&mut t) - .unwrap(); - TestExternalities::new(t) - } - - #[test] - fn shift_session_manager_works() { - new_test_ext().execute_with(|| { - let all_accs = vec![1, 2, 3, 4, 5]; - - // at least 1 validator is selected - assert_eq!(Module::::select_validators(0, &[1]), vec![1],); - - // at session#0, shift is also 0 - assert_eq!(Module::::select_validators(0, &all_accs), vec![1, 2, 3],); - - // at session#1, shift is also 1 - assert_eq!(Module::::select_validators(1, &all_accs), vec![2, 3, 4],); - - // at session#3, we're wrapping - assert_eq!(Module::::select_validators(3, &all_accs), vec![4, 5, 1],); - - // at session#5, we're starting from the beginning again - assert_eq!(Module::::select_validators(5, &all_accs), vec![1, 2, 3],); - }); - } -} diff --git a/polkadot/bridges/modules/substrate/Cargo.toml b/polkadot/bridges/modules/substrate/Cargo.toml deleted file mode 100644 index 490aa2098b9ffe61096048d6198046165214de1a..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/substrate/Cargo.toml +++ /dev/null @@ -1,53 +0,0 @@ -[package] -name = "pallet-substrate-bridge" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -finality-grandpa = { version = "0.14.0", default-features = false } -hash-db = { version = "0.15.2", default-features = false } -serde = { version = "1.0", optional = true } - -# Bridge Dependencies - -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } - -# Substrate Dependencies - -frame-support = { git = 
"https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[dev-dependencies] -bp-test-utils = {path = "../../primitives/test-utils" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[features] -default = ["std"] -std = [ - "bp-header-chain/std", - "bp-runtime/std", - "bp-header-chain/std", - "codec/std", - "finality-grandpa/std", - "frame-support/std", - "frame-system/std", - "hash-db/std", - "serde", - "sp-finality-grandpa/std", - "sp-runtime/std", - "sp-std/std", - "sp-trie/std", -] -runtime-benchmarks = [] diff --git a/polkadot/bridges/modules/substrate/src/fork_tests.rs b/polkadot/bridges/modules/substrate/src/fork_tests.rs deleted file mode 100644 index 445ffd8ce500858f774902b5cc3369dd93aefea9..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/substrate/src/fork_tests.rs +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tests for checking that behaviour of importing headers and finality proofs works correctly. -//! -//! The tests are built around the idea that we will be importing headers on different forks and we -//! should be able to check that we're correctly importing headers, scheduling changes, and -//! finalizing headers across different forks. -//! -//! Each test is depicted using beautiful ASCII art. The symbols used in the tests are the -//! following: -//! -//! - S|N: Schedules change in N blocks -//! - E: Enacts change -//! - F: Finalized -//! - FN: Finality proof imported for header N -//! -//! Each diagram also comes with an import order. This is important since we expect things to fail -//! when headers or proofs are imported in a certain order. -//! -//! Tests can be read as follows: -//! -//! ## Example Import 1 -//! -//! (Type::Header(2, 1, None, None), Ok(())) -//! -//! Import header 2 on fork 1. This does not create a fork, or schedule an authority set change. We -//! expect this header import to be succesful. -//! -//! ## Example Import 2 -//! -//! (Type::Header(4, 2, Some((3, 1)), Some(0)), Ok(())) -//! -//! Import header 4 on fork 2. This header starts a new fork from header 3 on fork 1. It also -//! schedules a change with a delay of 0 blocks. It should be succesfully imported. -//! -//! 
## Example Import 3 -//! -//! (Type::Finality(2, 1), Err(FinalizationError::OldHeader.into())) -//! -//! Import a finality proof for header 2 on fork 1. This finalty proof should fail to be imported -//! because the header is an old header. - -use crate::mock::*; -use crate::storage::ImportedHeader; -use crate::verifier::*; -use crate::{BestFinalized, BestHeight, BridgeStorage, NextScheduledChange, PalletStorage}; -use bp_header_chain::AuthoritySet; -use bp_test_utils::{alice, authority_list, bob, make_justification_for_header}; -use codec::Encode; -use frame_support::{IterableStorageMap, StorageValue}; -use sp_finality_grandpa::{ConsensusLog, GRANDPA_ENGINE_ID}; -use sp_runtime::{Digest, DigestItem}; -use std::collections::BTreeMap; - -type ForkId = u64; -type Delay = u64; - -// Indicates when to start a new fork. The first item in the tuple -// will be the parent header of the header starting this fork. -type ForksAt = Option<(TestNumber, ForkId)>; -type ScheduledChangeAt = Option; - -#[derive(Debug)] -enum Type { - Header(TestNumber, ForkId, ForksAt, ScheduledChangeAt), - Finality(TestNumber, ForkId), -} - -// Order: 1, 2, 2', 3, 3'' -// -// / [3''] -// / [2'] -// [1] <- [2] <- [3] -#[test] -fn fork_can_import_headers_on_different_forks() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, None), Ok(())), - (Type::Header(2, 2, Some((1, 1)), None), Ok(())), - (Type::Header(3, 1, None, None), Ok(())), - (Type::Header(3, 3, Some((2, 2)), None), Ok(())), - ]; - - create_chain(&mut storage, &mut chain); - - let best_headers = storage.best_headers(); - assert_eq!(best_headers.len(), 2); - assert_eq!(>::get(), 3); - }) -} - -// Order: 1, 2, 2', F2, F2' -// -// [1] <- [2: F] -// \ [2'] -// -// Not allowed to finalize 2' -#[test] -fn fork_does_not_allow_competing_finality_proofs() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = 
vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, None), Ok(())), - (Type::Header(2, 2, Some((1, 1)), None), Ok(())), - (Type::Finality(2, 1), Ok(())), - (Type::Finality(2, 2), Err(FinalizationError::OldHeader.into())), - ]; - - create_chain(&mut storage, &mut chain); - }) -} - -// Order: 1, 2, 3, F2, 3 -// -// [1] <- [2: S|0] <- [3] -// -// Not allowed to import 3 until we get F2 -// -// Note: GRANDPA would technically allow 3 to be imported as long as it didn't try and enact an -// authority set change. However, since we expect finality proofs to be imported quickly we've -// decided to simplify our import process and disallow header imports until we get a finality proof. -#[test] -fn fork_waits_for_finality_proof_before_importing_header_past_one_which_enacts_a_change() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, Some(0)), Ok(())), - ( - Type::Header(3, 1, None, None), - Err(ImportError::AwaitingFinalityProof.into()), - ), - (Type::Finality(2, 1), Ok(())), - (Type::Header(3, 1, None, None), Ok(())), - ]; - - create_chain(&mut storage, &mut chain); - }) -} - -// Order: 1, 2, F2, 3 -// -// [1] <- [2: S|1] <- [3: S|0] -// -// GRANDPA can have multiple authority set changes pending on the same fork. However, we've decided -// to introduce a limit of _one_ pending authority set change per fork in order to simplify pallet -// logic and to prevent DoS attacks if GRANDPA finality were to temporarily stall for a long time -// (we'd have to perform a lot of expensive ancestry checks to catch back up). 
-#[test] -fn fork_does_not_allow_multiple_scheduled_changes_on_the_same_fork() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, Some(1)), Ok(())), - ( - Type::Header(3, 1, None, Some(0)), - Err(ImportError::PendingAuthoritySetChange.into()), - ), - (Type::Finality(2, 1), Ok(())), - (Type::Header(3, 1, None, Some(0)), Ok(())), - ]; - - create_chain(&mut storage, &mut chain); - }) -} - -// Order: 1, 2, 2' -// -// / [2': S|0] -// [1] <- [2: S|0] -// -// Both 2 and 2' should be marked as needing justifications since they enact changes. -#[test] -fn fork_correctly_tracks_which_headers_require_finality_proofs() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, Some(0)), Ok(())), - (Type::Header(2, 2, Some((1, 1)), Some(0)), Ok(())), - ]; - - create_chain(&mut storage, &mut chain); - - let header_ids = storage.missing_justifications(); - assert_eq!(header_ids.len(), 2); - assert!(header_ids[0].hash != header_ids[1].hash); - assert_eq!(header_ids[0].number, 2); - assert_eq!(header_ids[1].number, 2); - }) -} - -// Order: 1, 2, 2', 3', F2, 3, 4' -// -// / [2': S|1] <- [3'] <- [4'] -// [1] <- [2: S|0] <- [3] -// -// -// Not allowed to import 3 or 4' -// Can only import 3 after we get the finality proof for 2 -#[test] -fn fork_does_not_allow_importing_past_header_that_enacts_changes_on_forks() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, Some(0)), Ok(())), - (Type::Header(2, 2, Some((1, 1)), Some(1)), Ok(())), - ( - Type::Header(3, 1, None, None), - Err(ImportError::AwaitingFinalityProof.into()), - ), - (Type::Header(3, 2, None, None), Ok(())), - (Type::Finality(2, 1), Ok(())), - (Type::Header(3, 1, None, None), Ok(())), - ( - Type::Header(4, 2, 
None, None), - Err(ImportError::AwaitingFinalityProof.into()), - ), - ]; - - create_chain(&mut storage, &mut chain); - - // Since we can't query the map directly to check if we applied the right authority set - // change (we don't know the header hash of 2) we need to get a little clever. - let mut next_change = >::iter(); - let (_, scheduled_change_on_fork) = next_change.next().unwrap(); - assert_eq!(scheduled_change_on_fork.height, 3); - - // Sanity check to make sure we enacted the change on the canonical change - assert_eq!(next_change.next(), None); - }) -} - -// Order: 1, 2, 3, 2', 3' -// -// / [2'] <- [3'] -// [1] <- [2: S|0] <- [3] -// -// Not allowed to import 3 -// Fine to import 2' and 3' -#[test] -fn fork_allows_importing_on_different_fork_while_waiting_for_finality_proof() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, Some(0)), Ok(())), - ( - Type::Header(3, 1, None, None), - Err(ImportError::AwaitingFinalityProof.into()), - ), - (Type::Header(2, 2, Some((1, 1)), None), Ok(())), - (Type::Header(3, 2, None, None), Ok(())), - ]; - - create_chain(&mut storage, &mut chain); - }) -} - -// Order: 1, 2, 2', F2, 3, 3' -// -// / [2'] <- [3'] -// [1] <- [2: F] <- [3] -// -// In our current implementation we're allowed to keep building on fork 2 for as long as our hearts' -// content. However, we'll never be able to finalize anything on that fork. We'd have to check for -// ancestry with `best_finalized` on every import which will get expensive. -// -// I think this is fine as long as we run pruning every so often to clean up these dead forks. 
-#[test] -fn fork_allows_importing_on_different_fork_past_finalized_header() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, Some(0)), Ok(())), - (Type::Header(2, 2, Some((1, 1)), None), Ok(())), - (Type::Finality(2, 1), Ok(())), - (Type::Header(3, 1, None, None), Ok(())), - (Type::Header(3, 2, None, None), Ok(())), - ]; - - create_chain(&mut storage, &mut chain); - }) -} - -// Order: 1, 2, 3, 4, 3', 4' -// -// / [3': E] <- [4'] -// [1] <- [2: S|1] <- [3: E] <- [4] -// -// Not allowed to import {4|4'} -#[test] -fn fork_can_track_scheduled_changes_across_forks() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut chain = vec![ - (Type::Header(1, 1, None, None), Ok(())), - (Type::Header(2, 1, None, Some(1)), Ok(())), - (Type::Header(3, 1, None, None), Ok(())), - ( - Type::Header(4, 1, None, None), - Err(ImportError::AwaitingFinalityProof.into()), - ), - (Type::Header(3, 2, Some((2, 1)), None), Ok(())), - ( - Type::Header(4, 2, None, None), - Err(ImportError::AwaitingFinalityProof.into()), - ), - ]; - - create_chain(&mut storage, &mut chain); - }) -} - -#[derive(Debug, PartialEq)] -enum TestError { - Import(ImportError), - Finality(FinalizationError), -} - -impl From for TestError { - fn from(e: ImportError) -> Self { - TestError::Import(e) - } -} - -impl From for TestError { - fn from(e: FinalizationError) -> Self { - TestError::Finality(e) - } -} - -// Builds a fork-aware representation of a blockchain given a list of headers. -// -// Takes a list of headers and finality proof operations which will be applied in order. The -// expected outcome for each operation is also required. -// -// The first header in the list will be used as the genesis header and will be manually imported -// into storage. -fn create_chain(storage: &mut S, chain: &mut Vec<(Type, Result<(), TestError>)>) -where - S: BridgeStorage
+ Clone, -{ - let mut map = BTreeMap::new(); - let mut verifier = Verifier { - storage: storage.clone(), - }; - initialize_genesis(storage, &mut map, chain.remove(0).0); - - for h in chain { - match h { - (Type::Header(num, fork_id, does_fork, schedules_change), expected_result) => { - // If we've never seen this fork before - if !map.contains_key(&fork_id) { - // Let's get the info about where to start the fork - if let Some((parent_num, forked_from_id)) = does_fork { - let fork = &*map.get(&forked_from_id).unwrap(); - let parent = fork - .iter() - .find(|h| h.number == *parent_num) - .expect("Trying to fork on a parent which doesn't exist"); - - let mut header = test_header(*num); - header.parent_hash = parent.hash(); - header.state_root = [*fork_id as u8; 32].into(); - - if let Some(delay) = schedules_change { - header.digest = change_log(*delay); - } - - // Try and import into storage - let res = verifier - .import_header(header.hash(), header.clone()) - .map_err(TestError::Import); - assert_eq!( - res, *expected_result, - "Expected {:?} while importing header ({}, {}), got {:?}", - *expected_result, *num, *fork_id, res, - ); - - // Let's mark the header down in a new fork - if res.is_ok() { - map.insert(*fork_id, vec![header]); - } - } - } else { - // We've seen this fork before so let's append our new header to it - let parent_hash = { - let fork = &*map.get(&fork_id).unwrap(); - fork.last().unwrap().hash() - }; - - let mut header = test_header(*num); - header.parent_hash = parent_hash; - - // Doing this to make sure headers at the same height but on - // different forks have different hashes - header.state_root = [*fork_id as u8; 32].into(); - - if let Some(delay) = schedules_change { - header.digest = change_log(*delay); - } - - let res = verifier - .import_header(header.hash(), header.clone()) - .map_err(TestError::Import); - assert_eq!( - res, *expected_result, - "Expected {:?} while importing header ({}, {}), got {:?}", - *expected_result, *num, 
*fork_id, res, - ); - - if res.is_ok() { - map.get_mut(&fork_id).unwrap().push(header); - } - } - } - (Type::Finality(num, fork_id), expected_result) => { - let header = map[fork_id] - .iter() - .find(|h| h.number == *num) - .expect("Trying to finalize block that doesn't exist"); - - // This is technically equivocating (accepting the same justification on the same - // `grandpa_round`). - // - // See for more: https://github.com/paritytech/parity-bridges-common/issues/430 - let grandpa_round = 1; - let set_id = 1; - let authorities = authority_list(); - let justification = make_justification_for_header(header, grandpa_round, set_id, &authorities).encode(); - - let res = verifier - .import_finality_proof(header.hash(), justification.into()) - .map_err(TestError::Finality); - assert_eq!( - res, *expected_result, - "Expected {:?} while importing finality proof for header ({}, {}), got {:?}", - *expected_result, *num, *fork_id, res, - ); - } - } - } - - for (key, value) in map.iter() { - println!("{}: {:#?}", key, value); - } -} - -fn initialize_genesis(storage: &mut S, map: &mut BTreeMap>, genesis: Type) -where - S: BridgeStorage
, -{ - if let Type::Header(num, fork_id, None, None) = genesis { - let genesis = test_header(num); - map.insert(fork_id, vec![genesis.clone()]); - - let genesis = ImportedHeader { - header: genesis, - requires_justification: false, - is_finalized: true, - signal_hash: None, - }; - - >::put(genesis.hash()); - storage.write_header(&genesis); - } else { - panic!("Unexpected genesis block format {:#?}", genesis) - } - - let set_id = 1; - let authorities = authority_list(); - let authority_set = AuthoritySet::new(authorities, set_id); - storage.update_current_authority_set(authority_set); -} - -pub(crate) fn change_log(delay: u64) -> Digest { - let consensus_log = ConsensusLog::::ScheduledChange(sp_finality_grandpa::ScheduledChange { - next_authorities: vec![(alice(), 1), (bob(), 1)], - delay, - }); - - Digest:: { - logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())], - } -} diff --git a/polkadot/bridges/modules/substrate/src/lib.rs b/polkadot/bridges/modules/substrate/src/lib.rs deleted file mode 100644 index c14db8596f89ee13bf5cfb2f407038eece2d9ff3..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/substrate/src/lib.rs +++ /dev/null @@ -1,1013 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate Bridge Pallet -//! 
-//! This pallet is an on-chain light client for chains which have a notion of finality. -//! -//! It has a simple interface for achieving this. First it can import headers to the runtime -//! storage. During this it will check the validity of the headers and ensure they don't conflict -//! with any existing headers (e.g they're on a different finalized chain). Secondly it can finalize -//! an already imported header (and its ancestors) given a valid GRANDPA justification. -//! -//! With these two functions the pallet is able to form a "source of truth" for what headers have -//! been finalized on a given Substrate chain. This can be a useful source of info for other -//! higher-level applications. - -#![cfg_attr(not(feature = "std"), no_std)] -// Runtime-generated enums -#![allow(clippy::large_enum_variant)] - -use crate::storage::ImportedHeader; -use bp_header_chain::AuthoritySet; -use bp_runtime::{BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf}; -use frame_support::{ - decl_error, decl_module, decl_storage, dispatch::DispatchResult, ensure, traits::Get, weights::DispatchClass, -}; -use frame_system::{ensure_signed, RawOrigin}; -use sp_runtime::traits::Header as HeaderT; -use sp_runtime::{traits::BadOrigin, RuntimeDebug}; -use sp_std::{marker::PhantomData, prelude::*}; -use sp_trie::StorageProof; - -// Re-export since the node uses these when configuring genesis -pub use storage::{InitializationData, ScheduledChange}; - -pub use storage_proof::StorageProofChecker; - -mod storage; -mod storage_proof; -mod verifier; - -#[cfg(test)] -mod mock; - -#[cfg(test)] -mod fork_tests; - -/// Block number of the bridged chain. -pub(crate) type BridgedBlockNumber = BlockNumberOf<::BridgedChain>; -/// Block hash of the bridged chain. -pub(crate) type BridgedBlockHash = HashOf<::BridgedChain>; -/// Hasher of the bridged chain. -pub(crate) type BridgedBlockHasher = HasherOf<::BridgedChain>; -/// Header of the bridged chain. 
-pub(crate) type BridgedHeader = HeaderOf<::BridgedChain>; - -/// A convenience type identifying headers. -#[derive(RuntimeDebug, PartialEq)] -pub struct HeaderId { - /// The block number of the header. - pub number: H::Number, - /// The hash of the header. - pub hash: H::Hash, -} - -pub trait Config: frame_system::Config { - /// Chain that we are bridging here. - type BridgedChain: Chain; -} - -decl_storage! { - trait Store for Module as SubstrateBridge { - /// Hash of the header used to bootstrap the pallet. - InitialHash: BridgedBlockHash; - /// The number of the highest block(s) we know of. - BestHeight: BridgedBlockNumber; - /// Hash of the header at the highest known height. - /// - /// If there are multiple headers at the same "best" height - /// this will contain all of their hashes. - BestHeaders: Vec>; - /// Hash of the best finalized header. - BestFinalized: BridgedBlockHash; - /// The set of header IDs (number, hash) which enact an authority set change and therefore - /// require a GRANDPA justification. - RequiresJustification: map hasher(identity) BridgedBlockHash => BridgedBlockNumber; - /// Headers which have been imported into the pallet. - ImportedHeaders: map hasher(identity) BridgedBlockHash => Option>>; - /// The current GRANDPA Authority set. - CurrentAuthoritySet: AuthoritySet; - /// The next scheduled authority set change for a given fork. - /// - /// The fork is indicated by the header which _signals_ the change (key in the mapping). - /// Note that this is different than a header which _enacts_ a change. - // GRANDPA doesn't require there to always be a pending change. In fact, most of the time - // there will be no pending change available. - NextScheduledChange: map hasher(identity) BridgedBlockHash => Option>>; - /// Optional pallet owner. - /// - /// Pallet owner has a right to halt all pallet operations and then resume it. 
If it is - /// `None`, then there are no direct ways to halt/resume pallet operations, but other - /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt - /// flag directly or call the `halt_operations`). - ModuleOwner get(fn module_owner): Option; - /// If true, all pallet transactions are failed immediately. - IsHalted get(fn is_halted): bool; - } - add_extra_genesis { - config(owner): Option; - config(init_data): Option>>; - build(|config| { - if let Some(ref owner) = config.owner { - >::put(owner); - } - - if let Some(init_data) = config.init_data.clone() { - initialize_bridge::(init_data); - } else { - // Since the bridge hasn't been initialized we shouldn't allow anyone to perform - // transactions. - IsHalted::put(true); - } - }) - } -} - -decl_error! { - pub enum Error for Module { - /// This header has failed basic verification. - InvalidHeader, - /// This header has not been finalized. - UnfinalizedHeader, - /// The header is unknown. - UnknownHeader, - /// The storage proof doesn't contains storage root. So it is invalid for given header. - StorageRootMismatch, - /// Error when trying to fetch storage value from the proof. - StorageValueUnavailable, - /// All pallet operations are halted. - Halted, - /// The pallet has already been initialized. - AlreadyInitialized, - /// The given header is not a descendant of a particular header. - NotDescendant, - } -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - /// Import a signed Substrate header into the runtime. - /// - /// This will perform some basic checks to make sure it is fine to - /// import into the runtime. However, it does not perform any checks - /// related to finality. 
- // TODO: Update weights [#78] - #[weight = 0] - pub fn import_signed_header( - origin, - header: BridgedHeader, - ) -> DispatchResult { - ensure_operational::()?; - let _ = ensure_signed(origin)?; - let hash = header.hash(); - frame_support::debug::trace!("Going to import header {:?}: {:?}", hash, header); - - let mut verifier = verifier::Verifier { - storage: PalletStorage::::new(), - }; - - let _ = verifier - .import_header(hash, header) - .map_err(|e| { - frame_support::debug::error!("Failed to import header {:?}: {:?}", hash, e); - >::InvalidHeader - })?; - - frame_support::debug::trace!("Successfully imported header: {:?}", hash); - - Ok(()) - } - - /// Import a finalty proof for a particular header. - /// - /// This will take care of finalizing any already imported headers - /// which get finalized when importing this particular proof, as well - /// as updating the current and next validator sets. - // TODO: Update weights [#78] - #[weight = 0] - pub fn finalize_header( - origin, - hash: BridgedBlockHash, - finality_proof: Vec, - ) -> DispatchResult { - ensure_operational::()?; - let _ = ensure_signed(origin)?; - frame_support::debug::trace!("Going to finalize header: {:?}", hash); - - let mut verifier = verifier::Verifier { - storage: PalletStorage::::new(), - }; - - let _ = verifier - .import_finality_proof(hash, finality_proof.into()) - .map_err(|e| { - frame_support::debug::error!("Failed to finalize header {:?}: {:?}", hash, e); - >::UnfinalizedHeader - })?; - - frame_support::debug::trace!("Successfully finalized header: {:?}", hash); - - Ok(()) - } - - /// Bootstrap the bridge pallet with an initial header and authority set from which to sync. - /// - /// The initial configuration provided does not need to be the genesis header of the bridged - /// chain, it can be any arbirary header. You can also provide the next scheduled set change - /// if it is already know. 
- /// - /// This function is only allowed to be called from a trusted origin and writes to storage - /// with practically no checks in terms of the validity of the data. It is important that - /// you ensure that valid data is being passed in. - //TODO: Update weights [#78] - #[weight = 0] - pub fn initialize( - origin, - init_data: InitializationData>, - ) { - ensure_owner_or_root::(origin)?; - let init_allowed = !>::exists(); - ensure!(init_allowed, >::AlreadyInitialized); - initialize_bridge::(init_data.clone()); - - frame_support::debug::info!( - "Pallet has been initialized with the following parameters: {:?}", init_data - ); - } - - /// Change `ModuleOwner`. - /// - /// May only be called either by root, or by `ModuleOwner`. - #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] - pub fn set_owner(origin, new_owner: Option) { - ensure_owner_or_root::(origin)?; - match new_owner { - Some(new_owner) => { - ModuleOwner::::put(&new_owner); - frame_support::debug::info!("Setting pallet Owner to: {:?}", new_owner); - }, - None => { - ModuleOwner::::kill(); - frame_support::debug::info!("Removed Owner of pallet."); - }, - } - } - - /// Halt all pallet operations. Operations may be resumed using `resume_operations` call. - /// - /// May only be called either by root, or by `ModuleOwner`. - #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] - pub fn halt_operations(origin) { - ensure_owner_or_root::(origin)?; - IsHalted::put(true); - frame_support::debug::warn!("Stopping pallet operations."); - } - - /// Resume all pallet operations. May be called even if pallet is halted. - /// - /// May only be called either by root, or by `ModuleOwner`. 
- #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] - pub fn resume_operations(origin) { - ensure_owner_or_root::(origin)?; - IsHalted::put(false); - frame_support::debug::info!("Resuming pallet operations."); - } - } -} - -impl Module { - /// Get the highest header(s) that the pallet knows of. - pub fn best_headers() -> Vec<(BridgedBlockNumber, BridgedBlockHash)> { - PalletStorage::::new() - .best_headers() - .iter() - .map(|id| (id.number, id.hash)) - .collect() - } - - /// Get the best finalized header the pallet knows of. - /// - /// Returns a dummy header if there is no best header. This can only happen - /// if the pallet has not been initialized yet. - /// - /// Since this has been finalized correctly a user of the bridge - /// pallet should be confident that any transactions that were - /// included in this or any previous header will not be reverted. - pub fn best_finalized() -> BridgedHeader { - PalletStorage::::new().best_finalized_header().header - } - - /// Check if a particular header is known to the bridge pallet. - pub fn is_known_header(hash: BridgedBlockHash) -> bool { - PalletStorage::::new().header_exists(hash) - } - - /// Check if a particular header is finalized. - /// - /// Will return false if the header is not known to the pallet. - // One thing worth noting here is that this approach won't work well - // once we track forks since there could be an older header on a - // different fork which isn't an ancestor of our best finalized header. - pub fn is_finalized_header(hash: BridgedBlockHash) -> bool { - let storage = PalletStorage::::new(); - if let Some(header) = storage.header_by_hash(hash) { - header.is_finalized - } else { - false - } - } - - /// Returns a list of headers which require finality proofs. - /// - /// These headers require proofs because they enact authority set changes. 
- pub fn require_justifications() -> Vec<(BridgedBlockNumber, BridgedBlockHash)> { - PalletStorage::::new() - .missing_justifications() - .iter() - .map(|id| (id.number, id.hash)) - .collect() - } - - /// Verify that the passed storage proof is valid, given it is crafted using - /// known finalized header. If the proof is valid, then the `parse` callback - /// is called and the function returns its result. - pub fn parse_finalized_storage_proof( - finalized_header_hash: BridgedBlockHash, - storage_proof: StorageProof, - parse: impl FnOnce(StorageProofChecker>) -> R, - ) -> Result { - let storage = PalletStorage::::new(); - let header = storage - .header_by_hash(finalized_header_hash) - .ok_or(Error::::UnknownHeader)?; - if !header.is_finalized { - return Err(Error::::UnfinalizedHeader.into()); - } - - let storage_proof_checker = - StorageProofChecker::new(*header.state_root(), storage_proof).map_err(Error::::from)?; - Ok(parse(storage_proof_checker)) - } -} - -impl bp_header_chain::HeaderChain, sp_runtime::DispatchError> for Module { - fn best_finalized() -> BridgedHeader { - PalletStorage::::new().best_finalized_header().header - } - - fn authority_set() -> AuthoritySet { - PalletStorage::::new().current_authority_set() - } - - fn append_header(header: BridgedHeader) { - import_header_unchecked::<_, T>(&mut PalletStorage::::new(), header); - } -} - -/// Import a finalized header without checking if this is true. -/// -/// This function assumes that all the given header has already been proven to be valid and -/// finalized. Using this assumption it will write them to storage with minimal checks. That -/// means it's of great importance that this function *not* called with any headers whose -/// finality has not been checked, otherwise you risk bricking your bridge. -/// -/// One thing this function does do for you is GRANDPA authority set handoffs. 
However, since it -/// does not do verification on the incoming header it will assume that the authority set change -/// signals in the digest are well formed. -fn import_header_unchecked(storage: &mut S, header: BridgedHeader) -where - S: BridgeStorage
>, - T: Config, -{ - // Since we want to use the existing storage infrastructure we need to indicate the fork - // that we're on. We will assume that since we are using the unchecked import there are no - // forks, and can indicate that by using the first imported header's "fork". - let dummy_fork_hash = >::get(); - - // If we have a pending change in storage let's check if the current header enacts it. - let enact_change = if let Some(pending_change) = storage.scheduled_set_change(dummy_fork_hash) { - pending_change.height == *header.number() - } else { - // We don't have a scheduled change in storage at the moment. Let's check if the current - // header signals an authority set change. - if let Some(change) = verifier::find_scheduled_change(&header) { - let next_set = AuthoritySet { - authorities: change.next_authorities, - set_id: storage.current_authority_set().set_id + 1, - }; - - let height = *header.number() + change.delay; - let scheduled_change = ScheduledChange { - authority_set: next_set, - height, - }; - - storage.schedule_next_set_change(dummy_fork_hash, scheduled_change); - - // If the delay is 0 this header will enact the change it signaled - height == *header.number() - } else { - false - } - }; - - if enact_change { - const ENACT_SET_PROOF: &str = "We only set `enact_change` as `true` if we are sure that there is a scheduled - authority set change in storage. Therefore, it must exist."; - - // If we are unable to enact an authority set it means our storage entry for scheduled - // changes is missing. Best to crash since this is likely a bug. - let _ = storage.enact_authority_set(dummy_fork_hash).expect(ENACT_SET_PROOF); - } - - storage.update_best_finalized(header.hash()); - - storage.write_header(&ImportedHeader { - header, - requires_justification: false, - is_finalized: true, - signal_hash: None, - }); -} - -/// Ensure that the origin is either root, or `ModuleOwner`. 
-fn ensure_owner_or_root(origin: T::Origin) -> Result<(), BadOrigin> { - match origin.into() { - Ok(RawOrigin::Root) => Ok(()), - Ok(RawOrigin::Signed(ref signer)) if Some(signer) == >::module_owner().as_ref() => Ok(()), - _ => Err(BadOrigin), - } -} - -/// Ensure that the pallet is in operational mode (not halted). -fn ensure_operational() -> Result<(), Error> { - if IsHalted::get() { - Err(>::Halted) - } else { - Ok(()) - } -} - -/// (Re)initialize bridge with given header for using it in external benchmarks. -#[cfg(feature = "runtime-benchmarks")] -pub fn initialize_for_benchmarks(header: HeaderOf) { - initialize_bridge::(InitializationData { - header, - authority_list: Vec::new(), // we don't verify any proofs in external benchmarks - set_id: 0, - scheduled_change: None, - is_halted: false, - }); -} - -/// Since this writes to storage with no real checks this should only be used in functions that were -/// called by a trusted origin. -fn initialize_bridge(init_params: InitializationData>) { - let InitializationData { - header, - authority_list, - set_id, - scheduled_change, - is_halted, - } = init_params; - - let initial_hash = header.hash(); - - let mut signal_hash = None; - if let Some(ref change) = scheduled_change { - assert!( - change.height > *header.number(), - "Changes must be scheduled past initial header." - ); - - signal_hash = Some(initial_hash); - >::insert(initial_hash, change); - }; - - >::put(initial_hash); - >::put(header.number()); - >::put(vec![initial_hash]); - >::put(initial_hash); - - let authority_set = AuthoritySet::new(authority_list, set_id); - CurrentAuthoritySet::put(authority_set); - - >::insert( - initial_hash, - ImportedHeader { - header, - requires_justification: false, - is_finalized: true, - signal_hash, - }, - ); - - IsHalted::put(is_halted); -} - -/// Expected interface for interacting with bridge pallet storage. 
-// TODO: This should be split into its own less-Substrate-dependent crate -pub trait BridgeStorage { - /// The header type being used by the pallet. - type Header: HeaderT; - - /// Write a header to storage. - fn write_header(&mut self, header: &ImportedHeader); - - /// Get the header(s) at the highest known height. - fn best_headers(&self) -> Vec>; - - /// Get the best finalized header the pallet knows of. - /// - /// Returns None if there is no best header. This can only happen if the pallet - /// has not been initialized yet. - fn best_finalized_header(&self) -> ImportedHeader; - - /// Update the best finalized header the pallet knows of. - fn update_best_finalized(&self, hash: ::Hash); - - /// Check if a particular header is known to the pallet. - fn header_exists(&self, hash: ::Hash) -> bool; - - /// Returns a list of headers which require justifications. - /// - /// A header will require a justification if it enacts a new authority set. - fn missing_justifications(&self) -> Vec>; - - /// Get a specific header by its hash. - /// - /// Returns None if it is not known to the pallet. - fn header_by_hash(&self, hash: ::Hash) -> Option>; - - /// Get the current GRANDPA authority set. - fn current_authority_set(&self) -> AuthoritySet; - - /// Update the current GRANDPA authority set. - /// - /// Should only be updated when a scheduled change has been triggered. - fn update_current_authority_set(&self, new_set: AuthoritySet); - - /// Replace the current authority set with the next scheduled set. - /// - /// Returns an error if there is no scheduled authority set to enact. - #[allow(clippy::result_unit_err)] - fn enact_authority_set(&mut self, signal_hash: ::Hash) -> Result<(), ()>; - - /// Get the next scheduled GRANDPA authority set change. - fn scheduled_set_change( - &self, - signal_hash: ::Hash, - ) -> Option::Number>>; - - /// Schedule a GRANDPA authority set change in the future. - /// - /// Takes the hash of the header which scheduled this particular change. 
- fn schedule_next_set_change( - &mut self, - signal_hash: ::Hash, - next_change: ScheduledChange<::Number>, - ); -} - -/// Used to interact with the pallet storage in a more abstract way. -#[derive(Default, Clone)] -pub struct PalletStorage(PhantomData); - -impl PalletStorage { - fn new() -> Self { - Self(PhantomData::::default()) - } -} - -impl BridgeStorage for PalletStorage { - type Header = BridgedHeader; - - fn write_header(&mut self, header: &ImportedHeader>) { - use core::cmp::Ordering; - - let hash = header.hash(); - let current_height = header.number(); - let best_height = >::get(); - - match current_height.cmp(&best_height) { - Ordering::Equal => { - // Want to avoid duplicates in the case where we're writing a finalized header to - // storage which also happens to be at the best height the best height - let not_duplicate = !>::contains_key(hash); - if not_duplicate { - >::append(hash); - } - } - Ordering::Greater => { - >::kill(); - >::append(hash); - >::put(current_height); - } - Ordering::Less => { - // This is fine. We can still have a valid header, but it might just be on a - // different fork and at a lower height than the "best" overall header. - } - } - - if header.requires_justification { - >::insert(hash, current_height); - } else { - // If the key doesn't exist this is a no-op, so it's fine to call it often - >::remove(hash); - } - - >::insert(hash, header); - } - - fn best_headers(&self) -> Vec>> { - let number = >::get(); - >::get() - .iter() - .map(|hash| HeaderId { number, hash: *hash }) - .collect() - } - - fn best_finalized_header(&self) -> ImportedHeader> { - // We will only construct a dummy header if the pallet is not initialized and someone tries - // to use the public module interface (not dispatchables) to get the best finalized header. - // This is an edge case since this can only really happen when bootstrapping the bridge. 
- let hash = >::get(); - self.header_by_hash(hash).unwrap_or_else(|| ImportedHeader { - header: >::new( - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ), - requires_justification: false, - is_finalized: false, - signal_hash: None, - }) - } - - fn update_best_finalized(&self, hash: BridgedBlockHash) { - >::put(hash); - } - - fn header_exists(&self, hash: BridgedBlockHash) -> bool { - >::contains_key(hash) - } - - fn header_by_hash(&self, hash: BridgedBlockHash) -> Option>> { - >::get(hash) - } - - fn missing_justifications(&self) -> Vec>> { - >::iter() - .map(|(hash, number)| HeaderId { number, hash }) - .collect() - } - - fn current_authority_set(&self) -> AuthoritySet { - CurrentAuthoritySet::get() - } - - fn update_current_authority_set(&self, new_set: AuthoritySet) { - CurrentAuthoritySet::put(new_set) - } - - fn enact_authority_set(&mut self, signal_hash: BridgedBlockHash) -> Result<(), ()> { - let new_set = >::take(signal_hash).ok_or(())?.authority_set; - self.update_current_authority_set(new_set); - - Ok(()) - } - - fn scheduled_set_change(&self, signal_hash: BridgedBlockHash) -> Option>> { - >::get(signal_hash) - } - - fn schedule_next_set_change( - &mut self, - signal_hash: BridgedBlockHash, - next_change: ScheduledChange>, - ) { - >::insert(signal_hash, next_change) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{run_test, test_header, unfinalized_header, Origin, TestHeader, TestRuntime}; - use bp_header_chain::HeaderChain; - use bp_test_utils::{alice, authority_list, bob}; - use frame_support::{assert_noop, assert_ok}; - use sp_runtime::DispatchError; - - fn init_with_origin(origin: Origin) -> Result, DispatchError> { - let init_data = InitializationData { - header: test_header(1), - authority_list: authority_list(), - set_id: 1, - scheduled_change: None, - is_halted: false, - }; - - Module::::initialize(origin, init_data.clone()).map(|_| init_data) - } - - #[test] - 
fn init_root_or_owner_origin_can_initialize_pallet() { - run_test(|| { - assert_noop!(init_with_origin(Origin::signed(1)), DispatchError::BadOrigin); - assert_ok!(init_with_origin(Origin::root())); - - // Reset storage so we can initialize the pallet again - BestFinalized::::kill(); - ModuleOwner::::put(2); - assert_ok!(init_with_origin(Origin::signed(2))); - }) - } - - #[test] - fn init_storage_entries_are_correctly_initialized() { - run_test(|| { - assert!(Module::::best_headers().is_empty()); - assert_eq!(Module::::best_finalized(), test_header(0)); - - let init_data = init_with_origin(Origin::root()).unwrap(); - - let storage = PalletStorage::::new(); - assert!(storage.header_exists(init_data.header.hash())); - assert_eq!( - storage.best_headers()[0], - crate::HeaderId { - number: *init_data.header.number(), - hash: init_data.header.hash() - } - ); - assert_eq!(storage.best_finalized_header().hash(), init_data.header.hash()); - assert_eq!(storage.current_authority_set().authorities, init_data.authority_list); - assert_eq!(IsHalted::get(), false); - }) - } - - #[test] - fn init_can_only_initialize_pallet_once() { - run_test(|| { - assert_ok!(init_with_origin(Origin::root())); - assert_noop!( - init_with_origin(Origin::root()), - >::AlreadyInitialized - ); - }) - } - - #[test] - fn pallet_owner_may_change_owner() { - run_test(|| { - ModuleOwner::::put(2); - - assert_ok!(Module::::set_owner(Origin::root(), Some(1))); - assert_noop!( - Module::::halt_operations(Origin::signed(2)), - DispatchError::BadOrigin, - ); - assert_ok!(Module::::halt_operations(Origin::root())); - - assert_ok!(Module::::set_owner(Origin::signed(1), None)); - assert_noop!( - Module::::resume_operations(Origin::signed(1)), - DispatchError::BadOrigin, - ); - assert_noop!( - Module::::resume_operations(Origin::signed(2)), - DispatchError::BadOrigin, - ); - assert_ok!(Module::::resume_operations(Origin::root())); - }); - } - - #[test] - fn pallet_may_be_halted_by_root() { - run_test(|| { - 
assert_ok!(Module::::halt_operations(Origin::root())); - assert_ok!(Module::::resume_operations(Origin::root())); - }); - } - - #[test] - fn pallet_may_be_halted_by_owner() { - run_test(|| { - ModuleOwner::::put(2); - - assert_ok!(Module::::halt_operations(Origin::signed(2))); - assert_ok!(Module::::resume_operations(Origin::signed(2))); - - assert_noop!( - Module::::halt_operations(Origin::signed(1)), - DispatchError::BadOrigin, - ); - assert_noop!( - Module::::resume_operations(Origin::signed(1)), - DispatchError::BadOrigin, - ); - - assert_ok!(Module::::halt_operations(Origin::signed(2))); - assert_noop!( - Module::::resume_operations(Origin::signed(1)), - DispatchError::BadOrigin, - ); - }); - } - - #[test] - fn pallet_rejects_transactions_if_halted() { - run_test(|| { - IsHalted::put(true); - - assert_noop!( - Module::::import_signed_header(Origin::signed(1), test_header(1)), - Error::::Halted, - ); - - assert_noop!( - Module::::finalize_header(Origin::signed(1), test_header(1).hash(), vec![]), - Error::::Halted, - ); - }) - } - - #[test] - fn parse_finalized_storage_proof_rejects_proof_on_unknown_header() { - run_test(|| { - assert_noop!( - Module::::parse_finalized_storage_proof( - Default::default(), - StorageProof::new(vec![]), - |_| (), - ), - Error::::UnknownHeader, - ); - }); - } - - #[test] - fn parse_finalized_storage_proof_rejects_proof_on_unfinalized_header() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let header = unfinalized_header(1); - storage.write_header(&header); - - assert_noop!( - Module::::parse_finalized_storage_proof( - header.header.hash(), - StorageProof::new(vec![]), - |_| (), - ), - Error::::UnfinalizedHeader, - ); - }); - } - - #[test] - fn parse_finalized_storage_accepts_valid_proof() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let (state_root, storage_proof) = storage_proof::tests::craft_valid_storage_proof(); - let mut header = unfinalized_header(1); - header.is_finalized = true; - 
header.header.set_state_root(state_root); - storage.write_header(&header); - - assert_ok!( - Module::::parse_finalized_storage_proof(header.header.hash(), storage_proof, |_| (),), - (), - ); - }); - } - - #[test] - fn importing_unchecked_headers_works() { - run_test(|| { - init_with_origin(Origin::root()).unwrap(); - let storage = PalletStorage::::new(); - - let header = test_header(2); - Module::::append_header(header.clone()); - - assert!(storage.header_by_hash(header.hash()).unwrap().is_finalized); - assert_eq!(storage.best_finalized_header().header, header); - assert_eq!(storage.best_headers()[0].hash, header.hash()); - }) - } - - #[test] - fn importing_unchecked_headers_enacts_new_authority_set() { - run_test(|| { - init_with_origin(Origin::root()).unwrap(); - let storage = PalletStorage::::new(); - - let next_set_id = 2; - let next_authorities = vec![(alice(), 1), (bob(), 1)]; - - // Need to update the header digest to indicate that our header signals an authority set - // change. The change will be enacted when we import our header. - let mut header = test_header(2); - header.digest = fork_tests::change_log(0); - - // Let's import our test header - Module::::append_header(header.clone()); - - // Make sure that our header is the best finalized - assert_eq!(storage.best_finalized_header().header, header); - assert_eq!(storage.best_headers()[0].hash, header.hash()); - - // Make sure that the authority set actually changed upon importing our header - assert_eq!( - storage.current_authority_set(), - AuthoritySet::new(next_authorities, next_set_id), - ); - }) - } - - #[test] - fn importing_unchecked_headers_enacts_new_authority_set_from_old_header() { - run_test(|| { - init_with_origin(Origin::root()).unwrap(); - let storage = PalletStorage::::new(); - - let next_set_id = 2; - let next_authorities = vec![(alice(), 1), (bob(), 1)]; - - // Need to update the header digest to indicate that our header signals an authority set - // change. 
However, the change doesn't happen until the next block. - let mut schedules_change = test_header(2); - schedules_change.digest = fork_tests::change_log(1); - let header = test_header(3); - - // Let's import our test headers - Module::::append_header(schedules_change); - Module::::append_header(header.clone()); - - // Make sure that our header is the best finalized - assert_eq!(storage.best_finalized_header().header, header); - assert_eq!(storage.best_headers()[0].hash, header.hash()); - - // Make sure that the authority set actually changed upon importing our header - assert_eq!( - storage.current_authority_set(), - AuthoritySet::new(next_authorities, next_set_id), - ); - }) - } - - #[test] - fn importing_unchecked_header_can_enact_set_change_scheduled_at_genesis() { - run_test(|| { - let storage = PalletStorage::::new(); - - let next_authorities = vec![(alice(), 1)]; - let next_set_id = 2; - let next_authority_set = AuthoritySet::new(next_authorities.clone(), next_set_id); - - let first_scheduled_change = ScheduledChange { - authority_set: next_authority_set, - height: 2, - }; - - let init_data = InitializationData { - header: test_header(1), - authority_list: authority_list(), - set_id: 1, - scheduled_change: Some(first_scheduled_change), - is_halted: false, - }; - - assert_ok!(Module::::initialize(Origin::root(), init_data)); - - // We are expecting an authority set change at height 2, so this header should enact - // that upon being imported. 
- Module::::append_header(test_header(2)); - - // Make sure that the authority set actually changed upon importing our header - assert_eq!( - storage.current_authority_set(), - AuthoritySet::new(next_authorities, next_set_id), - ); - }) - } -} diff --git a/polkadot/bridges/modules/substrate/src/mock.rs b/polkadot/bridges/modules/substrate/src/mock.rs deleted file mode 100644 index a205c09e83930a26b52d7a75909b988522430a94..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/substrate/src/mock.rs +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Mock Runtime for Substrate Pallet Testing. -//! -//! Includes some useful testing types and functions. 
- -#![cfg(test)] -// From construct_runtime macro -#![allow(clippy::from_over_into)] - -use crate::{BridgedBlockHash, BridgedBlockNumber, BridgedHeader, Config}; -use bp_runtime::Chain; -use frame_support::{parameter_types, weights::Weight}; -use sp_runtime::{ - testing::{Header, H256}, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, -}; - -pub type AccountId = u64; -pub type TestHeader = BridgedHeader; -pub type TestNumber = BridgedBlockNumber; -pub type TestHash = BridgedBlockHash; - -type Block = frame_system::mocking::MockBlock; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - -use crate as pallet_substrate; - -frame_support::construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Substrate: pallet_substrate::{Pallet, Call}, - } -} - -parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - -impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = (); - type SystemWeightInfo = (); - type DbWeight = (); - type BlockWeights = (); - type BlockLength = (); - type SS58Prefix = (); - type OnSetCode = (); -} - -impl Config for TestRuntime { - type BridgedChain = TestBridgedChain; -} - -#[derive(Debug)] -pub struct TestBridgedChain; - -impl Chain for TestBridgedChain { - type BlockNumber = ::BlockNumber; - type Hash = 
::Hash; - type Hasher = ::Hashing; - type Header = ::Header; -} - -pub fn run_test(test: impl FnOnce() -> T) -> T { - sp_io::TestExternalities::new(Default::default()).execute_with(test) -} - -pub fn test_header(num: TestNumber) -> TestHeader { - // We wrap the call to avoid explicit type annotations in our tests - bp_test_utils::test_header(num) -} - -pub fn unfinalized_header(num: u64) -> crate::storage::ImportedHeader { - crate::storage::ImportedHeader { - header: test_header(num), - requires_justification: false, - is_finalized: false, - signal_hash: None, - } -} diff --git a/polkadot/bridges/modules/substrate/src/storage.rs b/polkadot/bridges/modules/substrate/src/storage.rs deleted file mode 100644 index 5b521306b2cd11b9e8d8e168c080739c4735f816..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/substrate/src/storage.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Storage primitives for the Substrate light client (a.k.a bridge) pallet. 
- -use bp_header_chain::AuthoritySet; -use codec::{Decode, Encode}; -use core::default::Default; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; -use sp_finality_grandpa::{AuthorityList, SetId}; -use sp_runtime::traits::Header as HeaderT; -use sp_runtime::RuntimeDebug; - -/// Data required for initializing the bridge pallet. -/// -/// The bridge needs to know where to start its sync from, and this provides that initial context. -#[derive(Default, Encode, Decode, RuntimeDebug, PartialEq, Clone)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct InitializationData { - /// The header from which we should start syncing. - pub header: H, - /// The initial authorities of the pallet. - pub authority_list: AuthorityList, - /// The ID of the initial authority set. - pub set_id: SetId, - /// The first scheduled authority set change of the pallet. - pub scheduled_change: Option>, - /// Should the pallet block transaction immediately after initialization. - pub is_halted: bool, -} - -/// Keeps track of when the next GRANDPA authority set change will occur. -#[derive(Default, Encode, Decode, RuntimeDebug, PartialEq, Clone)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct ScheduledChange { - /// The authority set that will be used once this change is enacted. - pub authority_set: AuthoritySet, - /// The block height at which the authority set should be enacted. - /// - /// Note: It will only be enacted once a header at this height is finalized. - pub height: N, -} - -/// A more useful representation of a header for storage purposes. -#[derive(Default, Encode, Decode, Clone, RuntimeDebug, PartialEq)] -pub struct ImportedHeader { - /// A plain Substrate header. - pub header: H, - /// Does this header enact a new authority set change. If it does - /// then it will require a justification. 
- pub requires_justification: bool, - /// Has this header been finalized, either explicitly via a justification, - /// or implicitly via one of its children getting finalized. - pub is_finalized: bool, - /// The hash of the header which scheduled a change on this fork. If there are currently - /// not pending changes on this fork this will be empty. - pub signal_hash: Option, -} - -impl core::ops::Deref for ImportedHeader { - type Target = H; - - fn deref(&self) -> &H { - &self.header - } -} diff --git a/polkadot/bridges/modules/substrate/src/storage_proof.rs b/polkadot/bridges/modules/substrate/src/storage_proof.rs deleted file mode 100644 index 4b908dde15e9ff4e546ef8d018fb862cd5a45797..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/substrate/src/storage_proof.rs +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -// TODO: remove on actual use -#![allow(dead_code)] - -//! Logic for checking Substrate storage proofs. - -use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; -use sp_runtime::RuntimeDebug; -use sp_std::vec::Vec; -use sp_trie::{read_trie_value, Layout, MemoryDB, StorageProof}; - -/// This struct is used to read storage values from a subset of a Merklized database. 
The "proof" -/// is a subset of the nodes in the Merkle structure of the database, so that it provides -/// authentication against a known Merkle root as well as the values in the database themselves. -pub struct StorageProofChecker -where - H: Hasher, -{ - root: H::Out, - db: MemoryDB, -} - -impl StorageProofChecker -where - H: Hasher, -{ - /// Constructs a new storage proof checker. - /// - /// This returns an error if the given proof is invalid with respect to the given root. - pub fn new(root: H::Out, proof: StorageProof) -> Result { - let db = proof.into_memory_db(); - if !db.contains(&root, EMPTY_PREFIX) { - return Err(Error::StorageRootMismatch); - } - - let checker = StorageProofChecker { root, db }; - Ok(checker) - } - - /// Reads a value from the available subset of storage. If the value cannot be read due to an - /// incomplete or otherwise invalid proof, this returns an error. - pub fn read_value(&self, key: &[u8]) -> Result>, Error> { - read_trie_value::, _>(&self.db, &self.root, key).map_err(|_| Error::StorageValueUnavailable) - } -} - -#[derive(RuntimeDebug, PartialEq)] -pub enum Error { - StorageRootMismatch, - StorageValueUnavailable, -} - -impl From for crate::Error { - fn from(error: Error) -> Self { - match error { - Error::StorageRootMismatch => crate::Error::StorageRootMismatch, - Error::StorageValueUnavailable => crate::Error::StorageValueUnavailable, - } - } -} - -#[cfg(test)] -pub mod tests { - use super::*; - - use sp_core::{Blake2Hasher, H256}; - use sp_state_machine::{backend::Backend, prove_read, InMemoryBackend}; - - /// Return valid storage proof and state root. 
- pub fn craft_valid_storage_proof() -> (H256, StorageProof) { - // construct storage proof - let backend = >::from(vec![ - (None, vec![(b"key1".to_vec(), Some(b"value1".to_vec()))]), - (None, vec![(b"key2".to_vec(), Some(b"value2".to_vec()))]), - (None, vec![(b"key3".to_vec(), Some(b"value3".to_vec()))]), - // Value is too big to fit in a branch node - (None, vec![(b"key11".to_vec(), Some(vec![0u8; 32]))]), - ]); - let root = backend.storage_root(std::iter::empty()).0; - let proof = StorageProof::new( - prove_read(backend, &[&b"key1"[..], &b"key2"[..], &b"key22"[..]]) - .unwrap() - .iter_nodes() - .collect(), - ); - - (root, proof) - } - - #[test] - fn storage_proof_check() { - let (root, proof) = craft_valid_storage_proof(); - - // check proof in runtime - let checker = >::new(root, proof.clone()).unwrap(); - assert_eq!(checker.read_value(b"key1"), Ok(Some(b"value1".to_vec()))); - assert_eq!(checker.read_value(b"key2"), Ok(Some(b"value2".to_vec()))); - assert_eq!(checker.read_value(b"key11111"), Err(Error::StorageValueUnavailable)); - assert_eq!(checker.read_value(b"key22"), Ok(None)); - - // checking proof against invalid commitment fails - assert_eq!( - >::new(H256::random(), proof).err(), - Some(Error::StorageRootMismatch) - ); - } -} diff --git a/polkadot/bridges/modules/substrate/src/verifier.rs b/polkadot/bridges/modules/substrate/src/verifier.rs deleted file mode 100644 index 0c3bd1b5ddd497f0fa7a2b0e4f404b2a324d2d64..0000000000000000000000000000000000000000 --- a/polkadot/bridges/modules/substrate/src/verifier.rs +++ /dev/null @@ -1,871 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The verifier's role is to check the validity of headers being imported, and also determine if -//! they can be finalized. -//! -//! When importing headers it performs checks to ensure that no invariants are broken (like -//! importing the same header twice). When it imports finality proofs it will ensure that the proof -//! has been signed off by the correct GRANDPA authorities, and also enact any authority set changes -//! if required. - -use crate::storage::{ImportedHeader, ScheduledChange}; -use crate::BridgeStorage; - -use bp_header_chain::{justification::verify_justification, AuthoritySet}; -use finality_grandpa::voter_set::VoterSet; -use sp_finality_grandpa::{ConsensusLog, GRANDPA_ENGINE_ID}; -use sp_runtime::generic::OpaqueDigestItemId; -use sp_runtime::traits::{CheckedAdd, Header as HeaderT, One}; -use sp_runtime::RuntimeDebug; -use sp_std::{prelude::Vec, vec}; - -/// The finality proof used by the pallet. -/// -/// For a Substrate based chain using GRANDPA this will -/// be an encoded GRANDPA Justification. -#[derive(RuntimeDebug)] -pub struct FinalityProof(Vec); - -impl From<&[u8]> for FinalityProof { - fn from(proof: &[u8]) -> Self { - Self(proof.to_vec()) - } -} - -impl From> for FinalityProof { - fn from(proof: Vec) -> Self { - Self(proof) - } -} - -/// Errors which can happen while importing a header. -#[derive(RuntimeDebug, PartialEq)] -pub enum ImportError { - /// This header is at the same height or older than our latest finalized block, thus not useful. - OldHeader, - /// This header has already been imported by the pallet. 
- HeaderAlreadyExists, - /// We're missing a parent for this header. - MissingParent, - /// The number of the header does not follow its parent's number. - InvalidChildNumber, - /// The height of the next authority set change overflowed. - ScheduledHeightOverflow, - /// Received an authority set which was invalid in some way, such as - /// the authority weights being empty or overflowing the `AuthorityWeight` - /// type. - InvalidAuthoritySet, - /// This header is not allowed to be imported since an ancestor requires a finality proof. - /// - /// This can happen if an ancestor is supposed to enact an authority set change. - AwaitingFinalityProof, - /// This header schedules an authority set change even though we're still waiting - /// for an old authority set change to be enacted on this fork. - PendingAuthoritySetChange, -} - -/// Errors which can happen while verifying a headers finality. -#[derive(RuntimeDebug, PartialEq)] -pub enum FinalizationError { - /// This header has never been imported by the pallet. - UnknownHeader, - /// Trying to prematurely import a justification - PrematureJustification, - /// We failed to verify this header's ancestry. - AncestryCheckFailed, - /// This header is at the same height or older than our latest finalized block, thus not useful. - OldHeader, - /// The given justification was not able to finalize the given header. - /// - /// There are several reasons why this might happen, such as the justification being - /// signed by the wrong authority set, being given alongside an unexpected header, - /// or failing ancestry checks. - InvalidJustification, -} - -/// Used to verify imported headers and their finality status. -#[derive(RuntimeDebug)] -pub struct Verifier { - pub storage: S, -} - -impl Verifier -where - S: BridgeStorage
, - H: HeaderT, - H::Number: finality_grandpa::BlockNumberOps, -{ - /// Import a header to the pallet. - /// - /// Will perform some basic checks to make sure that this header doesn't break any assumptions - /// such as being on a different finalized fork. - pub fn import_header(&mut self, hash: H::Hash, header: H) -> Result<(), ImportError> { - let best_finalized = self.storage.best_finalized_header(); - - if header.number() <= best_finalized.number() { - return Err(ImportError::OldHeader); - } - - if self.storage.header_exists(hash) { - return Err(ImportError::HeaderAlreadyExists); - } - - let parent_header = self - .storage - .header_by_hash(*header.parent_hash()) - .ok_or(ImportError::MissingParent)?; - - let parent_number = *parent_header.number(); - if parent_number + One::one() != *header.number() { - return Err(ImportError::InvalidChildNumber); - } - - // A header requires a justification if it enacts an authority set change. We don't - // need to act on it right away (we'll update the set once the header gets finalized), but - // we need to make a note of it. - // - // Note: This assumes that we can only have one authority set change pending per fork at a - // time. While this is not strictly true of GRANDPA (it can have multiple pending changes, - // even across forks), this assumption simplifies our tracking of authority set changes. 
- let mut signal_hash = parent_header.signal_hash; - let scheduled_change = find_scheduled_change(&header); - - // Check if our fork is expecting an authority set change - let requires_justification = if let Some(hash) = signal_hash { - const PROOF: &str = "If the header has a signal hash it means there's an accompanying set - change in storage, therefore this must always be valid."; - let pending_change = self.storage.scheduled_set_change(hash).expect(PROOF); - - if scheduled_change.is_some() { - return Err(ImportError::PendingAuthoritySetChange); - } - - if *header.number() > pending_change.height { - return Err(ImportError::AwaitingFinalityProof); - } - - pending_change.height == *header.number() - } else { - // Since we don't currently have a pending authority set change let's check if the header - // contains a log indicating when the next change should be. - if let Some(change) = scheduled_change { - let mut total_weight = 0u64; - - for (_id, weight) in &change.next_authorities { - total_weight = total_weight - .checked_add(*weight) - .ok_or(ImportError::InvalidAuthoritySet)?; - } - - // If none of the authorities have a weight associated with them the - // set is essentially empty. We don't want that. - if total_weight == 0 { - return Err(ImportError::InvalidAuthoritySet); - } - - let next_set = AuthoritySet { - authorities: change.next_authorities, - set_id: self.storage.current_authority_set().set_id + 1, - }; - - let height = (*header.number()) - .checked_add(&change.delay) - .ok_or(ImportError::ScheduledHeightOverflow)?; - - let scheduled_change = ScheduledChange { - authority_set: next_set, - height, - }; - - // Note: It's important that the signal hash is updated if a header schedules a - // change or else we end up with inconsistencies in other places. 
- signal_hash = Some(hash); - self.storage.schedule_next_set_change(hash, scheduled_change); - - // If the delay is 0 this header will enact the change it signaled - height == *header.number() - } else { - false - } - }; - - self.storage.write_header(&ImportedHeader { - header, - requires_justification, - is_finalized: false, - signal_hash, - }); - - Ok(()) - } - - /// Verify that a previously imported header can be finalized with the given GRANDPA finality - /// proof. If the header enacts an authority set change the change will be applied once the - /// header has been finalized. - pub fn import_finality_proof(&mut self, hash: H::Hash, proof: FinalityProof) -> Result<(), FinalizationError> { - // Make sure that we've previously imported this header - let header = self - .storage - .header_by_hash(hash) - .ok_or(FinalizationError::UnknownHeader)?; - - // We don't want to finalize an ancestor of an already finalized - // header, this would be inconsistent - let last_finalized = self.storage.best_finalized_header(); - if header.number() <= last_finalized.number() { - return Err(FinalizationError::OldHeader); - } - - let current_authority_set = self.storage.current_authority_set(); - let voter_set = VoterSet::new(current_authority_set.authorities).expect( - "We verified the correctness of the authority list during header import, - before writing them to storage. 
This must always be valid.", - ); - verify_justification::( - (hash, *header.number()), - current_authority_set.set_id, - voter_set, - &proof.0, - ) - .map_err(|_| FinalizationError::InvalidJustification)?; - frame_support::debug::trace!("Received valid justification for {:?}", header); - - frame_support::debug::trace!( - "Checking ancestry for headers between {:?} and {:?}", - last_finalized, - header - ); - let mut finalized_headers = - if let Some(ancestors) = headers_between(&self.storage, last_finalized, header.clone()) { - // Since we only try and finalize headers with a height strictly greater - // than `best_finalized` if `headers_between` returns Some we must have - // at least one element. If we don't something's gone wrong, so best - // to die before we write to storage. - assert_eq!( - ancestors.is_empty(), - false, - "Empty ancestry list returned from `headers_between()`", - ); - - // Check if any of our ancestors `requires_justification` a.k.a schedule authority - // set changes. If they're still waiting to be finalized we must reject this - // justification. We don't include our current header in this check. - // - // We do this because it is important to to import justifications _in order_, - // otherwise we risk finalizing headers on competing chains. - let requires_justification = ancestors.iter().skip(1).find(|h| h.requires_justification); - if requires_justification.is_some() { - return Err(FinalizationError::PrematureJustification); - } - - ancestors - } else { - return Err(FinalizationError::AncestryCheckFailed); - }; - - // If the current header was marked as `requires_justification` it means that it enacts a - // new authority set change. When we finalize the header we need to update the current - // authority set. - if header.requires_justification { - const SIGNAL_HASH_PROOF: &str = "When we import a header we only mark it as - `requires_justification` if we have checked that it contains a signal hash. 
Therefore - this must always be valid."; - - const ENACT_SET_PROOF: &str = - "Headers must only be marked as `requires_justification` if there's a scheduled change in storage."; - - // If we are unable to enact an authority set it means our storage entry for scheduled - // changes is missing. Best to crash since this is likely a bug. - let _ = self - .storage - .enact_authority_set(header.signal_hash.expect(SIGNAL_HASH_PROOF)) - .expect(ENACT_SET_PROOF); - } - - for header in finalized_headers.iter_mut() { - header.is_finalized = true; - header.requires_justification = false; - header.signal_hash = None; - self.storage.write_header(header); - } - - self.storage.update_best_finalized(hash); - - Ok(()) - } -} - -/// Returns the lineage of headers between [child, ancestor) -fn headers_between( - storage: &S, - ancestor: ImportedHeader, - child: ImportedHeader, -) -> Option>> -where - S: BridgeStorage
, - H: HeaderT, -{ - let mut ancestors = vec![]; - let mut current_header = child; - - while ancestor.hash() != current_header.hash() { - // We've gotten to the same height and we're not related - if ancestor.number() >= current_header.number() { - return None; - } - - let parent = storage.header_by_hash(*current_header.parent_hash()); - ancestors.push(current_header); - current_header = match parent { - Some(h) => h, - None => return None, - } - } - - Some(ancestors) -} - -pub(crate) fn find_scheduled_change(header: &H) -> Option> { - let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); - - let filter_log = |log: ConsensusLog| match log { - ConsensusLog::ScheduledChange(change) => Some(change), - _ => None, - }; - - // find the first consensus digest with the right ID which converts to - // the right kind of consensus log. - header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::*; - use crate::{BestFinalized, BestHeight, HeaderId, ImportedHeaders, PalletStorage}; - use bp_test_utils::{alice, authority_list, bob, make_justification_for_header}; - use codec::Encode; - use frame_support::{assert_err, assert_ok}; - use frame_support::{StorageMap, StorageValue}; - use sp_finality_grandpa::{AuthorityId, SetId}; - use sp_runtime::{Digest, DigestItem}; - - fn schedule_next_change( - authorities: Vec, - set_id: SetId, - height: TestNumber, - ) -> ScheduledChange { - let authorities = authorities.into_iter().map(|id| (id, 1u64)).collect(); - let authority_set = AuthoritySet::new(authorities, set_id); - ScheduledChange { authority_set, height } - } - - // Useful for quickly writing a chain of headers to storage - // Input is expected in the form: vec![(num, requires_justification, is_finalized)] - fn write_headers>( - storage: &mut S, - headers: Vec<(u64, bool, bool)>, - ) -> Vec> { - let mut imported_headers = vec![]; - let genesis = ImportedHeader { - header: test_header(0), - 
requires_justification: false, - is_finalized: true, - signal_hash: None, - }; - - >::put(genesis.hash()); - storage.write_header(&genesis); - imported_headers.push(genesis); - - for (num, requires_justification, is_finalized) in headers { - let header = ImportedHeader { - header: test_header(num), - requires_justification, - is_finalized, - signal_hash: None, - }; - - storage.write_header(&header); - imported_headers.push(header); - } - - imported_headers - } - - // Given a block number will generate a chain of headers which don't require justification and - // are not considered to be finalized. - fn write_default_headers>( - storage: &mut S, - headers: Vec, - ) -> Vec> { - let headers = headers.iter().map(|num| (*num, false, false)).collect(); - write_headers(storage, headers) - } - - #[test] - fn fails_to_import_old_header() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let parent = unfinalized_header(5); - storage.write_header(&parent); - storage.update_best_finalized(parent.hash()); - - let header = test_header(1); - let mut verifier = Verifier { storage }; - assert_err!(verifier.import_header(header.hash(), header), ImportError::OldHeader); - }) - } - - #[test] - fn fails_to_import_header_without_parent() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let parent = unfinalized_header(1); - storage.write_header(&parent); - storage.update_best_finalized(parent.hash()); - - // By default the parent is `0x00` - let header = TestHeader::new_from_number(2); - - let mut verifier = Verifier { storage }; - assert_err!( - verifier.import_header(header.hash(), header), - ImportError::MissingParent - ); - }) - } - - #[test] - fn fails_to_import_header_twice() { - run_test(|| { - let storage = PalletStorage::::new(); - let header = test_header(1); - >::put(header.hash()); - - let imported_header = ImportedHeader { - header: header.clone(), - requires_justification: false, - is_finalized: false, - signal_hash: None, - }; - 
>::insert(header.hash(), &imported_header); - - let mut verifier = Verifier { storage }; - assert_err!(verifier.import_header(header.hash(), header), ImportError::OldHeader); - }) - } - - #[test] - fn succesfully_imports_valid_but_unfinalized_header() { - run_test(|| { - let storage = PalletStorage::::new(); - let parent = test_header(1); - let parent_hash = parent.hash(); - >::put(parent.hash()); - - let imported_header = ImportedHeader { - header: parent, - requires_justification: false, - is_finalized: true, - signal_hash: None, - }; - >::insert(parent_hash, &imported_header); - - let header = test_header(2); - let mut verifier = Verifier { - storage: storage.clone(), - }; - assert_ok!(verifier.import_header(header.hash(), header.clone())); - - let stored_header = storage - .header_by_hash(header.hash()) - .expect("Should have been imported successfully"); - assert_eq!(stored_header.is_finalized, false); - assert_eq!(stored_header.hash(), storage.best_headers()[0].hash); - }) - } - - #[test] - fn successfully_imports_two_different_headers_at_same_height() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - // We want to write the genesis header to storage - let _ = write_headers(&mut storage, vec![]); - - // Both of these headers have the genesis header as their parent - let header_on_fork1 = test_header(1); - let mut header_on_fork2 = test_header(1); - - // We need to change _something_ to make it a different header - header_on_fork2.state_root = [1; 32].into(); - - let mut verifier = Verifier { - storage: storage.clone(), - }; - - // It should be fine to import both - assert_ok!(verifier.import_header(header_on_fork1.hash(), header_on_fork1.clone())); - assert_ok!(verifier.import_header(header_on_fork2.hash(), header_on_fork2.clone())); - - // We should have two headers marked as being the best since they're - // both at the same height - let best_headers = storage.best_headers(); - assert_eq!(best_headers.len(), 2); - assert_eq!( - 
best_headers[0], - HeaderId { - number: *header_on_fork1.number(), - hash: header_on_fork1.hash() - } - ); - assert_eq!( - best_headers[1], - HeaderId { - number: *header_on_fork2.number(), - hash: header_on_fork2.hash() - } - ); - assert_eq!(>::get(), 1); - }) - } - - #[test] - fn correctly_updates_the_best_header_given_a_better_header() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - // We want to write the genesis header to storage - let _ = write_headers(&mut storage, vec![]); - - // Write two headers at the same height to storage. - let best_header = test_header(1); - let mut also_best_header = test_header(1); - - // We need to change _something_ to make it a different header - also_best_header.state_root = [1; 32].into(); - - let mut verifier = Verifier { - storage: storage.clone(), - }; - - // It should be fine to import both - assert_ok!(verifier.import_header(best_header.hash(), best_header.clone())); - assert_ok!(verifier.import_header(also_best_header.hash(), also_best_header)); - - // The headers we manually imported should have been marked as the best - // upon writing to storage. Let's confirm that. - assert_eq!(storage.best_headers().len(), 2); - assert_eq!(>::get(), 1); - - // Now let's build something at a better height. - let mut better_header = test_header(2); - better_header.parent_hash = best_header.hash(); - - assert_ok!(verifier.import_header(better_header.hash(), better_header.clone())); - - // Since `better_header` is the only one at height = 2 we should only have - // a single "best header" now. 
- let best_headers = storage.best_headers(); - assert_eq!(best_headers.len(), 1); - assert_eq!( - best_headers[0], - HeaderId { - number: *better_header.number(), - hash: better_header.hash() - } - ); - assert_eq!(>::get(), 2); - }) - } - - #[test] - fn doesnt_write_best_header_twice_upon_finalization() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let _imported_headers = write_default_headers(&mut storage, vec![1]); - - let set_id = 1; - let authorities = authority_list(); - let initial_authority_set = AuthoritySet::new(authorities.clone(), set_id); - storage.update_current_authority_set(initial_authority_set); - - // Let's import our header - let header = test_header(2); - let mut verifier = Verifier { - storage: storage.clone(), - }; - assert_ok!(verifier.import_header(header.hash(), header.clone())); - - // Our header should be the only best header we have - assert_eq!(storage.best_headers()[0].hash, header.hash()); - assert_eq!(storage.best_headers().len(), 1); - - // Now lets finalize our best header - let grandpa_round = 1; - let justification = make_justification_for_header(&header, grandpa_round, set_id, &authorities).encode(); - assert_ok!(verifier.import_finality_proof(header.hash(), justification.into())); - - // Our best header should only appear once in the list of best headers - assert_eq!(storage.best_headers()[0].hash, header.hash()); - assert_eq!(storage.best_headers().len(), 1); - }) - } - - #[test] - fn related_headers_are_ancestors() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let mut imported_headers = write_default_headers(&mut storage, vec![1, 2, 3]); - - for header in imported_headers.iter() { - assert!(storage.header_exists(header.hash())); - } - - let ancestor = imported_headers.remove(0); - let child = imported_headers.pop().unwrap(); - let ancestors = headers_between(&storage, ancestor, child); - - assert!(ancestors.is_some()); - assert_eq!(ancestors.unwrap().len(), 3); - }) - } - - #[test] - fn 
unrelated_headers_are_not_ancestors() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut imported_headers = write_default_headers(&mut storage, vec![1, 2, 3]); - for header in imported_headers.iter() { - assert!(storage.header_exists(header.hash())); - } - - // Need to give it a different parent_hash or else it'll be - // related to our test genesis header - let mut bad_ancestor = test_header(0); - bad_ancestor.parent_hash = [1u8; 32].into(); - let bad_ancestor = ImportedHeader { - header: bad_ancestor, - requires_justification: false, - is_finalized: false, - signal_hash: None, - }; - - let child = imported_headers.pop().unwrap(); - let ancestors = headers_between(&storage, bad_ancestor, child); - assert!(ancestors.is_none()); - }) - } - - #[test] - fn ancestor_newer_than_child_is_not_related() { - run_test(|| { - let mut storage = PalletStorage::::new(); - - let mut imported_headers = write_default_headers(&mut storage, vec![1, 2, 3]); - for header in imported_headers.iter() { - assert!(storage.header_exists(header.hash())); - } - - // What if we have an "ancestor" that's newer than child? 
- let new_ancestor = test_header(5); - let new_ancestor = ImportedHeader { - header: new_ancestor, - requires_justification: false, - is_finalized: false, - signal_hash: None, - }; - - let child = imported_headers.pop().unwrap(); - let ancestors = headers_between(&storage, new_ancestor, child); - assert!(ancestors.is_none()); - }) - } - - #[test] - fn doesnt_import_header_which_schedules_change_with_invalid_authority_set() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let _imported_headers = write_default_headers(&mut storage, vec![1]); - let mut header = test_header(2); - - // This is an *invalid* authority set because the combined weight of the - // authorities is greater than `u64::MAX` - let consensus_log = ConsensusLog::::ScheduledChange(sp_finality_grandpa::ScheduledChange { - next_authorities: vec![(alice(), u64::MAX), (bob(), u64::MAX)], - delay: 0, - }); - - header.digest = Digest:: { - logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())], - }; - - let mut verifier = Verifier { storage }; - - assert_eq!( - verifier.import_header(header.hash(), header).unwrap_err(), - ImportError::InvalidAuthoritySet - ); - }) - } - - #[test] - fn finalizes_header_which_doesnt_enact_or_schedule_a_new_authority_set() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let _imported_headers = write_default_headers(&mut storage, vec![1]); - - // Nothing special about this header, yet GRANDPA may have created a justification - // for it since it does that periodically - let header = test_header(2); - - let set_id = 1; - let authorities = authority_list(); - let authority_set = AuthoritySet::new(authorities.clone(), set_id); - storage.update_current_authority_set(authority_set); - - // We'll need this justification to finalize the header - let grandpa_round = 1; - let justification = make_justification_for_header(&header, grandpa_round, set_id, &authorities).encode(); - - let mut verifier = Verifier { - storage: 
storage.clone(), - }; - - assert_ok!(verifier.import_header(header.hash(), header.clone())); - assert_ok!(verifier.import_finality_proof(header.hash(), justification.into())); - assert_eq!(storage.best_finalized_header().header, header); - }) - } - - #[test] - fn correctly_verifies_and_finalizes_chain_of_headers() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let imported_headers = write_default_headers(&mut storage, vec![1, 2]); - let header = test_header(3); - - let set_id = 1; - let authorities = authority_list(); - let authority_set = AuthoritySet { - authorities: authorities.clone(), - set_id, - }; - storage.update_current_authority_set(authority_set); - - let grandpa_round = 1; - let justification = make_justification_for_header(&header, grandpa_round, set_id, &authorities).encode(); - - let mut verifier = Verifier { - storage: storage.clone(), - }; - assert!(verifier.import_header(header.hash(), header.clone()).is_ok()); - assert!(verifier - .import_finality_proof(header.hash(), justification.into()) - .is_ok()); - - // Make sure we marked the our headers as finalized - assert!(storage.header_by_hash(imported_headers[1].hash()).unwrap().is_finalized); - assert!(storage.header_by_hash(imported_headers[2].hash()).unwrap().is_finalized); - assert!(storage.header_by_hash(header.hash()).unwrap().is_finalized); - - // Make sure the header at the highest height is the best finalized - assert_eq!(storage.best_finalized_header().header, header); - }); - } - - #[test] - fn updates_authority_set_upon_finalizing_header_which_enacts_change() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let genesis_hash = write_headers(&mut storage, vec![])[0].hash(); - - // We want this header to indicate that there's an upcoming set change on this fork - let parent = ImportedHeader { - header: test_header(1), - requires_justification: false, - is_finalized: false, - signal_hash: Some(genesis_hash), - }; - storage.write_header(&parent); - - let set_id 
= 1; - let authorities = authority_list(); - let initial_authority_set = AuthoritySet::new(authorities.clone(), set_id); - storage.update_current_authority_set(initial_authority_set); - - // This header enacts an authority set change upon finalization - let header = test_header(2); - - let grandpa_round = 1; - let justification = make_justification_for_header(&header, grandpa_round, set_id, &authorities).encode(); - - // Schedule a change at the height of our header - let set_id = 2; - let height = *header.number(); - let authorities = vec![alice()]; - let change = schedule_next_change(authorities, set_id, height); - storage.schedule_next_set_change(genesis_hash, change.clone()); - - let mut verifier = Verifier { - storage: storage.clone(), - }; - - assert_ok!(verifier.import_header(header.hash(), header.clone())); - assert_eq!(storage.missing_justifications().len(), 1); - assert_eq!(storage.missing_justifications()[0].hash, header.hash()); - - assert_ok!(verifier.import_finality_proof(header.hash(), justification.into())); - assert_eq!(storage.best_finalized_header().header, header); - - // Make sure that we have updated the set now that we've finalized our header - assert_eq!(storage.current_authority_set(), change.authority_set); - assert!(storage.missing_justifications().is_empty()); - }) - } - - #[test] - fn importing_finality_proof_for_already_finalized_header_doesnt_work() { - run_test(|| { - let mut storage = PalletStorage::::new(); - let genesis = test_header(0); - - let genesis = ImportedHeader { - header: genesis, - requires_justification: false, - is_finalized: true, - signal_hash: None, - }; - - // Make sure that genesis is the best finalized header - >::put(genesis.hash()); - storage.write_header(&genesis); - - let mut verifier = Verifier { storage }; - - // Now we want to try and import it again to see what happens - assert_eq!( - verifier - .import_finality_proof(genesis.hash(), vec![4, 2].into()) - .unwrap_err(), - FinalizationError::OldHeader - ); 
- }); - } -} diff --git a/polkadot/bridges/primitives/currency-exchange/Cargo.toml b/polkadot/bridges/primitives/currency-exchange/Cargo.toml deleted file mode 100644 index 43367ba7992b1751825c775c397a9d2945f20c36..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/currency-exchange/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "bp-currency-exchange" -description = "Primitives of currency exchange module." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-support/std", - "sp-api/std", - "sp-std/std", -] diff --git a/polkadot/bridges/primitives/currency-exchange/src/lib.rs b/polkadot/bridges/primitives/currency-exchange/src/lib.rs deleted file mode 100644 index 131daf66eda5130ef3b6236700e15e57ecedf91e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/currency-exchange/src/lib.rs +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] -// Generated by `DecodeLimit::decode_with_depth_limit` -#![allow(clippy::unnecessary_mut_passed)] - -use codec::{Decode, Encode, EncodeLike}; -use frame_support::{Parameter, RuntimeDebug}; -use sp_api::decl_runtime_apis; -use sp_std::marker::PhantomData; - -/// All errors that may happen during exchange. -#[derive(RuntimeDebug, PartialEq)] -pub enum Error { - /// Invalid peer blockchain transaction provided. - InvalidTransaction, - /// Peer transaction has invalid amount. - InvalidAmount, - /// Peer transaction has invalid recipient. - InvalidRecipient, - /// Cannot map from peer recipient to this blockchain recipient. - FailedToMapRecipients, - /// Failed to convert from peer blockchain currency to this blockhain currency. - FailedToConvertCurrency, - /// Deposit has failed. - DepositFailed, - /// Deposit has partially failed (changes to recipient account were made). - DepositPartiallyFailed, -} - -/// Result of all exchange operations. -pub type Result = sp_std::result::Result; - -/// Peer blockchain lock funds transaction. -#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)] -pub struct LockFundsTransaction { - /// Something that uniquely identifies this transfer. - pub id: TransferId, - /// Funds recipient on the peer chain. - pub recipient: Recipient, - /// Amount of the locked funds. - pub amount: Amount, -} - -/// Peer blockchain transaction that may represent lock funds transaction. -pub trait MaybeLockFundsTransaction { - /// Transaction type. 
- type Transaction; - /// Identifier that uniquely identifies this transfer. - type Id: Decode + Encode + EncodeLike + sp_std::fmt::Debug; - /// Peer recipient type. - type Recipient; - /// Peer currency amount type. - type Amount; - - /// Parse lock funds transaction of the peer blockchain. Returns None if - /// transaction format is unknown, or it isn't a lock funds transaction. - fn parse(tx: &Self::Transaction) -> Result>; -} - -/// Map that maps recipients from peer blockchain to this blockchain recipients. -pub trait RecipientsMap { - /// Peer blockchain recipient type. - type PeerRecipient; - /// Current blockchain recipient type. - type Recipient; - - /// Lookup current blockchain recipient by peer blockchain recipient. - fn map(peer_recipient: Self::PeerRecipient) -> Result; -} - -/// Conversion between two currencies. -pub trait CurrencyConverter { - /// Type of the source currency amount. - type SourceAmount; - /// Type of the target currency amount. - type TargetAmount; - - /// Covert from source to target currency. - fn convert(amount: Self::SourceAmount) -> Result; -} - -/// Currency deposit. -pub trait DepositInto { - /// Recipient type. - type Recipient; - /// Currency amount type. - type Amount; - - /// Grant some money to given account. - fn deposit_into(recipient: Self::Recipient, amount: Self::Amount) -> Result<()>; -} - -/// Recipients map which is used when accounts ids are the same on both chains. -#[derive(Debug)] -pub struct IdentityRecipients(PhantomData); - -impl RecipientsMap for IdentityRecipients { - type PeerRecipient = AccountId; - type Recipient = AccountId; - - fn map(peer_recipient: Self::PeerRecipient) -> Result { - Ok(peer_recipient) - } -} - -/// Currency converter which is used when currency is the same on both chains. 
-#[derive(Debug)] -pub struct IdentityCurrencyConverter(PhantomData); - -impl CurrencyConverter for IdentityCurrencyConverter { - type SourceAmount = Amount; - type TargetAmount = Amount; - - fn convert(currency: Self::SourceAmount) -> Result { - Ok(currency) - } -} - -decl_runtime_apis! { - /// API for Rialto exchange transactions submitters. - pub trait RialtoCurrencyExchangeApi { - /// Returns true if currency exchange module is able to import transaction proof in - /// its current state. - fn filter_transaction_proof(proof: Proof) -> bool; - } - - /// API for Kovan exchange transactions submitters. - pub trait KovanCurrencyExchangeApi { - /// Returns true if currency exchange module is able to import transaction proof in - /// its current state. - fn filter_transaction_proof(proof: Proof) -> bool; - } -} diff --git a/polkadot/bridges/primitives/ethereum-poa/Cargo.toml b/polkadot/bridges/primitives/ethereum-poa/Cargo.toml deleted file mode 100644 index cd2c3a97a0f32095dd8812246b663235fc8f3099..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/ethereum-poa/Cargo.toml +++ /dev/null @@ -1,57 +0,0 @@ -[package] -name = "bp-eth-poa" -description = "Primitives of Ethereum PoA Bridge module." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -ethbloom = { version = "0.10.0", default-features = false, features = ["rlp"] } -fixed-hash = { version = "0.7", default-features = false } -hash-db = { version = "0.15.2", default-features = false } -impl-rlp = { version = "0.3", default-features = false } -impl-serde = { version = "0.3.1", optional = true } -libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"] } -parity-bytes = { version = "0.1", default-features = false } -plain_hasher = { version = "0.2.2", default-features = false } -primitive-types = { version = "0.9", default-features = false, features = ["codec", "rlp"] } -rlp = { version = "0.5", default-features = false } -serde = { version = "1.0", optional = true } -serde-big-array = { version = "0.2", optional = true } -triehash = { version = "0.8.2", default-features = false } - -# Substrate Dependencies - -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[dev-dependencies] -hex-literal = "0.2" - -[features] -default = ["std"] -std = [ - "codec/std", - "ethbloom/std", - "fixed-hash/std", - "hash-db/std", - "impl-rlp/std", - "impl-serde", - "libsecp256k1/std", - "parity-bytes/std", - "plain_hasher/std", - "primitive-types/std", - "primitive-types/serde", - "rlp/std", - "serde/std", - "serde-big-array", - "sp-api/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", - "triehash/std", -] diff --git 
a/polkadot/bridges/primitives/ethereum-poa/src/lib.rs b/polkadot/bridges/primitives/ethereum-poa/src/lib.rs deleted file mode 100644 index dc65ac432b144ff67fbe870bcd71e75fe7599068..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/ethereum-poa/src/lib.rs +++ /dev/null @@ -1,734 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] -// Generated by `DecodeLimit::decode_with_depth_limit` -#![allow(clippy::unnecessary_mut_passed)] - -pub use parity_bytes::Bytes; -pub use primitive_types::{H160, H256, H512, U128, U256}; -pub use rlp::encode as rlp_encode; - -use codec::{Decode, Encode}; -use ethbloom::{Bloom as EthBloom, Input as BloomInput}; -use fixed_hash::construct_fixed_hash; -use rlp::{Decodable, DecoderError, Rlp, RlpStream}; -use sp_io::hashing::keccak_256; -use sp_runtime::RuntimeDebug; -use sp_std::prelude::*; - -use impl_rlp::impl_fixed_hash_rlp; -#[cfg(feature = "std")] -use impl_serde::impl_fixed_hash_serde; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; -#[cfg(feature = "std")] -use serde_big_array::big_array; - -construct_fixed_hash! 
{ pub struct H520(65); } -impl_fixed_hash_rlp!(H520, 65); -#[cfg(feature = "std")] -impl_fixed_hash_serde!(H520, 65); - -/// Raw (RLP-encoded) ethereum transaction. -pub type RawTransaction = Vec; - -/// Raw (RLP-encoded) ethereum transaction receipt. -pub type RawTransactionReceipt = Vec; - -/// An ethereum address. -pub type Address = H160; - -pub mod signatures; - -/// Complete header id. -#[derive(Encode, Decode, Default, RuntimeDebug, PartialEq, Clone, Copy)] -pub struct HeaderId { - /// Header number. - pub number: u64, - /// Header hash. - pub hash: H256, -} - -/// An Aura header. -#[derive(Clone, Default, Encode, Decode, PartialEq, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct AuraHeader { - /// Parent block hash. - pub parent_hash: H256, - /// Block timestamp. - pub timestamp: u64, - /// Block number. - pub number: u64, - /// Block author. - pub author: Address, - - /// Transactions root. - pub transactions_root: H256, - /// Block uncles hash. - pub uncles_hash: H256, - /// Block extra data. - pub extra_data: Bytes, - - /// State root. - pub state_root: H256, - /// Block receipts root. - pub receipts_root: H256, - /// Block bloom. - pub log_bloom: Bloom, - /// Gas used for contracts execution. - pub gas_used: U256, - /// Block gas limit. - pub gas_limit: U256, - - /// Block difficulty. - pub difficulty: U256, - /// Vector of post-RLP-encoded fields. - pub seal: Vec, -} - -/// Parsed ethereum transaction. -#[derive(PartialEq, RuntimeDebug)] -pub struct Transaction { - /// Sender address. - pub sender: Address, - /// Unsigned portion of ethereum transaction. - pub unsigned: UnsignedTransaction, -} - -/// Unsigned portion of ethereum transaction. -#[derive(Clone, PartialEq, RuntimeDebug)] -pub struct UnsignedTransaction { - /// Sender nonce. - pub nonce: U256, - /// Gas price. - pub gas_price: U256, - /// Gas limit. - pub gas: U256, - /// Transaction destination address. None if it is contract creation transaction. 
- pub to: Option
, - /// Value. - pub value: U256, - /// Associated data. - pub payload: Bytes, -} - -/// Information describing execution of a transaction. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] -pub struct Receipt { - /// The total gas used in the block following execution of the transaction. - pub gas_used: U256, - /// The OR-wide combination of all logs' blooms for this transaction. - pub log_bloom: Bloom, - /// The logs stemming from this transaction. - pub logs: Vec, - /// Transaction outcome. - pub outcome: TransactionOutcome, -} - -/// Transaction outcome store in the receipt. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] -pub enum TransactionOutcome { - /// Status and state root are unknown under EIP-98 rules. - Unknown, - /// State root is known. Pre EIP-98 and EIP-658 rules. - StateRoot(H256), - /// Status code is known. EIP-658 rules. - StatusCode(u8), -} - -/// A record of execution for a `LOG` operation. -#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] -pub struct LogEntry { - /// The address of the contract executing at the point of the `LOG` operation. - pub address: Address, - /// The topics associated with the `LOG` operation. - pub topics: Vec, - /// The data associated with the `LOG` operation. - pub data: Bytes, -} - -/// Logs bloom. -#[derive(Clone, Encode, Decode)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct Bloom(#[cfg_attr(feature = "std", serde(with = "BigArray"))] [u8; 256]); - -#[cfg(feature = "std")] -big_array! { BigArray; } - -/// An empty step message that is included in a seal, the only difference is that it doesn't include -/// the `parent_hash` in order to save space. The included signature is of the original empty step -/// message, which can be reconstructed by using the parent hash of the block in which this sealed -/// empty message is included. -pub struct SealedEmptyStep { - /// Signature of the original message author. 
- pub signature: H520, - /// The step this message is generated for. - pub step: u64, -} - -impl AuraHeader { - /// Compute id of this header. - pub fn compute_id(&self) -> HeaderId { - HeaderId { - number: self.number, - hash: self.compute_hash(), - } - } - - /// Compute hash of this header (keccak of the RLP with seal). - pub fn compute_hash(&self) -> H256 { - keccak_256(&self.rlp(true)).into() - } - - /// Get id of this header' parent. Returns None if this is genesis header. - pub fn parent_id(&self) -> Option { - self.number.checked_sub(1).map(|parent_number| HeaderId { - number: parent_number, - hash: self.parent_hash, - }) - } - - /// Check if passed transactions receipts are matching receipts root in this header. - /// Returns Ok(computed-root) if check succeeds. - /// Returns Err(computed-root) if check fails. - pub fn check_receipts_root(&self, receipts: &[Receipt]) -> Result { - check_merkle_proof(self.receipts_root, receipts.iter().map(|r| r.rlp())) - } - - /// Check if passed raw transactions receipts are matching receipts root in this header. - /// Returns Ok(computed-root) if check succeeds. - /// Returns Err(computed-root) if check fails. - pub fn check_raw_receipts_root<'a>( - &self, - receipts: impl IntoIterator, - ) -> Result { - check_merkle_proof(self.receipts_root, receipts.into_iter()) - } - - /// Check if passed transactions are matching transactions root in this header. - /// Returns Ok(computed-root) if check succeeds. - /// Returns Err(computed-root) if check fails. - pub fn check_transactions_root<'a>( - &self, - transactions: impl IntoIterator, - ) -> Result { - check_merkle_proof(self.transactions_root, transactions.into_iter()) - } - - /// Gets the seal hash of this header. 
- pub fn seal_hash(&self, include_empty_steps: bool) -> Option { - Some(match include_empty_steps { - true => { - let mut message = self.compute_hash().as_bytes().to_vec(); - message.extend_from_slice(self.seal.get(2)?); - keccak_256(&message).into() - } - false => keccak_256(&self.rlp(false)).into(), - }) - } - - /// Get step this header is generated for. - pub fn step(&self) -> Option { - self.seal.get(0).map(|x| Rlp::new(&x)).and_then(|x| x.as_val().ok()) - } - - /// Get header author' signature. - pub fn signature(&self) -> Option { - self.seal.get(1).and_then(|x| Rlp::new(x).as_val().ok()) - } - - /// Extracts the empty steps from the header seal. - pub fn empty_steps(&self) -> Option> { - self.seal - .get(2) - .and_then(|x| Rlp::new(x).as_list::().ok()) - } - - /// Returns header RLP with or without seals. - fn rlp(&self, with_seal: bool) -> Bytes { - let mut s = RlpStream::new(); - if with_seal { - s.begin_list(13 + self.seal.len()); - } else { - s.begin_list(13); - } - - s.append(&self.parent_hash); - s.append(&self.uncles_hash); - s.append(&self.author); - s.append(&self.state_root); - s.append(&self.transactions_root); - s.append(&self.receipts_root); - s.append(&EthBloom::from(self.log_bloom.0)); - s.append(&self.difficulty); - s.append(&self.number); - s.append(&self.gas_limit); - s.append(&self.gas_used); - s.append(&self.timestamp); - s.append(&self.extra_data); - - if with_seal { - for b in &self.seal { - s.append_raw(b, 1); - } - } - - s.out().to_vec() - } -} - -impl UnsignedTransaction { - /// Decode unsigned portion of raw transaction RLP. 
- pub fn decode_rlp(raw_tx: &[u8]) -> Result { - let tx_rlp = Rlp::new(raw_tx); - let to = tx_rlp.at(3)?; - Ok(UnsignedTransaction { - nonce: tx_rlp.val_at(0)?, - gas_price: tx_rlp.val_at(1)?, - gas: tx_rlp.val_at(2)?, - to: match to.is_empty() { - false => Some(to.as_val()?), - true => None, - }, - value: tx_rlp.val_at(4)?, - payload: tx_rlp.val_at(5)?, - }) - } - - /// Returns message that has to be signed to sign this transaction. - pub fn message(&self, chain_id: Option) -> H256 { - keccak_256(&self.rlp(chain_id)).into() - } - - /// Returns unsigned transaction RLP. - pub fn rlp(&self, chain_id: Option) -> Bytes { - let mut stream = RlpStream::new_list(if chain_id.is_some() { 9 } else { 6 }); - self.rlp_to(chain_id, &mut stream); - stream.out().to_vec() - } - - /// Encode to given rlp stream. - pub fn rlp_to(&self, chain_id: Option, stream: &mut RlpStream) { - stream.append(&self.nonce); - stream.append(&self.gas_price); - stream.append(&self.gas); - match self.to { - Some(to) => stream.append(&to), - None => stream.append(&""), - }; - stream.append(&self.value); - stream.append(&self.payload); - if let Some(chain_id) = chain_id { - stream.append(&chain_id); - stream.append(&0u8); - stream.append(&0u8); - } - } -} - -impl Receipt { - /// Decode status from raw transaction receipt RLP. - pub fn is_successful_raw_receipt(raw_receipt: &[u8]) -> Result { - let rlp = Rlp::new(raw_receipt); - if rlp.item_count()? == 3 { - // no outcome - invalid tx? - Ok(false) - } else { - let first = rlp.at(0)?; - if first.is_data() && first.data()?.len() <= 1 { - // EIP-658 transaction - status of successful transaction is 1 - let status: u8 = first.as_val()?; - Ok(status == 1) - } else { - // pre-EIP-658 transaction - we do not support this kind of transactions - Ok(false) - } - } - } - - /// Returns receipt RLP. 
- pub fn rlp(&self) -> Bytes { - let mut s = RlpStream::new(); - match self.outcome { - TransactionOutcome::Unknown => { - s.begin_list(3); - } - TransactionOutcome::StateRoot(ref root) => { - s.begin_list(4); - s.append(root); - } - TransactionOutcome::StatusCode(ref status_code) => { - s.begin_list(4); - s.append(status_code); - } - } - s.append(&self.gas_used); - s.append(&EthBloom::from(self.log_bloom.0)); - - s.begin_list(self.logs.len()); - for log in &self.logs { - s.begin_list(3); - s.append(&log.address); - s.begin_list(log.topics.len()); - for topic in &log.topics { - s.append(topic); - } - s.append(&log.data); - } - - s.out().to_vec() - } -} - -impl SealedEmptyStep { - /// Returns message that has to be signed by the validator. - pub fn message(&self, parent_hash: &H256) -> H256 { - let mut message = RlpStream::new_list(2); - message.append(&self.step); - message.append(parent_hash); - keccak_256(&message.out()).into() - } - - /// Returns rlp for the vector of empty steps (we only do encoding in tests). - pub fn rlp_of(empty_steps: &[SealedEmptyStep]) -> Bytes { - let mut s = RlpStream::new(); - s.begin_list(empty_steps.len()); - for empty_step in empty_steps { - s.begin_list(2).append(&empty_step.signature).append(&empty_step.step); - } - s.out().to_vec() - } -} - -impl Decodable for SealedEmptyStep { - fn decode(rlp: &Rlp) -> Result { - let signature: H520 = rlp.val_at(0)?; - let step = rlp.val_at(1)?; - - Ok(SealedEmptyStep { signature, step }) - } -} - -impl LogEntry { - /// Calculates the bloom of this log entry. - pub fn bloom(&self) -> Bloom { - let eth_bloom = - self.topics - .iter() - .fold(EthBloom::from(BloomInput::Raw(self.address.as_bytes())), |mut b, t| { - b.accrue(BloomInput::Raw(t.as_bytes())); - b - }); - Bloom(*eth_bloom.data()) - } -} - -impl Bloom { - /// Returns true if this bloom has all bits from the other set. 
- pub fn contains(&self, other: &Bloom) -> bool { - self.0.iter().zip(other.0.iter()).all(|(l, r)| (l & r) == *r) - } -} - -impl<'a> From<&'a [u8; 256]> for Bloom { - fn from(buffer: &'a [u8; 256]) -> Bloom { - Bloom(*buffer) - } -} - -impl PartialEq for Bloom { - fn eq(&self, other: &Bloom) -> bool { - self.0.iter().zip(other.0.iter()).all(|(l, r)| l == r) - } -} - -impl Default for Bloom { - fn default() -> Self { - Bloom([0; 256]) - } -} - -#[cfg(feature = "std")] -impl std::fmt::Debug for Bloom { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("Bloom").finish() - } -} - -/// Decode Ethereum transaction. -pub fn transaction_decode_rlp(raw_tx: &[u8]) -> Result { - // parse transaction fields - let unsigned = UnsignedTransaction::decode_rlp(raw_tx)?; - let tx_rlp = Rlp::new(raw_tx); - let v: u64 = tx_rlp.val_at(6)?; - let r: U256 = tx_rlp.val_at(7)?; - let s: U256 = tx_rlp.val_at(8)?; - - // reconstruct signature - let mut signature = [0u8; 65]; - let (chain_id, v) = match v { - v if v == 27u64 => (None, 0), - v if v == 28u64 => (None, 1), - v if v >= 35u64 => (Some((v - 35) / 2), ((v - 1) % 2) as u8), - _ => (None, 4), - }; - r.to_big_endian(&mut signature[0..32]); - s.to_big_endian(&mut signature[32..64]); - signature[64] = v; - - // reconstruct message that has been signed - let message = unsigned.message(chain_id); - - // recover tx sender - let sender_public = sp_io::crypto::secp256k1_ecdsa_recover(&signature, &message.as_fixed_bytes()) - .map_err(|_| rlp::DecoderError::Custom("Failed to recover transaction sender"))?; - let sender_address = public_to_address(&sender_public); - - Ok(Transaction { - sender: sender_address, - unsigned, - }) -} - -/// Convert public key into corresponding ethereum address. 
-pub fn public_to_address(public: &[u8; 64]) -> Address { - let hash = keccak_256(public); - let mut result = Address::zero(); - result.as_bytes_mut().copy_from_slice(&hash[12..]); - result -} - -/// Check ethereum merkle proof. -/// Returns Ok(computed-root) if check succeeds. -/// Returns Err(computed-root) if check fails. -fn check_merkle_proof>(expected_root: H256, items: impl Iterator) -> Result { - let computed_root = compute_merkle_root(items); - if computed_root == expected_root { - Ok(computed_root) - } else { - Err(computed_root) - } -} - -/// Compute ethereum merkle root. -pub fn compute_merkle_root>(items: impl Iterator) -> H256 { - struct Keccak256Hasher; - - impl hash_db::Hasher for Keccak256Hasher { - type Out = H256; - type StdHasher = plain_hasher::PlainHasher; - const LENGTH: usize = 32; - fn hash(x: &[u8]) -> Self::Out { - keccak_256(x).into() - } - } - - triehash::ordered_trie_root::(items) -} - -/// Get validator that should author the block at given step. -pub fn step_validator(header_validators: &[T], header_step: u64) -> &T { - &header_validators[(header_step % header_validators.len() as u64) as usize] -} - -sp_api::decl_runtime_apis! { - /// API for querying information about headers from the Rialto Bridge Pallet - pub trait RialtoPoAHeaderApi { - /// Returns number and hash of the best block known to the bridge module. - /// - /// The caller should only submit an `import_header` transaction that makes - /// (or leads to making) other header the best one. - fn best_block() -> (u64, H256); - /// Returns number and hash of the best finalized block known to the bridge module. - fn finalized_block() -> (u64, H256); - /// Returns true if the import of given block requires transactions receipts. - fn is_import_requires_receipts(header: AuraHeader) -> bool; - /// Returns true if header is known to the runtime. 
- fn is_known_block(hash: H256) -> bool; - } - - /// API for querying information about headers from the Kovan Bridge Pallet - pub trait KovanHeaderApi { - /// Returns number and hash of the best block known to the bridge module. - /// - /// The caller should only submit an `import_header` transaction that makes - /// (or leads to making) other header the best one. - fn best_block() -> (u64, H256); - /// Returns number and hash of the best finalized block known to the bridge module. - fn finalized_block() -> (u64, H256); - /// Returns true if the import of given block requires transactions receipts. - fn is_import_requires_receipts(header: AuraHeader) -> bool; - /// Returns true if header is known to the runtime. - fn is_known_block(hash: H256) -> bool; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use hex_literal::hex; - - #[test] - fn transfer_transaction_decode_works() { - // value transfer transaction - // https://etherscan.io/tx/0xb9d4ad5408f53eac8627f9ccd840ba8fb3469d55cd9cc2a11c6e049f1eef4edd - // https://etherscan.io/getRawTx?tx=0xb9d4ad5408f53eac8627f9ccd840ba8fb3469d55cd9cc2a11c6e049f1eef4edd - let raw_tx = hex!("f86c0a85046c7cfe0083016dea94d1310c1e038bc12865d3d3997275b3e4737c6302880b503be34d9fe80080269fc7eaaa9c21f59adf8ad43ed66cf5ef9ee1c317bd4d32cd65401e7aaca47cfaa0387d79c65b90be6260d09dcfb780f29dd8133b9b1ceb20b83b7e442b4bfc30cb"); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: hex!("67835910d32600471f388a137bbff3eb07993c04").into(), - unsigned: UnsignedTransaction { - nonce: 10.into(), - gas_price: 19000000000u64.into(), - gas: 93674.into(), - to: Some(hex!("d1310c1e038bc12865d3d3997275b3e4737c6302").into()), - value: 815217380000000000_u64.into(), - payload: Default::default(), - } - }), - ); - - // Kovan value transfer transaction - // https://kovan.etherscan.io/tx/0x3b4b7bd41c1178045ccb4753aa84c1ef9864b4d712fa308b228917cd837915da - // 
https://kovan.etherscan.io/getRawTx?tx=0x3b4b7bd41c1178045ccb4753aa84c1ef9864b4d712fa308b228917cd837915da - let raw_tx = hex!("f86a822816808252089470c1ccde719d6f477084f07e4137ab0e55f8369f8930cf46e92063afd8008078a00e4d1f4d8aa992bda3c105ff3d6e9b9acbfd99facea00985e2131029290adbdca028ea29a46a4b66ec65b454f0706228e3768cb0ecf755f67c50ddd472f11d5994"); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: hex!("faadface3fbd81ce37b0e19c0b65ff4234148132").into(), - unsigned: UnsignedTransaction { - nonce: 10262.into(), - gas_price: 0.into(), - gas: 21000.into(), - to: Some(hex!("70c1ccde719d6f477084f07e4137ab0e55f8369f").into()), - value: 900379597077600000000_u128.into(), - payload: Default::default(), - }, - }), - ); - } - - #[test] - fn payload_transaction_decode_works() { - // contract call transaction - // https://etherscan.io/tx/0xdc2b996b4d1d6922bf6dba063bfd70913279cb6170967c9bb80252aeb061cf65 - // https://etherscan.io/getRawTx?tx=0xdc2b996b4d1d6922bf6dba063bfd70913279cb6170967c9bb80252aeb061cf65 - let raw_tx = hex!("f8aa76850430e234008301500094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000e08f35f66867a454835b25118f1e490e7f9e9a7400000000000000000000000000000000000000000000000000000000004c4b4025a0964e023999621dc3d4d831c43c71f7555beb6d1192dee81a3674b3f57e310f21a00f229edd86f841d1ee4dc48cc16667e2283817b1d39bae16ced10cd206ae4fd4"); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: hex!("2b9a4d37bdeecdf994c4c9ad7f3cf8dc632f7d70").into(), - unsigned: UnsignedTransaction { - nonce: 118.into(), - gas_price: 18000000000u64.into(), - gas: 86016.into(), - to: Some(hex!("dac17f958d2ee523a2206206994597c13d831ec7").into()), - value: 0.into(), - payload: hex!("a9059cbb000000000000000000000000e08f35f66867a454835b25118f1e490e7f9e9a7400000000000000000000000000000000000000000000000000000000004c4b40").to_vec(), - }, - }), - ); - - // Kovan contract call transaction - // 
https://kovan.etherscan.io/tx/0x2904b4451d23665492239016b78da052d40d55fdebc7304b38e53cf6a37322cf - // https://kovan.etherscan.io/getRawTx?tx=0x2904b4451d23665492239016b78da052d40d55fdebc7304b38e53cf6a37322cf - let raw_tx = hex!("f8ac8302200b843b9aca00830271009484dd11eb2a29615303d18149c0dbfa24167f896680b844a9059cbb00000000000000000000000001503dfc5ad81bf630d83697e98601871bb211b600000000000000000000000000000000000000000000000000000000000027101ba0ce126d2cca81f5e245f292ff84a0d915c0a4ac52af5c51219db1e5d36aa8da35a0045298b79dac631907403888f9b04c2ab5509fe0cc31785276d30a40b915fcf9"); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: hex!("617da121abf03d4c1af572f5a4e313e26bef7bdc").into(), - unsigned: UnsignedTransaction { - nonce: 139275.into(), - gas_price: 1000000000.into(), - gas: 160000.into(), - to: Some(hex!("84dd11eb2a29615303d18149c0dbfa24167f8966").into()), - value: 0.into(), - payload: hex!("a9059cbb00000000000000000000000001503dfc5ad81bf630d83697e98601871bb211b60000000000000000000000000000000000000000000000000000000000002710").to_vec(), - }, - }), - ); - } - - #[test] - fn is_successful_raw_receipt_works() { - assert!(Receipt::is_successful_raw_receipt(&[]).is_err()); - - assert_eq!( - Receipt::is_successful_raw_receipt( - &Receipt { - outcome: TransactionOutcome::Unknown, - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp() - ), - Ok(false), - ); - assert_eq!( - Receipt::is_successful_raw_receipt( - &Receipt { - outcome: TransactionOutcome::StateRoot(Default::default()), - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp() - ), - Ok(false), - ); - assert_eq!( - Receipt::is_successful_raw_receipt( - &Receipt { - outcome: TransactionOutcome::StatusCode(0), - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp() - ), - Ok(false), - ); - assert_eq!( - Receipt::is_successful_raw_receipt( - &Receipt { - outcome: 
TransactionOutcome::StatusCode(1), - gas_used: Default::default(), - log_bloom: Default::default(), - logs: Vec::new(), - } - .rlp() - ), - Ok(true), - ); - } - - #[test] - fn is_successful_raw_receipt_with_empty_data() { - let mut stream = RlpStream::new(); - stream.begin_list(4); - stream.append_empty_data(); - stream.append(&1u64); - stream.append(&2u64); - stream.append(&3u64); - - assert_eq!(Receipt::is_successful_raw_receipt(&stream.out()), Ok(false),); - } -} diff --git a/polkadot/bridges/primitives/ethereum-poa/src/signatures.rs b/polkadot/bridges/primitives/ethereum-poa/src/signatures.rs deleted file mode 100644 index 21c6f3f860fffc2a3e6246abc57596d8ddd3589b..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/ethereum-poa/src/signatures.rs +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . -// - -//! Helpers related to signatures. -//! -//! Used for testing and benchmarking. - -// reexport to avoid direct secp256k1 deps by other crates -pub use secp256k1::SecretKey; - -use crate::{ - public_to_address, rlp_encode, step_validator, Address, AuraHeader, RawTransaction, UnsignedTransaction, H256, - H520, U256, -}; - -use secp256k1::{Message, PublicKey}; - -/// Utilities for signing headers. 
-pub trait SignHeader { - /// Signs header by given author. - fn sign_by(self, author: &SecretKey) -> AuraHeader; - /// Signs header by given authors set. - fn sign_by_set(self, authors: &[SecretKey]) -> AuraHeader; -} - -/// Utilities for signing transactions. -pub trait SignTransaction { - /// Sign transaction by given author. - fn sign_by(self, author: &SecretKey, chain_id: Option) -> RawTransaction; -} - -impl SignHeader for AuraHeader { - fn sign_by(mut self, author: &SecretKey) -> Self { - self.author = secret_to_address(author); - - let message = self.seal_hash(false).unwrap(); - let signature = sign(author, message); - self.seal[1] = rlp_encode(&signature).to_vec(); - self - } - - fn sign_by_set(self, authors: &[SecretKey]) -> Self { - let step = self.step().unwrap(); - let author = step_validator(authors, step); - self.sign_by(author) - } -} - -impl SignTransaction for UnsignedTransaction { - fn sign_by(self, author: &SecretKey, chain_id: Option) -> RawTransaction { - let message = self.message(chain_id); - let signature = sign(author, message); - let signature_r = U256::from_big_endian(&signature.as_fixed_bytes()[..32][..]); - let signature_s = U256::from_big_endian(&signature.as_fixed_bytes()[32..64][..]); - let signature_v = signature.as_fixed_bytes()[64] as u64; - let signature_v = signature_v + if let Some(n) = chain_id { 35 + n * 2 } else { 27 }; - - let mut stream = rlp::RlpStream::new_list(9); - self.rlp_to(None, &mut stream); - stream.append(&signature_v); - stream.append(&signature_r); - stream.append(&signature_s); - stream.out().to_vec() - } -} - -/// Return author's signature over given message. 
-pub fn sign(author: &SecretKey, message: H256) -> H520 { - let (signature, recovery_id) = secp256k1::sign(&Message::parse(message.as_fixed_bytes()), author); - let mut raw_signature = [0u8; 65]; - raw_signature[..64].copy_from_slice(&signature.serialize()); - raw_signature[64] = recovery_id.serialize(); - raw_signature.into() -} - -/// Returns address corresponding to given secret key. -pub fn secret_to_address(secret: &SecretKey) -> Address { - let public = PublicKey::from_secret_key(secret); - let mut raw_public = [0u8; 64]; - raw_public.copy_from_slice(&public.serialize()[1..]); - public_to_address(&raw_public) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{transaction_decode_rlp, Transaction}; - - #[test] - fn transaction_signed_properly() { - // case1: with chain_id replay protection + to - let signer = SecretKey::parse(&[1u8; 32]).unwrap(); - let signer_address = secret_to_address(&signer); - let unsigned = UnsignedTransaction { - nonce: 100.into(), - gas_price: 200.into(), - gas: 300.into(), - to: Some([42u8; 20].into()), - value: 400.into(), - payload: vec![1, 2, 3], - }; - let raw_tx = unsigned.clone().sign_by(&signer, Some(42)); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: signer_address, - unsigned, - }), - ); - - // case2: without chain_id replay protection + contract creation - let unsigned = UnsignedTransaction { - nonce: 100.into(), - gas_price: 200.into(), - gas: 300.into(), - to: None, - value: 400.into(), - payload: vec![1, 2, 3], - }; - let raw_tx = unsigned.clone().sign_by(&signer, None); - assert_eq!( - transaction_decode_rlp(&raw_tx), - Ok(Transaction { - sender: signer_address, - unsigned, - }), - ); - } -} diff --git a/polkadot/bridges/primitives/header-chain/Cargo.toml b/polkadot/bridges/primitives/header-chain/Cargo.toml deleted file mode 100644 index dc58dafb979efd1c73f6d1cef0e4b041e4e3f720..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/header-chain/Cargo.toml 
+++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "bp-header-chain" -description = "A common interface for describing what a bridge pallet should be able to do." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -finality-grandpa = { version = "0.14.0", default-features = false } -serde = { version = "1.0", optional = true } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[dev-dependencies] -bp-test-utils = { path = "../test-utils" } - -[features] -default = ["std"] -std = [ - "codec/std", - "finality-grandpa/std", - "serde/std", - "frame-support/std", - "sp-core/std", - "sp-finality-grandpa/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/polkadot/bridges/primitives/header-chain/src/justification.rs b/polkadot/bridges/primitives/header-chain/src/justification.rs deleted file mode 100644 index fef9aedac90c80259bf053c8e2ae13876ddc022a..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/header-chain/src/justification.rs +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module for checking GRANDPA Finality Proofs. -//! -//! Adapted copy of substrate/client/finality-grandpa/src/justification.rs. If origin -//! will ever be moved to the sp_finality_grandpa, we should reuse that implementation. - -use codec::{Decode, Encode}; -use finality_grandpa::{voter_set::VoterSet, Chain, Error as GrandpaError}; -use frame_support::RuntimeDebug; -use sp_finality_grandpa::{AuthorityId, AuthoritySignature, SetId}; -use sp_runtime::traits::Header as HeaderT; -use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; -use sp_std::prelude::Vec; - -/// Justification verification error. -#[derive(RuntimeDebug, PartialEq)] -pub enum Error { - /// Failed to decode justification. - JustificationDecode, - /// Justification is finalizing unexpected header. - InvalidJustificationTarget, - /// Invalid commit in justification. - InvalidJustificationCommit, - /// Justification has invalid authority singature. - InvalidAuthoritySignature, - /// The justification has precommit for the header that has no route from the target header. - InvalidPrecommitAncestryProof, - /// The justification has 'unused' headers in its precommit ancestries. - InvalidPrecommitAncestries, -} - -/// Decode justification target. 
-pub fn decode_justification_target( - raw_justification: &[u8], -) -> Result<(Header::Hash, Header::Number), Error> { - GrandpaJustification::
::decode(&mut &*raw_justification) - .map(|justification| (justification.commit.target_hash, justification.commit.target_number)) - .map_err(|_| Error::JustificationDecode) -} - -/// Verify that justification, that is generated by given authority set, finalizes given header. -pub fn verify_justification( - finalized_target: (Header::Hash, Header::Number), - authorities_set_id: SetId, - authorities_set: VoterSet, - raw_justification: &[u8], -) -> Result<(), Error> -where - Header::Number: finality_grandpa::BlockNumberOps, -{ - // Decode justification first - let justification = - GrandpaJustification::
::decode(&mut &*raw_justification).map_err(|_| Error::JustificationDecode)?; - - // Ensure that it is justification for the expected header - if (justification.commit.target_hash, justification.commit.target_number) != finalized_target { - return Err(Error::InvalidJustificationTarget); - } - - // Validate commit of the justification. Note that `validate_commit()` assumes that all - // signatures are valid. We'll check the validity of the signatures later since they're more - // resource intensive to verify. - let ancestry_chain = AncestryChain::new(&justification.votes_ancestries); - match finality_grandpa::validate_commit(&justification.commit, &authorities_set, &ancestry_chain) { - Ok(ref result) if result.ghost().is_some() => {} - _ => return Err(Error::InvalidJustificationCommit), - } - - // Now that we know that the commit is correct, check authorities signatures - let mut buf = Vec::new(); - let mut visited_hashes = BTreeSet::new(); - for signed in &justification.commit.precommits { - if !sp_finality_grandpa::check_message_signature_with_buffer( - &finality_grandpa::Message::Precommit(signed.precommit.clone()), - &signed.id, - &signed.signature, - justification.round, - authorities_set_id, - &mut buf, - ) { - return Err(Error::InvalidAuthoritySignature); - } - - if justification.commit.target_hash == signed.precommit.target_hash { - continue; - } - - match ancestry_chain.ancestry(justification.commit.target_hash, signed.precommit.target_hash) { - Ok(route) => { - // ancestry starts from parent hash but the precommit target hash has been visited - visited_hashes.insert(signed.precommit.target_hash); - visited_hashes.extend(route); - } - _ => { - // could this happen in practice? 
I don't think so, but original code has this check - return Err(Error::InvalidPrecommitAncestryProof); - } - } - } - - let ancestry_hashes = justification - .votes_ancestries - .iter() - .map(|h: &Header| h.hash()) - .collect(); - if visited_hashes != ancestry_hashes { - return Err(Error::InvalidPrecommitAncestries); - } - - Ok(()) -} - -/// A GRANDPA Justification is a proof that a given header was finalized -/// at a certain height and with a certain set of authorities. -/// -/// This particular proof is used to prove that headers on a bridged chain -/// (so not our chain) have been finalized correctly. -#[derive(Encode, Decode, RuntimeDebug)] -pub struct GrandpaJustification { - /// The round (voting period) this justification is valid for. - pub round: u64, - /// The set of votes for the chain which is to be finalized. - pub commit: finality_grandpa::Commit, - /// A proof that the chain of blocks in the commit are related to each other. - pub votes_ancestries: Vec
, -} - -/// A utility trait implementing `finality_grandpa::Chain` using a given set of headers. -#[derive(RuntimeDebug)] -struct AncestryChain { - ancestry: BTreeMap, -} - -impl AncestryChain
{ - fn new(ancestry: &[Header]) -> AncestryChain
{ - AncestryChain { - ancestry: ancestry - .iter() - .map(|header| (header.hash(), *header.parent_hash())) - .collect(), - } - } -} - -impl finality_grandpa::Chain for AncestryChain
-where - Header::Number: finality_grandpa::BlockNumberOps, -{ - fn ancestry(&self, base: Header::Hash, block: Header::Hash) -> Result, GrandpaError> { - let mut route = Vec::new(); - let mut current_hash = block; - loop { - if current_hash == base { - break; - } - match self.ancestry.get(¤t_hash).cloned() { - Some(parent_hash) => { - current_hash = parent_hash; - route.push(current_hash); - } - _ => return Err(GrandpaError::NotDescendent), - } - } - route.pop(); // remove the base - - Ok(route) - } -} diff --git a/polkadot/bridges/primitives/header-chain/src/lib.rs b/polkadot/bridges/primitives/header-chain/src/lib.rs deleted file mode 100644 index 1663717646021b89ee74bf0b4e367807ab38455e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/header-chain/src/lib.rs +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Defines traits which represent a common interface for Substrate pallets which want to -//! incorporate bridge functionality. 
- -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Codec, Decode, Encode, EncodeLike}; -use core::clone::Clone; -use core::cmp::Eq; -use core::default::Default; -use core::fmt::Debug; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; -use sp_finality_grandpa::{AuthorityList, SetId}; -use sp_runtime::traits::Header as HeaderT; -use sp_runtime::RuntimeDebug; -use sp_std::vec::Vec; - -pub mod justification; - -/// A type that can be used as a parameter in a dispatchable function. -/// -/// When using `decl_module` all arguments for call functions must implement this trait. -pub trait Parameter: Codec + EncodeLike + Clone + Eq + Debug {} -impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + Debug {} - -/// A GRANDPA Authority List and ID. -#[derive(Default, Encode, Decode, RuntimeDebug, PartialEq, Clone)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct AuthoritySet { - /// List of GRANDPA authorities for the current round. - pub authorities: AuthorityList, - /// Monotonic identifier of the current GRANDPA authority set. - pub set_id: SetId, -} - -impl AuthoritySet { - /// Create a new GRANDPA Authority Set. - pub fn new(authorities: AuthorityList, set_id: SetId) -> Self { - Self { authorities, set_id } - } -} - -/// base trait for verifying transaction inclusion proofs. -pub trait InclusionProofVerifier { - /// Transaction type. - type Transaction: Parameter; - /// Transaction inclusion proof type. - type TransactionInclusionProof: Parameter; - - /// Verify that transaction is a part of given block. - /// - /// Returns Some(transaction) if proof is valid and None otherwise. - fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option; -} - -/// A trait for pallets which want to keep track of finalized headers from a bridged chain. -pub trait HeaderChain { - /// Get the best finalized header known to the header chain. 
- fn best_finalized() -> H; - - /// Get the best authority set known to the header chain. - fn authority_set() -> AuthoritySet; - - /// Write a header finalized by GRANDPA to the underlying pallet storage. - fn append_header(header: H); -} - -impl HeaderChain for () { - fn best_finalized() -> H { - H::default() - } - - fn authority_set() -> AuthoritySet { - AuthoritySet::default() - } - - fn append_header(_header: H) {} -} - -/// A trait for checking if a given child header is a direct descendant of an ancestor. -pub trait AncestryChecker { - /// Is the child header a descendant of the ancestor header? - fn are_ancestors(ancestor: &H, child: &H, proof: &P) -> bool; -} - -impl AncestryChecker for () { - fn are_ancestors(_ancestor: &H, _child: &H, _proof: &P) -> bool { - true - } -} - -/// A simple ancestry checker which verifies ancestry by walking every header between `child` and -/// `ancestor`. -pub struct LinearAncestryChecker; - -impl AncestryChecker> for LinearAncestryChecker { - fn are_ancestors(ancestor: &H, child: &H, proof: &Vec) -> bool { - // You can't be your own parent - if proof.len() < 2 { - return false; - } - - // Let's make sure that the given headers are actually in the proof - match proof.first() { - Some(first) if first == ancestor => {} - _ => return false, - } - - match proof.last() { - Some(last) if last == child => {} - _ => return false, - } - - // Now we actually check the proof - for i in 1..proof.len() { - if &proof[i - 1].hash() != proof[i].parent_hash() { - return false; - } - } - - true - } -} - -#[cfg(test)] -mod tests { - use super::*; - use bp_test_utils::test_header; - use sp_runtime::testing::Header; - - #[test] - fn can_verify_ancestry_correctly() { - let ancestor: Header = test_header(1); - let header2: Header = test_header(2); - let header3: Header = test_header(3); - let child: Header = test_header(4); - - let ancestry_proof = vec![ancestor.clone(), header2, header3, child.clone()]; - - 
assert!(LinearAncestryChecker::are_ancestors(&ancestor, &child, &ancestry_proof)); - } - - #[test] - fn does_not_verify_invalid_proof() { - let ancestor: Header = test_header(1); - let header2: Header = test_header(2); - let header3: Header = test_header(3); - let child: Header = test_header(4); - - let ancestry_proof = vec![ancestor.clone(), header3, header2, child.clone()]; - - let invalid = !LinearAncestryChecker::are_ancestors(&ancestor, &child, &ancestry_proof); - assert!(invalid); - } - - #[test] - fn header_is_not_allowed_to_be_its_own_ancestor() { - let ancestor: Header = test_header(1); - let child: Header = ancestor.clone(); - let ancestry_proof = vec![ancestor.clone()]; - - let invalid = !LinearAncestryChecker::are_ancestors(&ancestor, &child, &ancestry_proof); - assert!(invalid); - } - - #[test] - fn proof_is_considered_invalid_if_child_and_ancestor_do_not_match() { - let ancestor: Header = test_header(1); - let header2: Header = test_header(2); - let header3: Header = test_header(3); - let child: Header = test_header(4); - - let ancestry_proof = vec![ancestor, header3.clone(), header2.clone(), child]; - - let invalid = !LinearAncestryChecker::are_ancestors(&header2, &header3, &ancestry_proof); - assert!(invalid); - } - - #[test] - fn empty_proof_is_invalid() { - let ancestor: Header = test_header(1); - let child: Header = ancestor.clone(); - let ancestry_proof = vec![]; - - let invalid = !LinearAncestryChecker::are_ancestors(&ancestor, &child, &ancestry_proof); - assert!(invalid); - } -} diff --git a/polkadot/bridges/primitives/header-chain/tests/justification.rs b/polkadot/bridges/primitives/header-chain/tests/justification.rs deleted file mode 100644 index 81bd83b1ad3b4744cd5765148ffef0870e28c365..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/header-chain/tests/justification.rs +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tests for Grandpa Justification code. - -use bp_header_chain::justification::{verify_justification, Error, GrandpaJustification}; -use bp_test_utils::*; -use codec::Encode; - -type TestHeader = sp_runtime::testing::Header; - -fn make_justification_for_header_1() -> GrandpaJustification { - make_justification_for_header( - &test_header(1), - TEST_GRANDPA_ROUND, - TEST_GRANDPA_SET_ID, - &authority_list(), - ) -} - -#[test] -fn justification_with_invalid_encoding_rejected() { - assert_eq!( - verify_justification::(header_id::(1), TEST_GRANDPA_SET_ID, voter_set(), &[],), - Err(Error::JustificationDecode), - ); -} - -#[test] -fn justification_with_invalid_target_rejected() { - assert_eq!( - verify_justification::( - header_id::(2), - TEST_GRANDPA_SET_ID, - voter_set(), - &make_justification_for_header_1().encode(), - ), - Err(Error::InvalidJustificationTarget), - ); -} - -#[test] -fn justification_with_invalid_commit_rejected() { - let mut justification = make_justification_for_header_1(); - justification.commit.precommits.clear(); - - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - voter_set(), - &justification.encode(), - ), - Err(Error::InvalidJustificationCommit), - ); -} - -#[test] -fn justification_with_invalid_authority_signature_rejected() { - let mut justification = 
make_justification_for_header_1(); - justification.commit.precommits[0].signature = Default::default(); - - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - voter_set(), - &justification.encode(), - ), - Err(Error::InvalidAuthoritySignature), - ); -} - -#[test] -fn justification_with_invalid_precommit_ancestry() { - let mut justification = make_justification_for_header_1(); - justification.votes_ancestries.push(test_header(10)); - - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - voter_set(), - &justification.encode(), - ), - Err(Error::InvalidPrecommitAncestries), - ); -} - -#[test] -fn valid_justification_accepted() { - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - voter_set(), - &make_justification_for_header_1().encode(), - ), - Ok(()), - ); -} diff --git a/polkadot/bridges/primitives/kusama/Cargo.toml b/polkadot/bridges/primitives/kusama/Cargo.toml deleted file mode 100644 index 784f0b013289a2b3524ae4f3c6c9bf6f675e8c19..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/kusama/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "bp-kusama" -description = "Primitives of Kusama runtime." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] - -# Bridge Dependencies - -bp-message-lane = { path = "../message-lane", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-message-lane/std", - "bp-runtime/std", - "frame-support/std", - "frame-system/std", - "sp-api/std", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/polkadot/bridges/primitives/kusama/src/lib.rs b/polkadot/bridges/primitives/kusama/src/lib.rs deleted file mode 100644 index 9ec032dbd513859a512efcde849b3af9b42e656d..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/kusama/src/lib.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] -// Runtime-generated DecodeLimit::decode_all_with_depth_limit -#![allow(clippy::unnecessary_mut_passed)] - -use bp_message_lane::{LaneId, MessageNonce}; -use bp_runtime::Chain; -use frame_support::{weights::Weight, RuntimeDebug}; -use sp_core::Hasher as HasherT; -use sp_runtime::{ - generic, - traits::{BlakeTwo256, IdentifyAccount, Verify}, - MultiSignature, OpaqueExtrinsic as UncheckedExtrinsic, -}; -use sp_std::prelude::*; - -// TODO: may need to be updated after https://github.com/paritytech/parity-bridges-common/issues/78 -/// Maximal number of messages in single delivery transaction. -pub const MAX_MESSAGES_IN_DELIVERY_TRANSACTION: MessageNonce = 128; - -/// Maximal number of unrewarded relayer entries at inbound lane. -pub const MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE: MessageNonce = 128; - -// TODO: should be selected keeping in mind: -// finality delay on both chains + reward payout cost + messages throughput. -/// Maximal number of unconfirmed messages at inbound lane. -pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 8192; - -/// Block number type used in Kusama. -pub type BlockNumber = u32; - -/// Hash type used in Kusama. -pub type Hash = ::Out; - -/// The type of an object that can produce hashes on Kusama. -pub type Hasher = BlakeTwo256; - -/// The header type used by Kusama. -pub type Header = generic::Header; - -/// Signature type used by Kusama. -pub type Signature = MultiSignature; - -/// Public key of account on Kusama chain. 
-pub type AccountPublic = ::Signer; - -/// Id of account on Kusama chain. -pub type AccountId = ::AccountId; - -/// Index of a transaction on the Kusama chain. -pub type Nonce = u32; - -/// Block type of Kusama. -pub type Block = generic::Block; - -/// Kusama block signed with a Justification. -pub type SignedBlock = generic::SignedBlock; - -/// The balance of an account on Polkadot. -pub type Balance = u128; - -/// Kusama chain. -#[derive(RuntimeDebug)] -pub struct Kusama; - -impl Chain for Kusama { - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; -} - -/// Convert a 256-bit hash into an AccountId. -pub struct AccountIdConverter; - -impl sp_runtime::traits::Convert for AccountIdConverter { - fn convert(hash: sp_core::H256) -> AccountId { - hash.to_fixed_bytes().into() - } -} - -/// Name of the `KusamaHeaderApi::best_blocks` runtime method. -pub const BEST_KUSAMA_BLOCKS_METHOD: &str = "KusamaHeaderApi_best_blocks"; -/// Name of the `KusamaHeaderApi::finalized_block` runtime method. -pub const FINALIZED_KUSAMA_BLOCK_METHOD: &str = "KusamaHeaderApi_finalized_block"; -/// Name of the `KusamaHeaderApi::is_known_block` runtime method. -pub const IS_KNOWN_KUSAMA_BLOCK_METHOD: &str = "KusamaHeaderApi_is_known_block"; -/// Name of the `KusamaHeaderApi::incomplete_headers` runtime method. -pub const INCOMPLETE_KUSAMA_HEADERS_METHOD: &str = "KusamaHeaderApi_incomplete_headers"; - -sp_api::decl_runtime_apis! { - /// API for querying information about Kusama headers from the Bridge Pallet instance. - /// - /// This API is implemented by runtimes that are bridging with Kusama chain, not the - /// Kusama runtime itself. - pub trait KusamaHeaderApi { - /// Returns number and hash of the best blocks known to the bridge module. - /// - /// Will return multiple headers if there are many headers at the same "best" height. 
- /// - /// The caller should only submit an `import_header` transaction that makes - /// (or leads to making) other header the best one. - fn best_blocks() -> Vec<(BlockNumber, Hash)>; - /// Returns number and hash of the best finalized block known to the bridge module. - fn finalized_block() -> (BlockNumber, Hash); - /// Returns numbers and hashes of headers that require finality proofs. - /// - /// An empty response means that there are no headers which currently require a - /// finality proof. - fn incomplete_headers() -> Vec<(BlockNumber, Hash)>; - /// Returns true if the header is known to the runtime. - fn is_known_block(hash: Hash) -> bool; - /// Returns true if the header is considered finalized by the runtime. - fn is_finalized_block(hash: Hash) -> bool; - } - - /// Outbound message lane API for messages that are sent to Kusama chain. - /// - /// This API is implemented by runtimes that are sending messages to Kusama chain, not the - /// Kusama runtime itself. - pub trait ToKusamaOutboundLaneApi { - /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. - /// - /// If some (or all) messages are missing from the storage, they'll also will - /// be missing from the resulting vector. The vector is ordered by the nonce. - fn messages_dispatch_weight( - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - ) -> Vec<(MessageNonce, Weight, u32)>; - /// Returns nonce of the latest message, received by bridged chain. - fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Returns nonce of the latest message, generated by given lane. - fn latest_generated_nonce(lane: LaneId) -> MessageNonce; - } - - /// Inbound message lane API for messages sent by Kusama chain. - /// - /// This API is implemented by runtimes that are receiving messages from Kusama chain, not the - /// Kusama runtime itself. - pub trait FromKusamaInboundLaneApi { - /// Returns nonce of the latest message, received by given lane. 
- fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Nonce of latest message that has been confirmed to the bridged chain. - fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; - } -} diff --git a/polkadot/bridges/primitives/message-dispatch/Cargo.toml b/polkadot/bridges/primitives/message-dispatch/Cargo.toml deleted file mode 100644 index 293c637e8df25994173fca9ebb9152b2996263f8..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/message-dispatch/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "bp-message-dispatch" -description = "Primitives of bridge messages dispatch modules." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -bp-runtime = { path = "../runtime", default-features = false } -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-runtime/std", - "codec/std", -] diff --git a/polkadot/bridges/primitives/message-dispatch/src/lib.rs b/polkadot/bridges/primitives/message-dispatch/src/lib.rs deleted file mode 100644 index 1932d8cb0b8d403128212753140e2f2192df891f..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/message-dispatch/src/lib.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! A common interface for all Bridge Message Dispatch modules. - -#![cfg_attr(not(feature = "std"), no_std)] -#![warn(missing_docs)] - -use bp_runtime::InstanceId; - -/// Message dispatch weight. -pub type Weight = u64; - -/// A generic trait to dispatch arbitrary messages delivered over the bridge. -pub trait MessageDispatch { - /// A type of the message to be dispatched. - type Message: codec::Decode; - - /// Estimate dispatch weight. - /// - /// This function must: (1) be instant and (2) return correct upper bound - /// of dispatch weight. - fn dispatch_weight(message: &Self::Message) -> Weight; - - /// Dispatches the message internally. - /// - /// `bridge` indicates instance of deployed bridge where the message came from. - /// - /// `id` is a short unique identifier of the message. - /// - /// If message is `Ok`, then it should be dispatched. If it is `Err`, then it's just - /// a sign that some other component has rejected the message even before it has - /// reached `dispatch` method (right now this may only be caused if we fail to decode - /// the whole message). - fn dispatch(bridge: InstanceId, id: MessageId, message: Result); -} diff --git a/polkadot/bridges/primitives/message-lane/Cargo.toml b/polkadot/bridges/primitives/message-lane/Cargo.toml deleted file mode 100644 index cbddcb1614606d383270125507d31e8a40867208..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/message-lane/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "bp-message-lane" -description = "Primitives of message lane module." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } - -# Bridge dependencies - -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[features] -default = ["std"] -std = [ - "bp-runtime/std", - "codec/std", - "frame-support/std", - "frame-system/std", - "sp-std/std" -] diff --git a/polkadot/bridges/primitives/message-lane/src/lib.rs b/polkadot/bridges/primitives/message-lane/src/lib.rs deleted file mode 100644 index de2dbd9ae63dae54c9657aec0ac7b3e4c82bd842..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/message-lane/src/lib.rs +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of message lane module. 
- -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] -// Generated by `DecodeLimit::decode_with_depth_limit` -#![allow(clippy::unnecessary_mut_passed)] - -use codec::{Decode, Encode}; -use frame_support::RuntimeDebug; -use sp_std::{collections::vec_deque::VecDeque, prelude::*}; - -pub mod source_chain; -pub mod target_chain; - -// Weight is reexported to avoid additional frame-support dependencies in message-lane related crates. -pub use frame_support::weights::Weight; - -/// Message lane pallet parameter. -pub trait Parameter: frame_support::Parameter { - /// Save parameter value in the runtime storage. - fn save(&self); -} - -/// Lane identifier. -pub type LaneId = [u8; 4]; - -/// Message nonce. Valid messages will never have 0 nonce. -pub type MessageNonce = u64; - -/// Message id as a tuple. -pub type MessageId = (LaneId, MessageNonce); - -/// Opaque message payload. We only decode this payload when it is dispatched. -pub type MessagePayload = Vec; - -/// Message key (unique message identifier) as it is stored in the storage. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct MessageKey { - /// ID of the message lane. - pub lane_id: LaneId, - /// Message nonce. - pub nonce: MessageNonce, -} - -/// Message data as it is stored in the storage. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct MessageData { - /// Message payload. - pub payload: MessagePayload, - /// Message delivery and dispatch fee, paid by the submitter. - pub fee: Fee, -} - -/// Message as it is stored in the storage. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct Message { - /// Message key. - pub key: MessageKey, - /// Message data. - pub data: MessageData, -} - -/// Inbound lane data. 
-#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)] -pub struct InboundLaneData { - /// Identifiers of relayers and messages that they have delivered to this lane (ordered by message nonce). - /// - /// This serves as a helper storage item, to allow the source chain to easily pay rewards - /// to the relayers who succesfuly delivered messages to the target chain (inbound lane). - /// - /// It is guaranteed to have at most N entries, where N is configured at the module level. - /// If there are N entries in this vec, then: - /// 1) all incoming messages are rejected if they're missing corresponding `proof-of(outbound-lane.state)`; - /// 2) all incoming messages are rejected if `proof-of(outbound-lane.state).last_delivered_nonce` is - /// equal to `self.last_confirmed_nonce`. - /// Given what is said above, all nonces in this queue are in range: - /// `(self.last_confirmed_nonce; self.last_delivered_nonce()]`. - /// - /// When a relayer sends a single message, both of MessageNonces are the same. - /// When relayer sends messages in a batch, the first arg is the lowest nonce, second arg the highest nonce. - /// Multiple dispatches from the same relayer are allowed. - pub relayers: VecDeque<(MessageNonce, MessageNonce, RelayerId)>, - - /// Nonce of the last message that - /// a) has been delivered to the target (this) chain and - /// b) the delivery has been confirmed on the source chain - /// - /// that the target chain knows of. - /// - /// This value is updated indirectly when an `OutboundLane` state of the source - /// chain is received alongside with new messages delivery. - pub last_confirmed_nonce: MessageNonce, -} - -impl Default for InboundLaneData { - fn default() -> Self { - InboundLaneData { - relayers: VecDeque::new(), - last_confirmed_nonce: 0, - } - } -} - -impl InboundLaneData { - /// Returns approximate size of the struct, given number of entries in the `relayers` set and - /// size of each entry. 
- /// - /// Returns `None` if size overflows `u32` limits. - pub fn encoded_size_hint(relayer_id_encoded_size: u32, relayers_entries: u32) -> Option { - let message_nonce_size = 8; - let relayers_entry_size = relayer_id_encoded_size.checked_add(2 * message_nonce_size)?; - let relayers_size = relayers_entries.checked_mul(relayers_entry_size)?; - relayers_size.checked_add(message_nonce_size) - } - - /// Nonce of the last message that has been delivered to this (target) chain. - pub fn last_delivered_nonce(&self) -> MessageNonce { - self.relayers - .back() - .map(|(_, last_nonce, _)| *last_nonce) - .unwrap_or(self.last_confirmed_nonce) - } -} - -/// Gist of `InboundLaneData::relayers` field used by runtime APIs. -#[derive(Clone, Default, Encode, Decode, RuntimeDebug, PartialEq, Eq)] -pub struct UnrewardedRelayersState { - /// Number of entries in the `InboundLaneData::relayers` set. - pub unrewarded_relayer_entries: MessageNonce, - /// Number of messages in the oldest entry of `InboundLaneData::relayers`. This is the - /// minimal number of reward proofs required to push out this entry from the set. - pub messages_in_oldest_entry: MessageNonce, - /// Total number of messages in the relayers vector. - pub total_messages: MessageNonce, -} - -/// Outbound lane data. -#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)] -pub struct OutboundLaneData { - /// Nonce of oldest message that we haven't yet pruned. May point to not-yet-generated message if - /// all sent messages are already pruned. - pub oldest_unpruned_nonce: MessageNonce, - /// Nonce of latest message, received by bridged chain. - pub latest_received_nonce: MessageNonce, - /// Nonce of latest message, generated by us. 
- pub latest_generated_nonce: MessageNonce, -} - -impl Default for OutboundLaneData { - fn default() -> Self { - OutboundLaneData { - // it is 1 because we're pruning everything in [oldest_unpruned_nonce; latest_received_nonce] - oldest_unpruned_nonce: 1, - latest_received_nonce: 0, - latest_generated_nonce: 0, - } - } -} - -/// Returns total number of messages in the `InboundLaneData::relayers` vector. -/// -/// Returns `None` if there are more messages that `MessageNonce` may fit (i.e. `MessageNonce + 1`). -pub fn total_unrewarded_messages( - relayers: &VecDeque<(MessageNonce, MessageNonce, RelayerId)>, -) -> Option { - match (relayers.front(), relayers.back()) { - (Some((begin, _, _)), Some((_, end, _))) => { - if let Some(difference) = end.checked_sub(*begin) { - difference.checked_add(1) - } else { - Some(0) - } - } - _ => Some(0), - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn total_unrewarded_messages_does_not_overflow() { - assert_eq!( - total_unrewarded_messages( - &vec![(0, 0, 1), (MessageNonce::MAX, MessageNonce::MAX, 2)] - .into_iter() - .collect() - ), - None, - ); - } - - #[test] - fn inbound_lane_data_returns_correct_hint() { - let expected_size = InboundLaneData::::encoded_size_hint(1, 13); - let actual_size = InboundLaneData { - relayers: (1u8..=13u8).map(|i| (i as _, i as _, i)).collect(), - last_confirmed_nonce: 13, - } - .encode() - .len(); - let difference = (expected_size.unwrap() as f64 - actual_size as f64).abs(); - assert!( - difference / (std::cmp::min(actual_size, expected_size.unwrap() as usize) as f64) < 0.1, - "Too large difference between actual ({}) and expected ({:?}) inbound lane data size", - actual_size, - expected_size, - ); - } -} diff --git a/polkadot/bridges/primitives/message-lane/src/source_chain.rs b/polkadot/bridges/primitives/message-lane/src/source_chain.rs deleted file mode 100644 index d0dc36bb69352ca24aa380582d19331f8b8d12fb..0000000000000000000000000000000000000000 --- 
a/polkadot/bridges/primitives/message-lane/src/source_chain.rs +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of message lane module, that are used on the source chain. - -use crate::{InboundLaneData, LaneId, MessageNonce, OutboundLaneData}; - -use bp_runtime::Size; -use frame_support::{Parameter, RuntimeDebug}; -use sp_std::{collections::btree_map::BTreeMap, fmt::Debug}; - -/// The sender of the message on the source chain. -pub type Sender = frame_system::RawOrigin; - -/// Relayers rewards, grouped by relayer account id. -pub type RelayersRewards = BTreeMap>; - -/// Single relayer rewards. -#[derive(RuntimeDebug, Default)] -pub struct RelayerRewards { - /// Total rewards that are to be paid to the relayer. - pub reward: Balance, - /// Total number of messages relayed by this relayer. - pub messages: MessageNonce, -} - -/// Target chain API. Used by source chain to verify target chain proofs. -/// -/// All implementations of this trait should only work with finalized data that -/// can't change. Wrong implementation may lead to invalid lane states (i.e. lane -/// that's stuck) and/or processing messages without paying fees. -pub trait TargetHeaderChain { - /// Error type. 
- type Error: Debug + Into<&'static str>; - - /// Proof that messages have been received by target chain. - type MessagesDeliveryProof: Parameter + Size; - - /// Verify message payload before we accept it. - /// - /// **CAUTION**: this is very important function. Incorrect implementation may lead - /// to stuck lanes and/or relayers loses. - /// - /// The proper implementation must ensure that the delivery-transaction with this - /// payload would (at least) be accepted into target chain transaction pool AND - /// eventually will be successfully 'mined'. The most obvious incorrect implementation - /// example would be implementation for BTC chain that accepts payloads larger than - /// 1MB. BTC nodes aren't accepting transactions that are larger than 1MB, so relayer - /// will be unable to craft valid transaction => this (and all subsequent) messages will - /// never be delivered. - fn verify_message(payload: &Payload) -> Result<(), Self::Error>; - - /// Verify messages delivery proof and return lane && nonce of the latest recevied message. - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), Self::Error>; -} - -/// Lane message verifier. -/// -/// Runtime developer may implement any additional validation logic over message-lane mechanism. -/// E.g. if lanes should have some security (e.g. you can only accept Lane1 messages from -/// Submitter1, Lane2 messages for those who has submitted first message to this lane, disable -/// Lane3 until some block, ...), then it may be built using this verifier. -/// -/// Any fee requirements should also be enforced here. -pub trait LaneMessageVerifier { - /// Error type. - type Error: Debug + Into<&'static str>; - - /// Verify message payload and return Ok(()) if message is valid and allowed to be sent over the lane. 
- fn verify_message( - submitter: &Sender, - delivery_and_dispatch_fee: &Fee, - lane: &LaneId, - outbound_data: &OutboundLaneData, - payload: &Payload, - ) -> Result<(), Self::Error>; -} - -/// Message delivery payment. It is called as a part of submit-message transaction. Transaction -/// submitter is paying (in source chain tokens/assets) for: -/// -/// 1) submit-message-transaction-fee itself. This fee is not included in the -/// `delivery_and_dispatch_fee` and is witheld by the regular transaction payment mechanism; -/// 2) message-delivery-transaction-fee. It is submitted to the target node by relayer; -/// 3) message-dispatch fee. It is paid by relayer for processing message by target chain; -/// 4) message-receiving-delivery-transaction-fee. It is submitted to the source node -/// by relayer. -/// -/// So to be sure that any non-altruist relayer would agree to deliver message, submitter -/// should set `delivery_and_dispatch_fee` to at least (equialent of): sum of fees from (2) -/// to (4) above, plus some interest for the relayer. -pub trait MessageDeliveryAndDispatchPayment { - /// Error type. - type Error: Debug + Into<&'static str>; - - /// Withhold/write-off delivery_and_dispatch_fee from submitter account to - /// some relayers-fund account. - fn pay_delivery_and_dispatch_fee( - submitter: &Sender, - fee: &Balance, - relayer_fund_account: &AccountId, - ) -> Result<(), Self::Error>; - - /// Pay rewards for delivering messages to the given relayers. - /// - /// The implementation may also choose to pay reward to the `confirmation_relayer`, which is - /// a relayer that has submitted delivery confirmation transaction. - fn pay_relayers_rewards( - confirmation_relayer: &AccountId, - relayers_rewards: RelayersRewards, - relayer_fund_account: &AccountId, - ); - - /// Perform some initialization in externalities-provided environment. - /// - /// For instance you may ensure that particular required accounts or storage items are present. 
- /// Returns the number of storage reads performed. - fn initialize(_relayer_fund_account: &AccountId) -> usize { - 0 - } -} - -/// Structure that may be used in place of `TargetHeaderChain`, `LaneMessageVerifier` and -/// `MessageDeliveryAndDispatchPayment` on chains, where outbound messages are forbidden. -pub struct ForbidOutboundMessages; - -/// Error message that is used in `ForbidOutboundMessages` implementation. -const ALL_OUTBOUND_MESSAGES_REJECTED: &str = "This chain is configured to reject all outbound messages"; - -impl TargetHeaderChain for ForbidOutboundMessages { - type Error = &'static str; - - type MessagesDeliveryProof = (); - - fn verify_message(_payload: &Payload) -> Result<(), Self::Error> { - Err(ALL_OUTBOUND_MESSAGES_REJECTED) - } - - fn verify_messages_delivery_proof( - _proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), Self::Error> { - Err(ALL_OUTBOUND_MESSAGES_REJECTED) - } -} - -impl LaneMessageVerifier for ForbidOutboundMessages { - type Error = &'static str; - - fn verify_message( - _submitter: &Sender, - _delivery_and_dispatch_fee: &Fee, - _lane: &LaneId, - _outbound_data: &OutboundLaneData, - _payload: &Payload, - ) -> Result<(), Self::Error> { - Err(ALL_OUTBOUND_MESSAGES_REJECTED) - } -} - -impl MessageDeliveryAndDispatchPayment for ForbidOutboundMessages { - type Error = &'static str; - - fn pay_delivery_and_dispatch_fee( - _submitter: &Sender, - _fee: &Balance, - _relayer_fund_account: &AccountId, - ) -> Result<(), Self::Error> { - Err(ALL_OUTBOUND_MESSAGES_REJECTED) - } - - fn pay_relayers_rewards( - _confirmation_relayer: &AccountId, - _relayers_rewards: RelayersRewards, - _relayer_fund_account: &AccountId, - ) { - } -} diff --git a/polkadot/bridges/primitives/message-lane/src/target_chain.rs b/polkadot/bridges/primitives/message-lane/src/target_chain.rs deleted file mode 100644 index 765ce64f63b3926bd463e0068d1fa8495d02c301..0000000000000000000000000000000000000000 --- 
a/polkadot/bridges/primitives/message-lane/src/target_chain.rs +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of message lane module, that are used on the target chain. - -use crate::{LaneId, Message, MessageData, MessageKey, OutboundLaneData}; - -use bp_runtime::Size; -use codec::{Decode, Encode, Error as CodecError}; -use frame_support::{weights::Weight, Parameter, RuntimeDebug}; -use sp_std::{collections::btree_map::BTreeMap, fmt::Debug, prelude::*}; - -/// Proved messages from the source chain. -pub type ProvedMessages = BTreeMap>; - -/// Proved messages from single lane of the source chain. -#[derive(RuntimeDebug, Encode, Decode, Clone, PartialEq, Eq)] -pub struct ProvedLaneMessages { - /// Optional outbound lane state. - pub lane_state: Option, - /// Messages sent through this lane. - pub messages: Vec, -} - -/// Message data with decoded dispatch payload. -#[derive(RuntimeDebug)] -pub struct DispatchMessageData { - /// Result of dispatch payload decoding. - pub payload: Result, - /// Message delivery and dispatch fee, paid by the submitter. - pub fee: Fee, -} - -/// Message with decoded dispatch payload. -#[derive(RuntimeDebug)] -pub struct DispatchMessage { - /// Message key. 
- pub key: MessageKey, - /// Message data with decoded dispatch payload. - pub data: DispatchMessageData, -} - -/// Source chain API. Used by target chain, to verify source chain proofs. -/// -/// All implementations of this trait should only work with finalized data that -/// can't change. Wrong implementation may lead to invalid lane states (i.e. lane -/// that's stuck) and/or processing messages without paying fees. -pub trait SourceHeaderChain { - /// Error type. - type Error: Debug + Into<&'static str>; - - /// Proof that messages are sent from source chain. This may also include proof - /// of corresponding outbound lane states. - type MessagesProof: Parameter + Size; - - /// Verify messages proof and return proved messages. - /// - /// Returns error if either proof is incorrect, or the number of messages in the proof - /// is not matching the `messages_count`. - /// - /// Messages vector is required to be sorted by nonce within each lane. Out-of-order - /// messages will be rejected. - /// - /// The `messages_count` argument verification (sane limits) is supposed to be made - /// outside of this function. This function only verifies that the proof declares exactly - /// `messages_count` messages. - fn verify_messages_proof( - proof: Self::MessagesProof, - messages_count: u32, - ) -> Result>, Self::Error>; -} - -/// Called when inbound message is received. -pub trait MessageDispatch { - /// Decoded message payload type. Valid message may contain invalid payload. In this case - /// message is delivered, but dispatch fails. Therefore, two separate types of payload - /// (opaque `MessagePayload` used in delivery and this `DispatchPayload` used in dispatch). - type DispatchPayload: Decode; - - /// Estimate dispatch weight. - /// - /// This function must: (1) be instant and (2) return correct upper bound - /// of dispatch weight. - fn dispatch_weight(message: &DispatchMessage) -> Weight; - - /// Called when inbound message is received. 
- /// - /// It is up to the implementers of this trait to determine whether the message - /// is invalid (i.e. improperly encoded, has too large weight, ...) or not. - fn dispatch(message: DispatchMessage); -} - -impl Default for ProvedLaneMessages { - fn default() -> Self { - ProvedLaneMessages { - lane_state: None, - messages: Vec::new(), - } - } -} - -impl From> for DispatchMessage { - fn from(message: Message) -> Self { - DispatchMessage { - key: message.key, - data: message.data.into(), - } - } -} - -impl From> for DispatchMessageData { - fn from(data: MessageData) -> Self { - DispatchMessageData { - payload: DispatchPayload::decode(&mut &data.payload[..]), - fee: data.fee, - } - } -} - -/// Structure that may be used in place of `SourceHeaderChain` and `MessageDispatch` on chains, -/// where inbound messages are forbidden. -pub struct ForbidInboundMessages; - -/// Error message that is used in `ForbidOutboundMessages` implementation. -const ALL_INBOUND_MESSAGES_REJECTED: &str = "This chain is configured to reject all inbound messages"; - -impl SourceHeaderChain for ForbidInboundMessages { - type Error = &'static str; - type MessagesProof = (); - - fn verify_messages_proof( - _proof: Self::MessagesProof, - _messages_count: u32, - ) -> Result>, Self::Error> { - Err(ALL_INBOUND_MESSAGES_REJECTED) - } -} - -impl MessageDispatch for ForbidInboundMessages { - type DispatchPayload = (); - - fn dispatch_weight(_message: &DispatchMessage) -> Weight { - Weight::MAX - } - - fn dispatch(_message: DispatchMessage) {} -} diff --git a/polkadot/bridges/primitives/millau/Cargo.toml b/polkadot/bridges/primitives/millau/Cargo.toml deleted file mode 100644 index 124d8199e2c23adec6a08dcb7519358ccce0df5f..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/millau/Cargo.toml +++ /dev/null @@ -1,52 +0,0 @@ -[package] -name = "bp-millau" -description = "Primitives of Millau runtime." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] - -# Bridge Dependencies - -bp-message-lane = { path = "../message-lane", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -fixed-hash = { version = "0.7.0", default-features = false } -hash256-std-hasher = { version = "0.15.2", default-features = false } -impl-codec = { version = "0.5.0", default-features = false } -impl-serde = { version = "0.3.1", optional = true } -parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } -serde = { version = "1.0.101", optional = true, features = ["derive"] } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[features] -default = ["std"] -std = [ - "bp-message-lane/std", - "bp-runtime/std", - "fixed-hash/std", - "frame-support/std", - "frame-system/std", - "hash256-std-hasher/std", - "impl-codec/std", - "impl-serde", - "parity-util-mem/std", - "serde", - "sp-api/std", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", - 
"sp-trie/std", -] diff --git a/polkadot/bridges/primitives/millau/src/lib.rs b/polkadot/bridges/primitives/millau/src/lib.rs deleted file mode 100644 index 84096d116ef53e5586da73f1b1dee4abfa73632c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/millau/src/lib.rs +++ /dev/null @@ -1,345 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] -// Runtime-generated DecodeLimit::decode_all_With_depth_limit -#![allow(clippy::unnecessary_mut_passed)] - -mod millau_hash; - -use bp_message_lane::{LaneId, MessageNonce, UnrewardedRelayersState}; -use bp_runtime::Chain; -use frame_support::{ - weights::{constants::WEIGHT_PER_SECOND, DispatchClass, Weight}, - Parameter, RuntimeDebug, -}; -use frame_system::limits; -use sp_core::Hasher as HasherT; -use sp_runtime::traits::Convert; -use sp_runtime::{ - traits::{IdentifyAccount, Verify}, - MultiSignature, MultiSigner, Perbill, -}; -use sp_std::prelude::*; -use sp_trie::{trie_types::Layout, TrieConfiguration}; - -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; - -pub use millau_hash::MillauHash; - -/// Number of extra bytes (excluding size of storage value itself) of storage proof, built at -/// Millau chain. This mostly depends on number of entries (and their density) in the storage trie. -/// Some reserve is reserved to account future chain growth. -pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; - -/// Maximal size (in bytes) of encoded (using `Encode::encode()`) account id. -pub const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 32; - -/// Maximum weight of single Millau block. -/// -/// This represents 0.5 seconds of compute assuming a target block time of six seconds. -pub const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND / 2; - -/// Represents the average portion of a block's weight that will be used by an -/// `on_initialize()` runtime call. -pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); - -/// Represents the portion of a block that will be used by Normal extrinsics. -pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); - -/// Maximal number of unrewarded relayer entries at inbound lane. 
-pub const MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE: MessageNonce = 1024; - -/// Maximal number of unconfirmed messages at inbound lane. -pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 1024; - -/// Weight of single regular message delivery transaction on Millau chain. -/// -/// This value is a result of `pallet_message_lane::Module::receive_messages_proof_weight()` call -/// for the case when single message of `pallet_message_lane::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered. -/// The message must have dispatch weight set to zero. The result then must be rounded up to account -/// possible future runtime upgrades. -pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_000_000_000; - -/// Increase of delivery transaction weight on Millau chain with every additional message byte. -/// -/// This value is a result of `pallet_message_lane::WeightInfoExt::storage_proof_size_overhead(1)` call. The -/// result then must be rounded up to account possible future runtime upgrades. -pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000; - -/// Maximal weight of single message delivery confirmation transaction on Millau chain. -/// -/// This value is a result of `pallet_message_lane::Module::receive_messages_delivery_proof` weight formula computation -/// for the case when single message is confirmed. The result then must be rounded up to account possible future -/// runtime upgrades. -pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000; - -/// The length of a session (how often authorities change) on Millau measured in of number of blocks. -pub const SESSION_LENGTH: BlockNumber = 5 * time_units::MINUTES; - -/// Re-export `time_units` to make usage easier. -pub use time_units::*; - -/// Human readable time units defined in terms of number of blocks. 
-pub mod time_units { - use super::BlockNumber; - - pub const MILLISECS_PER_BLOCK: u64 = 6000; - pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; - - pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); - pub const HOURS: BlockNumber = MINUTES * 60; - pub const DAYS: BlockNumber = HOURS * 24; -} - -/// Block number type used in Millau. -pub type BlockNumber = u64; - -/// Hash type used in Millau. -pub type Hash = ::Out; - -/// The type of an object that can produce hashes on Millau. -pub type Hasher = BlakeTwoAndKeccak256; - -/// The header type used by Millau. -pub type Header = sp_runtime::generic::Header; - -/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. -pub type Signature = MultiSignature; - -/// Some way of identifying an account on the chain. We intentionally make it equivalent -/// to the public key of our transaction signing scheme. -pub type AccountId = <::Signer as IdentifyAccount>::AccountId; - -/// Public key of the chain account that may be used to verify signatures. -pub type AccountSigner = MultiSigner; - -/// Balance of an account. -pub type Balance = u64; - -/// Millau chain. -#[derive(RuntimeDebug)] -pub struct Millau; - -impl Chain for Millau { - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; -} - -/// Millau Hasher (Blake2-256 ++ Keccak-256) implementation. 
-#[derive(PartialEq, Eq, Clone, Copy, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct BlakeTwoAndKeccak256; - -impl sp_core::Hasher for BlakeTwoAndKeccak256 { - type Out = MillauHash; - type StdHasher = hash256_std_hasher::Hash256StdHasher; - const LENGTH: usize = 64; - - fn hash(s: &[u8]) -> Self::Out { - let mut combined_hash = MillauHash::default(); - combined_hash.as_mut()[..32].copy_from_slice(&sp_io::hashing::blake2_256(s)); - combined_hash.as_mut()[32..].copy_from_slice(&sp_io::hashing::keccak_256(s)); - combined_hash - } -} - -impl sp_runtime::traits::Hash for BlakeTwoAndKeccak256 { - type Output = MillauHash; - - fn trie_root(input: Vec<(Vec, Vec)>) -> Self::Output { - Layout::::trie_root(input) - } - - fn ordered_trie_root(input: Vec>) -> Self::Output { - Layout::::ordered_trie_root(input) - } -} - -/// Convert a 256-bit hash into an AccountId. -pub struct AccountIdConverter; - -impl sp_runtime::traits::Convert for AccountIdConverter { - fn convert(hash: sp_core::H256) -> AccountId { - hash.to_fixed_bytes().into() - } -} - -/// We use this to get the account on Millau (target) which is derived from Rialto's (source) -/// account. We do this so we can fund the derived account on Millau at Genesis to it can pay -/// transaction fees. -/// -/// The reason we can use the same `AccountId` type for both chains is because they share the same -/// development seed phrase. -/// -/// Note that this should only be used for testing. -pub fn derive_account_from_rialto_id(id: bp_runtime::SourceAccount) -> AccountId { - let encoded_id = bp_runtime::derive_account_id(bp_runtime::RIALTO_BRIDGE_INSTANCE, id); - AccountIdConverter::convert(encoded_id) -} - -frame_support::parameter_types! 
{ - pub BlockLength: limits::BlockLength = - limits::BlockLength::max_with_normal_ratio(2 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() - // Allowance for Normal class - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - // Allowance for Operational class - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Extra reserved space for Operational class - weights.reserved = Some(MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - // By default Mandatory class is not limited at all. - // This parameter is used to derive maximal size of a single extrinsic. - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); -} - -/// Get the maximum weight (compute time) that a Normal extrinsic on the Millau chain can use. -pub fn max_extrinsic_weight() -> Weight { - BlockWeights::get() - .get(DispatchClass::Normal) - .max_extrinsic - .unwrap_or(Weight::MAX) -} - -/// Get the maximum length in bytes that a Normal extrinsic on the Millau chain requires. -pub fn max_extrinsic_size() -> u32 { - *BlockLength::get().max.get(DispatchClass::Normal) -} - -/// Name of the `MillauHeaderApi::best_block` runtime method. -pub const BEST_MILLAU_BLOCKS_METHOD: &str = "MillauHeaderApi_best_blocks"; -/// Name of the `MillauHeaderApi::finalized_block` runtime method. -pub const FINALIZED_MILLAU_BLOCK_METHOD: &str = "MillauHeaderApi_finalized_block"; -/// Name of the `MillauHeaderApi::is_known_block` runtime method. -pub const IS_KNOWN_MILLAU_BLOCK_METHOD: &str = "MillauHeaderApi_is_known_block"; -/// Name of the `MillauHeaderApi::incomplete_headers` runtime method. -pub const INCOMPLETE_MILLAU_HEADERS_METHOD: &str = "MillauHeaderApi_incomplete_headers"; - -/// Name of the `ToMillauOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. 
-pub const TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD: &str = - "ToMillauOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; -/// Name of the `ToMillauOutboundLaneApi::messages_dispatch_weight` runtime method. -pub const TO_MILLAU_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToMillauOutboundLaneApi_messages_dispatch_weight"; -/// Name of the `ToMillauOutboundLaneApi::latest_received_nonce` runtime method. -pub const TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str = "ToMillauOutboundLaneApi_latest_received_nonce"; -/// Name of the `ToMillauOutboundLaneApi::latest_generated_nonce` runtime method. -pub const TO_MILLAU_LATEST_GENERATED_NONCE_METHOD: &str = "ToMillauOutboundLaneApi_latest_generated_nonce"; - -/// Name of the `FromMillauInboundLaneApi::latest_received_nonce` runtime method. -pub const FROM_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str = "FromMillauInboundLaneApi_latest_received_nonce"; -/// Name of the `FromMillauInboundLaneApi::latest_onfirmed_nonce` runtime method. -pub const FROM_MILLAU_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromMillauInboundLaneApi_latest_confirmed_nonce"; -/// Name of the `FromMillauInboundLaneApi::unrewarded_relayers_state` runtime method. -pub const FROM_MILLAU_UNREWARDED_RELAYERS_STATE: &str = "FromMillauInboundLaneApi_unrewarded_relayers_state"; - -sp_api::decl_runtime_apis! { - /// API for querying information about Millau headers from the Bridge Pallet instance. - /// - /// This API is implemented by runtimes that are bridging with Millau chain, not the - /// Millau runtime itself. - pub trait MillauHeaderApi { - /// Returns number and hash of the best blocks known to the bridge module. - /// - /// Will return multiple headers if there are many headers at the same "best" height. - /// - /// The caller should only submit an `import_header` transaction that makes - /// (or leads to making) other header the best one. 
- fn best_blocks() -> Vec<(BlockNumber, Hash)>; - /// Returns number and hash of the best finalized block known to the bridge module. - fn finalized_block() -> (BlockNumber, Hash); - /// Returns numbers and hashes of headers that require finality proofs. - /// - /// An empty response means that there are no headers which currently require a - /// finality proof. - fn incomplete_headers() -> Vec<(BlockNumber, Hash)>; - /// Returns true if the header is known to the runtime. - fn is_known_block(hash: Hash) -> bool; - /// Returns true if the header is considered finalized by the runtime. - fn is_finalized_block(hash: Hash) -> bool; - } - - /// Outbound message lane API for messages that are sent to Millau chain. - /// - /// This API is implemented by runtimes that are sending messages to Millau chain, not the - /// Millau runtime itself. - pub trait ToMillauOutboundLaneApi { - /// Estimate message delivery and dispatch fee that needs to be paid by the sender on - /// this chain. - /// - /// Returns `None` if message is too expensive to be sent to Millau from this chain. - /// - /// Please keep in mind that this method returns lowest message fee required for message - /// to be accepted to the lane. It may be good idea to pay a bit over this price to account - /// future exchange rate changes and guarantee that relayer would deliver your message - /// to the target chain. - fn estimate_message_delivery_and_dispatch_fee( - lane_id: LaneId, - payload: OutboundPayload, - ) -> Option; - /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. - /// - /// If some (or all) messages are missing from the storage, they'll also will - /// be missing from the resulting vector. The vector is ordered by the nonce. - fn messages_dispatch_weight( - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - ) -> Vec<(MessageNonce, Weight, u32)>; - /// Returns nonce of the latest message, received by bridged chain. 
- fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Returns nonce of the latest message, generated by given lane. - fn latest_generated_nonce(lane: LaneId) -> MessageNonce; - } - - /// Inbound message lane API for messages sent by Millau chain. - /// - /// This API is implemented by runtimes that are receiving messages from Millau chain, not the - /// Millau runtime itself. - pub trait FromMillauInboundLaneApi { - /// Returns nonce of the latest message, received by given lane. - fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Nonce of latest message that has been confirmed to the bridged chain. - fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; - /// State of the unrewarded relayers set at given lane. - fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_runtime::codec::Encode; - - #[test] - fn maximal_account_size_does_not_overflow_constant() { - assert!( - MAXIMAL_ENCODED_ACCOUNT_ID_SIZE as usize >= AccountId::default().encode().len(), - "Actual maximal size of encoded AccountId ({}) overflows expected ({})", - AccountId::default().encode().len(), - MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, - ); - } -} diff --git a/polkadot/bridges/primitives/millau/src/millau_hash.rs b/polkadot/bridges/primitives/millau/src/millau_hash.rs deleted file mode 100644 index e917329d2cc09a97d35069f70d3ef15e6716c1fb..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/millau/src/millau_hash.rs +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use parity_util_mem::MallocSizeOf; -use sp_runtime::traits::CheckEqual; - -// `sp_core::H512` can't be used, because it doesn't implement `CheckEqual`, which is required -// by `frame_system::Config::Hash`. - -fixed_hash::construct_fixed_hash! { - /// Hash type used in Millau chain. - #[derive(MallocSizeOf)] - pub struct MillauHash(64); -} - -#[cfg(feature = "std")] -impl_serde::impl_fixed_hash_serde!(MillauHash, 64); - -impl_codec::impl_fixed_hash_codec!(MillauHash, 64); - -impl CheckEqual for MillauHash { - #[cfg(feature = "std")] - fn check_equal(&self, other: &Self) { - use sp_core::hexdisplay::HexDisplay; - if self != other { - println!( - "Hash: given={}, expected={}", - HexDisplay::from(self.as_fixed_bytes()), - HexDisplay::from(other.as_fixed_bytes()), - ); - } - } - - #[cfg(not(feature = "std"))] - fn check_equal(&self, other: &Self) { - use frame_support::Printable; - - if self != other { - "Hash not equal".print(); - self.as_bytes().print(); - other.as_bytes().print(); - } - } -} diff --git a/polkadot/bridges/primitives/polkadot/Cargo.toml b/polkadot/bridges/primitives/polkadot/Cargo.toml deleted file mode 100644 index f7c9b9717d1cd5a5e9aa487412c99438670569d1..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/polkadot/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "bp-polkadot" -description = "Primitives of Polkadot runtime." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] - -# Bridge Dependencies - -bp-message-lane = { path = "../message-lane", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-message-lane/std", - "bp-runtime/std", - "frame-support/std", - "frame-system/std", - "sp-api/std", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/polkadot/bridges/primitives/polkadot/src/lib.rs b/polkadot/bridges/primitives/polkadot/src/lib.rs deleted file mode 100644 index a82dd5075bb67ca3dfd1772284983954680e0637..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/polkadot/src/lib.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] -// Runtime-generated DecodeLimit::decode_all_with_depth_limit -#![allow(clippy::unnecessary_mut_passed)] - -use bp_message_lane::{LaneId, MessageNonce}; -use bp_runtime::Chain; -use frame_support::{weights::Weight, RuntimeDebug}; -use sp_core::Hasher as HasherT; -use sp_runtime::{ - generic, - traits::{BlakeTwo256, IdentifyAccount, Verify}, - MultiSignature, OpaqueExtrinsic as UncheckedExtrinsic, -}; -use sp_std::prelude::*; - -// TODO: may need to be updated after https://github.com/paritytech/parity-bridges-common/issues/78 -/// Maximal number of messages in single delivery transaction. -pub const MAX_MESSAGES_IN_DELIVERY_TRANSACTION: MessageNonce = 128; - -/// Maximal number of unrewarded relayer entries at inbound lane. -pub const MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE: MessageNonce = 128; - -// TODO: should be selected keeping in mind: -// finality delay on both chains + reward payout cost + messages throughput. -/// Maximal number of unconfirmed messages at inbound lane. -pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 8192; - -/// Block number type used in Polkadot. -pub type BlockNumber = u32; - -/// Hash type used in Polkadot. -pub type Hash = ::Out; - -/// The type of an object that can produce hashes on Polkadot. -pub type Hasher = BlakeTwo256; - -/// The header type used by Polkadot. -pub type Header = generic::Header; - -/// Signature type used by Polkadot. 
-pub type Signature = MultiSignature; - -/// Public key of account on Polkadot chain. -pub type AccountPublic = ::Signer; - -/// Id of account on Polkadot chain. -pub type AccountId = ::AccountId; - -/// Index of a transaction on the Polkadot chain. -pub type Nonce = u32; - -/// Block type of Polkadot. -pub type Block = generic::Block; - -/// Polkadot block signed with a Justification. -pub type SignedBlock = generic::SignedBlock; - -/// The balance of an account on Polkadot. -pub type Balance = u128; - -/// Polkadot chain. -#[derive(RuntimeDebug)] -pub struct Polkadot; - -impl Chain for Polkadot { - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; -} - -/// Convert a 256-bit hash into an AccountId. -pub struct AccountIdConverter; - -impl sp_runtime::traits::Convert for AccountIdConverter { - fn convert(hash: sp_core::H256) -> AccountId { - hash.to_fixed_bytes().into() - } -} - -/// Name of the `PolkadotHeaderApi::best_blocks` runtime method. -pub const BEST_POLKADOT_BLOCKS_METHOD: &str = "PolkadotHeaderApi_best_blocks"; -/// Name of the `PolkadotHeaderApi::finalized_block` runtime method. -pub const FINALIZED_POLKADOT_BLOCK_METHOD: &str = "PolkadotHeaderApi_finalized_block"; -/// Name of the `PolkadotHeaderApi::is_known_block` runtime method. -pub const IS_KNOWN_POLKADOT_BLOCK_METHOD: &str = "PolkadotHeaderApi_is_known_block"; -/// Name of the `PolkadotHeaderApi::incomplete_headers` runtime method. -pub const INCOMPLETE_POLKADOT_HEADERS_METHOD: &str = "PolkadotHeaderApi_incomplete_headers"; - -sp_api::decl_runtime_apis! { - /// API for querying information about Polkadot headers from the Bridge Pallet instance. - /// - /// This API is implemented by runtimes that are bridging with Polkadot chain, not the - /// Polkadot runtime itself. - pub trait PolkadotHeaderApi { - /// Returns number and hash of the best blocks known to the bridge module. 
- /// - /// Will return multiple headers if there are many headers at the same "best" height. - /// - /// The caller should only submit an `import_header` transaction that makes - /// (or leads to making) other header the best one. - fn best_blocks() -> Vec<(BlockNumber, Hash)>; - /// Returns number and hash of the best finalized block known to the bridge module. - fn finalized_block() -> (BlockNumber, Hash); - /// Returns numbers and hashes of headers that require finality proofs. - /// - /// An empty response means that there are no headers which currently require a - /// finality proof. - fn incomplete_headers() -> Vec<(BlockNumber, Hash)>; - /// Returns true if the header is known to the runtime. - fn is_known_block(hash: Hash) -> bool; - /// Returns true if the header is considered finalized by the runtime. - fn is_finalized_block(hash: Hash) -> bool; - } - - /// Outbound message lane API for messages that are sent to Polkadot chain. - /// - /// This API is implemented by runtimes that are sending messages to Polkadot chain, not the - /// Polkadot runtime itself. - pub trait ToPolkadotOutboundLaneApi { - /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. - /// - /// If some (or all) messages are missing from the storage, they'll also will - /// be missing from the resulting vector. The vector is ordered by the nonce. - fn messages_dispatch_weight( - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - ) -> Vec<(MessageNonce, Weight, u32)>; - /// Returns nonce of the latest message, received by bridged chain. - fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Returns nonce of the latest message, generated by given lane. - fn latest_generated_nonce(lane: LaneId) -> MessageNonce; - } - - /// Inbound message lane API for messages sent by Polkadot chain. - /// - /// This API is implemented by runtimes that are receiving messages from Polkadot chain, not the - /// Polkadot runtime itself. 
- pub trait FromPolkadotInboundLaneApi { - /// Returns nonce of the latest message, received by given lane. - fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Nonce of latest message that has been confirmed to the bridged chain. - fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; - } -} diff --git a/polkadot/bridges/primitives/rialto/Cargo.toml b/polkadot/bridges/primitives/rialto/Cargo.toml deleted file mode 100644 index d6c12fc84809b6593ee739d4cd75ca6864660497..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/rialto/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "bp-rialto" -description = "Primitives of Rialto runtime." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] - -# Bridge Dependencies - -bp-message-lane = { path = "../message-lane", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[features] -default = ["std"] -std = [ - "bp-message-lane/std", - "bp-runtime/std", - "frame-support/std", - "frame-system/std", - "sp-api/std", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/polkadot/bridges/primitives/rialto/src/lib.rs 
b/polkadot/bridges/primitives/rialto/src/lib.rs deleted file mode 100644 index 706e2f27854d26e823d648fd19447bbc82fc0c59..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/rialto/src/lib.rs +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] -// Runtime-generated DecodeLimit::decode_all_With_depth_limit -#![allow(clippy::unnecessary_mut_passed)] - -use bp_message_lane::{LaneId, MessageNonce, UnrewardedRelayersState}; -use bp_runtime::Chain; -use frame_support::{ - weights::{constants::WEIGHT_PER_SECOND, DispatchClass, Weight}, - Parameter, RuntimeDebug, -}; -use frame_system::limits; -use sp_core::Hasher as HasherT; -use sp_runtime::{ - traits::{BlakeTwo256, Convert, IdentifyAccount, Verify}, - MultiSignature, MultiSigner, Perbill, -}; -use sp_std::prelude::*; - -/// Number of extra bytes (excluding size of storage value itself) of storage proof, built at -/// Rialto chain. This mostly depends on number of entries (and their density) in the storage trie. -/// Some reserve is reserved to account future chain growth. 
-pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; - -/// Maximal size (in bytes) of encoded (using `Encode::encode()`) account id. -pub const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 32; - -/// Maximal weight of single Rialto block. -/// -/// This represents two seconds of compute assuming a target block time of six seconds. -pub const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; - -/// Represents the average portion of a block's weight that will be used by an -/// `on_initialize()` runtime call. -pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); - -/// Represents the portion of a block that will be used by Normal extrinsics. -pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); - -/// Maximal number of unrewarded relayer entries at inbound lane. -pub const MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE: MessageNonce = 128; - -/// Maximal number of unconfirmed messages at inbound lane. -pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 128; - -/// Weight of single regular message delivery transaction on Rialto chain. -/// -/// This value is a result of `pallet_message_lane::Module::receive_messages_proof_weight()` call -/// for the case when single message of `pallet_message_lane::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered. -/// The message must have dispatch weight set to zero. The result then must be rounded up to account -/// possible future runtime upgrades. -pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_000_000_000; - -/// Increase of delivery transaction weight on Rialto chain with every additional message byte. -/// -/// This value is a result of `pallet_message_lane::WeightInfoExt::storage_proof_size_overhead(1)` call. The -/// result then must be rounded up to account possible future runtime upgrades. -pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000; - -/// Maximal weight of single message delivery confirmation transaction on Rialto chain. 
-/// -/// This value is a result of `pallet_message_lane::Module::receive_messages_delivery_proof` weight formula computation -/// for the case when single message is confirmed. The result then must be rounded up to account possible future -/// runtime upgrades. -pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000; - -/// The length of a session (how often authorities change) on Rialto measured in of number of blocks. -pub const SESSION_LENGTH: BlockNumber = 4; - -/// Re-export `time_units` to make usage easier. -pub use time_units::*; - -/// Human readable time units defined in terms of number of blocks. -pub mod time_units { - use super::BlockNumber; - - pub const MILLISECS_PER_BLOCK: u64 = 6000; - pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; - - pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); - pub const HOURS: BlockNumber = MINUTES * 60; - pub const DAYS: BlockNumber = HOURS * 24; -} - -/// Block number type used in Rialto. -pub type BlockNumber = u32; - -/// Hash type used in Rialto. -pub type Hash = ::Out; - -/// The type of an object that can produce hashes on Rialto. -pub type Hasher = BlakeTwo256; - -/// The header type used by Rialto. -pub type Header = sp_runtime::generic::Header; - -/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. -pub type Signature = MultiSignature; - -/// Some way of identifying an account on the chain. We intentionally make it equivalent -/// to the public key of our transaction signing scheme. -pub type AccountId = <::Signer as IdentifyAccount>::AccountId; - -/// Public key of the chain account that may be used to verify signatures. -pub type AccountSigner = MultiSigner; - -/// Balance of an account. -pub type Balance = u128; - -/// Rialto chain. 
-#[derive(RuntimeDebug)] -pub struct Rialto; - -impl Chain for Rialto { - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; -} - -/// Convert a 256-bit hash into an AccountId. -pub struct AccountIdConverter; - -impl Convert for AccountIdConverter { - fn convert(hash: sp_core::H256) -> AccountId { - hash.to_fixed_bytes().into() - } -} - -// We use this to get the account on Rialto (target) which is derived from Millau's (source) -// account. We do this so we can fund the derived account on Rialto at Genesis to it can pay -// transaction fees. -// -// The reason we can use the same `AccountId` type for both chains is because they share the same -// development seed phrase. -// -// Note that this should only be used for testing. -pub fn derive_account_from_millau_id(id: bp_runtime::SourceAccount) -> AccountId { - let encoded_id = bp_runtime::derive_account_id(bp_runtime::MILLAU_BRIDGE_INSTANCE, id); - AccountIdConverter::convert(encoded_id) -} - -frame_support::parameter_types! { - pub BlockLength: limits::BlockLength = - limits::BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() - // Allowance for Normal class - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - // Allowance for Operational class - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Extra reserved space for Operational class - weights.reserved = Some(MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - // By default Mandatory class is not limited at all. - // This parameter is used to derive maximal size of a single extrinsic. - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); -} - -/// Get the maximum weight (compute time) that a Normal extrinsic on the Millau chain can use. 
-pub fn max_extrinsic_weight() -> Weight { - BlockWeights::get() - .get(DispatchClass::Normal) - .max_extrinsic - .unwrap_or(Weight::MAX) -} - -/// Get the maximum length in bytes that a Normal extrinsic on the Millau chain requires. -pub fn max_extrinsic_size() -> u32 { - *BlockLength::get().max.get(DispatchClass::Normal) -} - -/// Name of the `RialtoHeaderApi::best_blocks` runtime method. -pub const BEST_RIALTO_BLOCKS_METHOD: &str = "RialtoHeaderApi_best_blocks"; -/// Name of the `RialtoHeaderApi::finalized_block` runtime method. -pub const FINALIZED_RIALTO_BLOCK_METHOD: &str = "RialtoHeaderApi_finalized_block"; -/// Name of the `RialtoHeaderApi::is_known_block` runtime method. -pub const IS_KNOWN_RIALTO_BLOCK_METHOD: &str = "RialtoHeaderApi_is_known_block"; -/// Name of the `RialtoHeaderApi::incomplete_headers` runtime method. -pub const INCOMPLETE_RIALTO_HEADERS_METHOD: &str = "RialtoHeaderApi_incomplete_headers"; - -/// Name of the `ToRialtoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. -pub const TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD: &str = - "ToRialtoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; -/// Name of the `ToRialtoOutboundLaneApi::messages_dispatch_weight` runtime method. -pub const TO_RIALTO_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToRialtoOutboundLaneApi_messages_dispatch_weight"; -/// Name of the `ToRialtoOutboundLaneApi::latest_generated_nonce` runtime method. -pub const TO_RIALTO_LATEST_GENERATED_NONCE_METHOD: &str = "ToRialtoOutboundLaneApi_latest_generated_nonce"; -/// Name of the `ToRialtoOutboundLaneApi::latest_received_nonce` runtime method. -pub const TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD: &str = "ToRialtoOutboundLaneApi_latest_received_nonce"; - -/// Name of the `FromRialtoInboundLaneApi::latest_received_nonce` runtime method. 
-pub const FROM_RIALTO_LATEST_RECEIVED_NONCE_METHOD: &str = "FromRialtoInboundLaneApi_latest_received_nonce"; -/// Name of the `FromRialtoInboundLaneApi::latest_onfirmed_nonce` runtime method. -pub const FROM_RIALTO_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromRialtoInboundLaneApi_latest_confirmed_nonce"; -/// Name of the `FromRialtoInboundLaneApi::unrewarded_relayers_state` runtime method. -pub const FROM_RIALTO_UNREWARDED_RELAYERS_STATE: &str = "FromRialtoInboundLaneApi_unrewarded_relayers_state"; - -sp_api::decl_runtime_apis! { - /// API for querying information about Rialto headers from the Bridge Pallet instance. - /// - /// This API is implemented by runtimes that are bridging with Rialto chain, not the - /// Rialto runtime itself. - pub trait RialtoHeaderApi { - /// Returns number and hash of the best blocks known to the bridge module. - /// - /// Will return multiple headers if there are many headers at the same "best" height. - /// - /// The caller should only submit an `import_header` transaction that makes - /// (or leads to making) other header the best one. - fn best_blocks() -> Vec<(BlockNumber, Hash)>; - /// Returns number and hash of the best finalized block known to the bridge module. - fn finalized_block() -> (BlockNumber, Hash); - /// Returns numbers and hashes of headers that require finality proofs. - /// - /// An empty response means that there are no headers which currently require a - /// finality proof. - fn incomplete_headers() -> Vec<(BlockNumber, Hash)>; - /// Returns true if the header is known to the runtime. - fn is_known_block(hash: Hash) -> bool; - /// Returns true if the header is considered finalized by the runtime. - fn is_finalized_block(hash: Hash) -> bool; - } - - /// Outbound message lane API for messages that are sent to Rialto chain. - /// - /// This API is implemented by runtimes that are sending messages to Rialto chain, not the - /// Rialto runtime itself. 
- pub trait ToRialtoOutboundLaneApi { - /// Estimate message delivery and dispatch fee that needs to be paid by the sender on - /// this chain. - /// - /// Returns `None` if message is too expensive to be sent to Rialto from this chain. - /// - /// Please keep in mind that this method returns lowest message fee required for message - /// to be accepted to the lane. It may be good idea to pay a bit over this price to account - /// future exchange rate changes and guarantee that relayer would deliver your message - /// to the target chain. - fn estimate_message_delivery_and_dispatch_fee( - lane_id: LaneId, - payload: OutboundPayload, - ) -> Option; - /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. - /// - /// If some (or all) messages are missing from the storage, they'll also will - /// be missing from the resulting vector. The vector is ordered by the nonce. - fn messages_dispatch_weight( - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - ) -> Vec<(MessageNonce, Weight, u32)>; - /// Returns nonce of the latest message, received by bridged chain. - fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Returns nonce of the latest message, generated by given lane. - fn latest_generated_nonce(lane: LaneId) -> MessageNonce; - } - - /// Inbound message lane API for messages sent by Rialto chain. - /// - /// This API is implemented by runtimes that are receiving messages from Rialto chain, not the - /// Rialto runtime itself. - pub trait FromRialtoInboundLaneApi { - /// Returns nonce of the latest message, received by given lane. - fn latest_received_nonce(lane: LaneId) -> MessageNonce; - /// Nonce of latest message that has been confirmed to the bridged chain. - fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; - /// State of the unrewarded relayers set at given lane. 
- fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_runtime::codec::Encode; - - #[test] - fn maximal_account_size_does_not_overflow_constant() { - assert!( - MAXIMAL_ENCODED_ACCOUNT_ID_SIZE as usize >= AccountId::default().encode().len(), - "Actual maximal size of encoded AccountId ({}) overflows expected ({})", - AccountId::default().encode().len(), - MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, - ); - } -} diff --git a/polkadot/bridges/primitives/runtime/Cargo.toml b/polkadot/bridges/primitives/runtime/Cargo.toml deleted file mode 100644 index b8e511fa56f82418b8e2e4241f9c3bc6709baf5c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/runtime/Cargo.toml +++ /dev/null @@ -1,31 +0,0 @@ -[package] -name = "bp-runtime" -description = "Primitives that may be used at (bridges) runtime level." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -num-traits = { version = "0.2", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-support/std", - "num-traits/std", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", -] diff --git 
a/polkadot/bridges/primitives/runtime/src/chain.rs b/polkadot/bridges/primitives/runtime/src/chain.rs deleted file mode 100644 index 348b5bf1d277d70631d1477324edbf1281be7483..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/runtime/src/chain.rs +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use frame_support::Parameter; -use num_traits::AsPrimitive; -use sp_runtime::traits::{ - AtLeast32BitUnsigned, Hash as HashT, Header as HeaderT, MaybeDisplay, MaybeMallocSizeOf, MaybeSerializeDeserialize, - Member, SimpleBitOps, -}; -use sp_std::str::FromStr; - -/// Minimal Substrate-based chain representation that may be used from no_std environment. -pub trait Chain: Send + Sync + 'static { - /// A type that fulfills the abstract idea of what a Substrate block number is. - // Constraits come from the associated Number type of `sp_runtime::traits::Header` - // See here for more info: - // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Number - // - // Note that the `AsPrimitive` trait is required by the GRANDPA justification - // verifier, and is not usually part of a Substrate Header's Number type. 
- type BlockNumber: Parameter - + Member - + MaybeSerializeDeserialize - + sp_std::hash::Hash - + Copy - + Default - + MaybeDisplay - + AtLeast32BitUnsigned - + FromStr - + MaybeMallocSizeOf - + AsPrimitive - + Default; - - /// A type that fulfills the abstract idea of what a Substrate hash is. - // Constraits come from the associated Hash type of `sp_runtime::traits::Header` - // See here for more info: - // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Hash - type Hash: Parameter - + Member - + MaybeSerializeDeserialize - + sp_std::hash::Hash - + Ord - + Copy - + MaybeDisplay - + Default - + SimpleBitOps - + AsRef<[u8]> - + AsMut<[u8]> - + MaybeMallocSizeOf; - - /// A type that fulfills the abstract idea of what a Substrate hasher (a type - /// that produces hashes) is. - // Constraits come from the associated Hashing type of `sp_runtime::traits::Header` - // See here for more info: - // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Hashing - type Hasher: HashT; - - /// A type that fulfills the abstract idea of what a Substrate header is. - // See here for more info: - // https://crates.parity.io/sp_runtime/traits/trait.Header.html - type Header: Parameter + HeaderT + MaybeSerializeDeserialize; -} - -/// Block number used by the chain. -pub type BlockNumberOf = ::BlockNumber; - -/// Hash type used by the chain. -pub type HashOf = ::Hash; - -/// Hasher type used by the chain. -pub type HasherOf = ::Hasher; - -/// Header type used by the chain. -pub type HeaderOf = ::Header; diff --git a/polkadot/bridges/primitives/runtime/src/lib.rs b/polkadot/bridges/primitives/runtime/src/lib.rs deleted file mode 100644 index 1afb1b1fd827930bdd633646ef346945e402f332..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/runtime/src/lib.rs +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives that may be used at (bridges) runtime level. - -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::Encode; -use sp_core::hash::H256; -use sp_io::hashing::blake2_256; -use sp_std::convert::TryFrom; - -pub use chain::{BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf}; - -mod chain; - -/// Use this when something must be shared among all instances. -pub const NO_INSTANCE_ID: InstanceId = [0, 0, 0, 0]; - -/// Bridge-with-Rialto instance id. -pub const RIALTO_BRIDGE_INSTANCE: InstanceId = *b"rlto"; - -/// Bridge-with-Millau instance id. -pub const MILLAU_BRIDGE_INSTANCE: InstanceId = *b"mlau"; - -/// Bridge-with-Polkadot instance id. -pub const POLKADOT_BRIDGE_INSTANCE: InstanceId = *b"pdot"; - -/// Bridge-with-Kusama instance id. -pub const KUSAMA_BRIDGE_INSTANCE: InstanceId = *b"ksma"; - -/// Call-dispatch module prefix. -pub const CALL_DISPATCH_MODULE_PREFIX: &[u8] = b"pallet-bridge/call-dispatch"; - -/// Message-lane module prefix. -pub const MESSAGE_LANE_MODULE_PREFIX: &[u8] = b"pallet-bridge/message-lane"; - -/// A unique prefix for entropy when generating cross-chain account IDs. -pub const ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/account"; - -/// A unique prefix for entropy when generating a cross-chain account ID for the Root account. 
-pub const ROOT_ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/root"; - -/// Id of deployed module instance. We have a bunch of pallets that may be used in -/// different bridges. E.g. message-lane pallet may be deployed twice in the same -/// runtime to bridge ThisChain with Chain1 and Chain2. Sometimes we need to be able -/// to identify deployed instance dynamically. This type is used for that. -pub type InstanceId = [u8; 4]; - -/// Type of accounts on the source chain. -pub enum SourceAccount { - /// An account that belongs to Root (privileged origin). - Root, - /// A non-privileged account. - /// - /// The embedded account ID may or may not have a private key depending on the "owner" of the - /// account (private key, pallet, proxy, etc.). - Account(T), -} - -/// Derive an account ID from a foreign account ID. -/// -/// This function returns an encoded Blake2 hash. It is the responsibility of the caller to ensure -/// this can be succesfully decoded into an AccountId. -/// -/// The `bridge_id` is used to provide extra entropy when producing account IDs. This helps prevent -/// AccountId collisions between different bridges on a single target chain. -/// -/// Note: If the same `bridge_id` is used across different chains (for example, if one source chain -/// is bridged to multiple target chains), then all the derived accounts would be the same across -/// the different chains. This could negatively impact users' privacy across chains. -pub fn derive_account_id(bridge_id: InstanceId, id: SourceAccount) -> H256 -where - AccountId: Encode, -{ - match id { - SourceAccount::Root => (ROOT_ACCOUNT_DERIVATION_PREFIX, bridge_id).using_encoded(blake2_256), - SourceAccount::Account(id) => (ACCOUNT_DERIVATION_PREFIX, bridge_id, id).using_encoded(blake2_256), - } - .into() -} - -/// Derive the account ID of the shared relayer fund account. -/// -/// This account is used to collect fees for relayers that are passing messages across the bridge. 
-/// -/// The account ID can be the same across different instances of `message-lane` if the same -/// `bridge_id` is used. -pub fn derive_relayer_fund_account_id(bridge_id: InstanceId) -> H256 { - ("relayer-fund-account", bridge_id).using_encoded(blake2_256).into() -} - -/// Anything that has size. -pub trait Size { - /// Return approximate size of this object (in bytes). - /// - /// This function should be lightweight. The result should not necessary be absolutely - /// accurate. - fn size_hint(&self) -> u32; -} - -impl Size for () { - fn size_hint(&self) -> u32 { - 0 - } -} - -/// Pre-computed size. -pub struct PreComputedSize(pub usize); - -impl Size for PreComputedSize { - fn size_hint(&self) -> u32 { - u32::try_from(self.0).unwrap_or(u32::MAX) - } -} diff --git a/polkadot/bridges/primitives/test-utils/Cargo.toml b/polkadot/bridges/primitives/test-utils/Cargo.toml deleted file mode 100644 index 396e0e764635cd52cb090fb41cadad63a84de72d..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/test-utils/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = "bp-test-utils" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -finality-grandpa = { version = "0.14.0" } -bp-header-chain = { path = "../header-chain" } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/primitives/test-utils/src/lib.rs b/polkadot/bridges/primitives/test-utils/src/lib.rs deleted file mode 100644 index 182eb2cb796b76e134ad360f92a36b3ab00e686b..0000000000000000000000000000000000000000 --- a/polkadot/bridges/primitives/test-utils/src/lib.rs +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2021 Parity Technologies (UK) Ltd. 
-// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Utilities for testing runtime code. -//! -//! Unlike other crates in the `primitives` folder, this crate does *not* need to compile in a -//! `no_std` environment. This is fine because this code should only be used, as the name implies, -//! in tests. - -use bp_header_chain::justification::GrandpaJustification; -use finality_grandpa::voter_set::VoterSet; -use sp_finality_grandpa::{AuthorityId, AuthorityList, AuthorityWeight}; -use sp_finality_grandpa::{AuthoritySignature, SetId}; -use sp_keyring::Ed25519Keyring; -use sp_runtime::traits::Header as HeaderT; -use sp_runtime::traits::{One, Zero}; - -pub const TEST_GRANDPA_ROUND: u64 = 1; -pub const TEST_GRANDPA_SET_ID: SetId = 1; - -/// Get a valid Grandpa justification for a header given a Grandpa round, authority set ID, and -/// authority list. 
-pub fn make_justification_for_header( - header: &H, - round: u64, - set_id: SetId, - authorities: &[(AuthorityId, AuthorityWeight)], -) -> GrandpaJustification { - let (target_hash, target_number) = (header.hash(), *header.number()); - let mut precommits = vec![]; - let mut votes_ancestries = vec![]; - - // We want to make sure that the header included in the vote ancestries - // is actually related to our target header - let mut precommit_header = test_header::(target_number + One::one()); - precommit_header.set_parent_hash(target_hash); - - // I'm using the same header for all the voters since it doesn't matter as long - // as they all vote on blocks _ahead_ of the one we're interested in finalizing - for (id, _weight) in authorities.iter() { - let signer = extract_keyring(&id); - let precommit = signed_precommit::( - signer, - (precommit_header.hash(), *precommit_header.number()), - round, - set_id, - ); - precommits.push(precommit); - votes_ancestries.push(precommit_header.clone()); - } - - GrandpaJustification { - round, - commit: finality_grandpa::Commit { - target_hash, - target_number, - precommits, - }, - votes_ancestries, - } -} - -fn signed_precommit( - signer: Ed25519Keyring, - target: (H::Hash, H::Number), - round: u64, - set_id: SetId, -) -> finality_grandpa::SignedPrecommit { - let precommit = finality_grandpa::Precommit { - target_hash: target.0, - target_number: target.1, - }; - let encoded = - sp_finality_grandpa::localized_payload(round, set_id, &finality_grandpa::Message::Precommit(precommit.clone())); - let signature = signer.sign(&encoded[..]).into(); - finality_grandpa::SignedPrecommit { - precommit, - signature, - id: signer.public().into(), - } -} - -/// Get a header for testing. -/// -/// The correct parent hash will be used if given a non-zero header. 
-pub fn test_header(number: H::Number) -> H { - let mut header = H::new( - number, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ); - - if number != Zero::zero() { - let parent_hash = test_header::(number - One::one()).hash(); - header.set_parent_hash(parent_hash); - } - - header -} - -/// Convenience function for generating a Header ID at a given block number. -pub fn header_id(index: u8) -> (H::Hash, H::Number) { - (test_header::(index.into()).hash(), index.into()) -} - -/// Get the identity of a test account given an ED25519 Public key. -pub fn extract_keyring(id: &AuthorityId) -> Ed25519Keyring { - let mut raw_public = [0; 32]; - raw_public.copy_from_slice(id.as_ref()); - Ed25519Keyring::from_raw_public(raw_public).unwrap() -} - -/// Get a valid set of voters for a Grandpa round. -pub fn voter_set() -> VoterSet { - VoterSet::new(authority_list()).unwrap() -} - -/// Convenience function to get a list of Grandpa authorities. -pub fn authority_list() -> AuthorityList { - vec![(alice(), 1), (bob(), 1), (charlie(), 1)] -} - -/// Get the Public key of the Alice test account. -pub fn alice() -> AuthorityId { - Ed25519Keyring::Alice.public().into() -} - -/// Get the Public key of the Bob test account. -pub fn bob() -> AuthorityId { - Ed25519Keyring::Bob.public().into() -} - -/// Get the Public key of the Charlie test account. 
-pub fn charlie() -> AuthorityId { - Ed25519Keyring::Charlie.public().into() -} diff --git a/polkadot/bridges/relays/ethereum-client/Cargo.toml b/polkadot/bridges/relays/ethereum-client/Cargo.toml deleted file mode 100644 index b0f6485ffd384aaff01fcd80b3f9351e1e321eb7..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum-client/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "relay-ethereum-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -bp-eth-poa = { path = "../../primitives/ethereum-poa" } -codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers-relay" } -hex-literal = "0.3" -jsonrpsee = { git = "https://github.com/svyatonik/jsonrpsee.git", branch = "shared-client-in-rpc-api", default-features = false, features = ["http"] } -libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"] } -log = "0.4.11" -relay-utils = { path = "../utils" } -web3 = { version = "0.15", git = "https://github.com/tomusdrw/rust-web3.git", branch ="td-ethabi", default-features = false } diff --git a/polkadot/bridges/relays/ethereum-client/src/client.rs b/polkadot/bridges/relays/ethereum-client/src/client.rs deleted file mode 100644 index 30a62a400e1dda7a1aed1f78bc53505c7247f406..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum-client/src/client.rs +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::rpc::Ethereum; -use crate::types::{ - Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SignedRawTx, SyncState, Transaction, - TransactionHash, H256, U256, -}; -use crate::{ConnectionParams, Error, Result}; - -use jsonrpsee::raw::RawClient; -use jsonrpsee::transport::http::HttpTransportClient; -use jsonrpsee::Client as RpcClient; - -/// Number of headers missing from the Ethereum node for us to consider node not synced. -const MAJOR_SYNC_BLOCKS: u64 = 5; - -/// The client used to interact with an Ethereum node through RPC. -#[derive(Clone)] -pub struct Client { - params: ConnectionParams, - client: RpcClient, -} - -impl Client { - /// Create a new Ethereum RPC Client. - pub fn new(params: ConnectionParams) -> Self { - Self { - client: Self::build_client(¶ms), - params, - } - } - - /// Build client to use in connection. - fn build_client(params: &ConnectionParams) -> RpcClient { - let uri = format!("http://{}:{}", params.host, params.port); - let transport = HttpTransportClient::new(&uri); - let raw_client = RawClient::new(transport); - raw_client.into() - } - - /// Reopen client connection. - pub fn reconnect(&mut self) { - self.client = Self::build_client(&self.params); - } -} - -impl Client { - /// Returns true if client is connected to at least one peer and is in synced state. - pub async fn ensure_synced(&self) -> Result<()> { - match Ethereum::syncing(&self.client).await? 
{ - SyncState::NotSyncing => Ok(()), - SyncState::Syncing(syncing) => { - let missing_headers = syncing.highest_block.saturating_sub(syncing.current_block); - if missing_headers > MAJOR_SYNC_BLOCKS.into() { - return Err(Error::ClientNotSynced(missing_headers)); - } - - Ok(()) - } - } - } - - /// Estimate gas usage for the given call. - pub async fn estimate_gas(&self, call_request: CallRequest) -> Result { - Ok(Ethereum::estimate_gas(&self.client, call_request).await?) - } - - /// Retrieve number of the best known block from the Ethereum node. - pub async fn best_block_number(&self) -> Result { - Ok(Ethereum::block_number(&self.client).await?.as_u64()) - } - - /// Retrieve number of the best known block from the Ethereum node. - pub async fn header_by_number(&self, block_number: u64) -> Result
{ - let get_full_tx_objects = false; - let header = Ethereum::get_block_by_number(&self.client, block_number, get_full_tx_objects).await?; - match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() { - true => Ok(header), - false => Err(Error::IncompleteHeader), - } - } - - /// Retrieve block header by its hash from Ethereum node. - pub async fn header_by_hash(&self, hash: H256) -> Result
{ - let get_full_tx_objects = false; - let header = Ethereum::get_block_by_hash(&self.client, hash, get_full_tx_objects).await?; - match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() { - true => Ok(header), - false => Err(Error::IncompleteHeader), - } - } - - /// Retrieve block header and its transactions by its number from Ethereum node. - pub async fn header_by_number_with_transactions(&self, number: u64) -> Result { - let get_full_tx_objects = true; - let header = Ethereum::get_block_by_number_with_transactions(&self.client, number, get_full_tx_objects).await?; - - let is_complete_header = header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some(); - if !is_complete_header { - return Err(Error::IncompleteHeader); - } - - let is_complete_transactions = header.transactions.iter().all(|tx| tx.raw.is_some()); - if !is_complete_transactions { - return Err(Error::IncompleteTransaction); - } - - Ok(header) - } - - /// Retrieve block header and its transactions by its hash from Ethereum node. - pub async fn header_by_hash_with_transactions(&self, hash: H256) -> Result { - let get_full_tx_objects = true; - let header = Ethereum::get_block_by_hash_with_transactions(&self.client, hash, get_full_tx_objects).await?; - - let is_complete_header = header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some(); - if !is_complete_header { - return Err(Error::IncompleteHeader); - } - - let is_complete_transactions = header.transactions.iter().all(|tx| tx.raw.is_some()); - if !is_complete_transactions { - return Err(Error::IncompleteTransaction); - } - - Ok(header) - } - - /// Retrieve transaction by its hash from Ethereum node. - pub async fn transaction_by_hash(&self, hash: H256) -> Result> { - Ok(Ethereum::transaction_by_hash(&self.client, hash).await?) - } - - /// Retrieve transaction receipt by transaction hash. 
- pub async fn transaction_receipt(&self, transaction_hash: H256) -> Result { - Ok(Ethereum::get_transaction_receipt(&self.client, transaction_hash).await?) - } - - /// Get the nonce of the given account. - pub async fn account_nonce(&self, address: Address) -> Result { - Ok(Ethereum::get_transaction_count(&self.client, address).await?) - } - - /// Submit an Ethereum transaction. - /// - /// The transaction must already be signed before sending it through this method. - pub async fn submit_transaction(&self, signed_raw_tx: SignedRawTx) -> Result { - let transaction = Bytes(signed_raw_tx); - let tx_hash = Ethereum::submit_transaction(&self.client, transaction).await?; - log::trace!(target: "bridge", "Sent transaction to Ethereum node: {:?}", tx_hash); - Ok(tx_hash) - } - - /// Call Ethereum smart contract. - pub async fn eth_call(&self, call_transaction: CallRequest) -> Result { - Ok(Ethereum::call(&self.client, call_transaction).await?) - } -} diff --git a/polkadot/bridges/relays/ethereum-client/src/error.rs b/polkadot/bridges/relays/ethereum-client/src/error.rs deleted file mode 100644 index 0f47891138aba0e0abcddc1cf87e45c290970990..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum-client/src/error.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Ethereum node RPC errors. - -use crate::types::U256; - -use jsonrpsee::client::RequestError; -use relay_utils::MaybeConnectionError; - -/// Result type used by Ethereum client. -pub type Result = std::result::Result; - -/// Errors that can occur only when interacting with -/// an Ethereum node through RPC. -#[derive(Debug)] -pub enum Error { - /// An error that can occur when making an HTTP request to - /// an JSON-RPC client. - Request(RequestError), - /// Failed to parse response. - ResponseParseFailed(String), - /// We have received a header with missing fields. - IncompleteHeader, - /// We have received a transaction missing a `raw` field. - IncompleteTransaction, - /// An invalid Substrate block number was received from - /// an Ethereum node. - InvalidSubstrateBlockNumber, - /// An invalid index has been received from an Ethereum node. - InvalidIncompleteIndex, - /// The client we're connected to is not synced, so we can't rely on its state. Contains - /// number of unsynced headers. 
- ClientNotSynced(U256), -} - -impl From for Error { - fn from(error: RequestError) -> Self { - Error::Request(error) - } -} - -impl MaybeConnectionError for Error { - fn is_connection_error(&self) -> bool { - matches!( - *self, - Error::Request(RequestError::TransportError(_)) | Error::ClientNotSynced(_), - ) - } -} - -impl ToString for Error { - fn to_string(&self) -> String { - match self { - Self::Request(e) => e.to_string(), - Self::ResponseParseFailed(e) => e.to_string(), - Self::IncompleteHeader => { - "Incomplete Ethereum Header Received (missing some of required fields - hash, number, logs_bloom)" - .to_string() - } - Self::IncompleteTransaction => "Incomplete Ethereum Transaction (missing required field - raw)".to_string(), - Self::InvalidSubstrateBlockNumber => "Received an invalid Substrate block from Ethereum Node".to_string(), - Self::InvalidIncompleteIndex => "Received an invalid incomplete index from Ethereum Node".to_string(), - Self::ClientNotSynced(missing_headers) => { - format!("Ethereum client is not synced: syncing {} headers", missing_headers) - } - } - } -} diff --git a/polkadot/bridges/relays/ethereum-client/src/lib.rs b/polkadot/bridges/relays/ethereum-client/src/lib.rs deleted file mode 100644 index 8c5a00e01b4d7119f2fc70307a6461d666ed0af1..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum-client/src/lib.rs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tools to interact with (Open) Ethereum node using RPC methods. - -#![warn(missing_docs)] - -mod client; -mod error; -mod rpc; -mod sign; - -pub use crate::client::Client; -pub use crate::error::{Error, Result}; -pub use crate::sign::{sign_and_submit_transaction, SigningParams}; - -pub mod types; - -/// Ethereum connection params. -#[derive(Debug, Clone)] -pub struct ConnectionParams { - /// Ethereum RPC host. - pub host: String, - /// Ethereum RPC port. - pub port: u16, -} - -impl Default for ConnectionParams { - fn default() -> Self { - ConnectionParams { - host: "localhost".into(), - port: 8545, - } - } -} diff --git a/polkadot/bridges/relays/ethereum-client/src/rpc.rs b/polkadot/bridges/relays/ethereum-client/src/rpc.rs deleted file mode 100644 index 3fa4f6ceb9cd9d311ebf7db728628d823e245097..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum-client/src/rpc.rs +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Ethereum node RPC interface. - -// The compiler doesn't think we're using the -// code from rpc_api! 
-#![allow(dead_code)] -#![allow(unused_variables)] - -use crate::types::{ - Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SyncState, Transaction, TransactionHash, - H256, U256, U64, -}; - -jsonrpsee::rpc_api! { - pub(crate) Ethereum { - #[rpc(method = "eth_syncing", positional_params)] - fn syncing() -> SyncState; - #[rpc(method = "eth_estimateGas", positional_params)] - fn estimate_gas(call_request: CallRequest) -> U256; - #[rpc(method = "eth_blockNumber", positional_params)] - fn block_number() -> U64; - #[rpc(method = "eth_getBlockByNumber", positional_params)] - fn get_block_by_number(block_number: U64, full_tx_objs: bool) -> Header; - #[rpc(method = "eth_getBlockByHash", positional_params)] - fn get_block_by_hash(hash: H256, full_tx_objs: bool) -> Header; - #[rpc(method = "eth_getBlockByNumber", positional_params)] - fn get_block_by_number_with_transactions(number: U64, full_tx_objs: bool) -> HeaderWithTransactions; - #[rpc(method = "eth_getBlockByHash", positional_params)] - fn get_block_by_hash_with_transactions(hash: H256, full_tx_objs: bool) -> HeaderWithTransactions; - #[rpc(method = "eth_getTransactionByHash", positional_params)] - fn transaction_by_hash(hash: H256) -> Option; - #[rpc(method = "eth_getTransactionReceipt", positional_params)] - fn get_transaction_receipt(transaction_hash: H256) -> Receipt; - #[rpc(method = "eth_getTransactionCount", positional_params)] - fn get_transaction_count(address: Address) -> U256; - #[rpc(method = "eth_submitTransaction", positional_params)] - fn submit_transaction(transaction: Bytes) -> TransactionHash; - #[rpc(method = "eth_call", positional_params)] - fn call(transaction_call: CallRequest) -> Bytes; - } -} diff --git a/polkadot/bridges/relays/ethereum-client/src/sign.rs b/polkadot/bridges/relays/ethereum-client/src/sign.rs deleted file mode 100644 index 462cb5dbd7d27dca759df726c841dd198e349e7c..0000000000000000000000000000000000000000 --- 
a/polkadot/bridges/relays/ethereum-client/src/sign.rs +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::types::{Address, CallRequest, U256}; -use crate::{Client, Result}; -use bp_eth_poa::signatures::{secret_to_address, SignTransaction}; -use hex_literal::hex; -use secp256k1::SecretKey; - -/// Ethereum signing params. -#[derive(Clone, Debug)] -pub struct SigningParams { - /// Ethereum chain id. - pub chain_id: u64, - /// Ethereum transactions signer. - pub signer: SecretKey, - /// Gas price we agree to pay. - pub gas_price: U256, -} - -impl Default for SigningParams { - fn default() -> Self { - SigningParams { - chain_id: 0x11, // Parity dev chain - // account that has a lot of ether when we run instant seal engine - // address: 0x00a329c0648769a73afac7f9381e08fb43dbea72 - // secret: 0x4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7 - signer: SecretKey::parse(&hex!( - "4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7" - )) - .expect("secret is hardcoded, thus valid; qed"), - gas_price: 8_000_000_000u64.into(), // 8 Gwei - } - } -} - -/// Sign and submit tranaction using given Ethereum client. 
-pub async fn sign_and_submit_transaction( - client: &Client, - params: &SigningParams, - contract_address: Option
, - nonce: Option, - double_gas: bool, - encoded_call: Vec, -) -> Result<()> { - let nonce = if let Some(n) = nonce { - n - } else { - let address: Address = secret_to_address(¶ms.signer); - client.account_nonce(address).await? - }; - - let call_request = CallRequest { - to: contract_address, - data: Some(encoded_call.clone().into()), - ..Default::default() - }; - let gas = client.estimate_gas(call_request).await?; - - let raw_transaction = bp_eth_poa::UnsignedTransaction { - nonce, - to: contract_address, - value: U256::zero(), - gas: if double_gas { gas.saturating_mul(2.into()) } else { gas }, - gas_price: params.gas_price, - payload: encoded_call, - } - .sign_by(¶ms.signer, Some(params.chain_id)); - - let _ = client.submit_transaction(raw_transaction).await?; - Ok(()) -} diff --git a/polkadot/bridges/relays/ethereum-client/src/types.rs b/polkadot/bridges/relays/ethereum-client/src/types.rs deleted file mode 100644 index 1bb9233b82ea4c5907d7b3d395415d58fe89221d..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum-client/src/types.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Common types that are used in relay <-> Ethereum node communications. 
- -use headers_relay::sync_types::SourceHeader; - -pub use web3::types::{Address, Bytes, CallRequest, SyncState, H256, U128, U256, U64}; - -/// When header is just received from the Ethereum node, we check that it has -/// both number and hash fields filled. -pub const HEADER_ID_PROOF: &str = "checked on retrieval; qed"; - -/// Ethereum transaction hash type. -pub type HeaderHash = H256; - -/// Ethereum transaction hash type. -pub type TransactionHash = H256; - -/// Ethereum transaction type. -pub type Transaction = web3::types::Transaction; - -/// Ethereum header type. -pub type Header = web3::types::Block; - -/// Ethereum header type used in headers sync. -#[derive(Clone, Debug, PartialEq)] -pub struct SyncHeader(Header); - -impl std::ops::Deref for SyncHeader { - type Target = Header; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -/// Ethereum header with transactions type. -pub type HeaderWithTransactions = web3::types::Block; - -/// Ethereum transaction receipt type. -pub type Receipt = web3::types::TransactionReceipt; - -/// Ethereum header ID. -pub type HeaderId = relay_utils::HeaderId; - -/// A raw Ethereum transaction that's been signed. -pub type SignedRawTx = Vec; - -impl From
for SyncHeader { - fn from(header: Header) -> Self { - Self(header) - } -} - -impl SourceHeader for SyncHeader { - fn id(&self) -> HeaderId { - relay_utils::HeaderId( - self.number.expect(HEADER_ID_PROOF).as_u64(), - self.hash.expect(HEADER_ID_PROOF), - ) - } - - fn parent_id(&self) -> HeaderId { - relay_utils::HeaderId(self.number.expect(HEADER_ID_PROOF).as_u64() - 1, self.parent_hash) - } -} diff --git a/polkadot/bridges/relays/ethereum/Cargo.toml b/polkadot/bridges/relays/ethereum/Cargo.toml deleted file mode 100644 index 860c0815e2e8bc929ae01de02c3afaca1f06a7ed..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/Cargo.toml +++ /dev/null @@ -1,48 +0,0 @@ -[package] -name = "ethereum-poa-relay" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -ansi_term = "0.12" -async-std = "1.9.0" -async-trait = "0.1.42" -clap = { version = "2.33.3", features = ["yaml"] } -codec = { package = "parity-scale-codec", version = "2.0.0" } -env_logger = "0.8.3" -ethabi = { git = "https://github.com/paritytech/ethabi.git", branch = "td-eth-types-11" } -ethabi-contract = { git = "https://github.com/paritytech/ethabi.git", branch = "td-eth-types-11" } -ethabi-derive = { git = "https://github.com/paritytech/ethabi.git", branch = "td-eth-types-11" } -futures = "0.3.12" -hex = "0.4" -hex-literal = "0.3" -libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"] } -log = "0.4.14" -num-traits = "0.2" -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0.62" -time = "0.2" - -# Bridge dependencies - -bp-currency-exchange = { path = "../../primitives/currency-exchange" } -bp-eth-poa = { path = "../../primitives/ethereum-poa" } -exchange-relay = { path = "../exchange-relay" } -headers-relay = { path = "../headers-relay" } -messages-relay = { path = "../messages-relay" } -relay-ethereum-client = { path = "../ethereum-client" } 
-relay-rialto-client = { path = "../rialto-client" } -relay-substrate-client = { path = "../substrate-client" } -relay-utils = { path = "../utils" } -rialto-runtime = { path = "../../bin/rialto/runtime" } - -# Substrate Dependencies - -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/ethereum/README.md b/polkadot/bridges/relays/ethereum/README.md deleted file mode 100644 index 9fe2f623fd05aebed50c17b464c1cefefedd2d82..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# PoA <> Substrate Bridge - -**DISCLAIMER:** *we recommend not using the bridge in "production" (to bridge significant amounts) just yet. -it's missing a code audit and should still be considered alpha. we can't rule out that there are bugs that might result in loss of the bridged amounts. -we'll update this disclaimer once that changes* - -These docs are very incomplete yet. Describe high-level goals here in the (near) future. 
diff --git a/polkadot/bridges/relays/ethereum/res/substrate-bridge-abi.json b/polkadot/bridges/relays/ethereum/res/substrate-bridge-abi.json deleted file mode 100644 index b7d7b4b9152cdf9ecce81b09e5b4261832ab3d7f..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/res/substrate-bridge-abi.json +++ /dev/null @@ -1,167 +0,0 @@ -[ - { - "inputs": [ - { - "internalType": "bytes", - "name": "rawInitialHeader", - "type": "bytes" - }, - { - "internalType": "uint64", - "name": "initialValidatorsSetId", - "type": "uint64" - }, - { - "internalType": "bytes", - "name": "initialValidatorsSet", - "type": "bytes" - } - ], - "stateMutability": "nonpayable", - "type": "constructor" - }, - { - "stateMutability": "nonpayable", - "type": "fallback" - }, - { - "inputs": [], - "name": "bestKnownHeader", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - }, - { - "internalType": "bytes32", - "name": "", - "type": "bytes32" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "finalityTargetNumber", - "type": "uint256" - }, - { - "internalType": "bytes32", - "name": "finalityTargetHash", - "type": "bytes32" - }, - { - "internalType": "bytes", - "name": "rawFinalityProof", - "type": "bytes" - } - ], - "name": "importFinalityProof", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes", - "name": "rawHeader1", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader2", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader3", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader4", - "type": "bytes" - } - ], - "name": "importHeaders", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [], - "name": "incompleteHeaders", - "outputs": [ - { - "internalType": "uint256[]", - "name": "", - 
"type": "uint256[]" - }, - { - "internalType": "bytes32[]", - "name": "", - "type": "bytes32[]" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes", - "name": "rawHeader1", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader2", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader3", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "rawHeader4", - "type": "bytes" - } - ], - "name": "isIncompleteHeaders", - "outputs": [ - { - "internalType": "uint256", - "name": "", - "type": "uint256" - } - ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "bytes32", - "name": "headerHash", - "type": "bytes32" - } - ], - "name": "isKnownHeader", - "outputs": [ - { - "internalType": "bool", - "name": "", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - } -] diff --git a/polkadot/bridges/relays/ethereum/res/substrate-bridge-bytecode.hex b/polkadot/bridges/relays/ethereum/res/substrate-bridge-bytecode.hex deleted file mode 100644 index 6dd6a33046f6c826b7d1b0990e620c5c60719821..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/res/substrate-bridge-bytecode.hex +++ /dev/null @@ -1 +0,0 @@ 
-60806040523480156200001157600080fd5b5060405162001af838038062001af8833981810160405260608110156200003757600080fd5b81019080805160405193929190846401000000008211156200005857600080fd5b9083019060208201858111156200006e57600080fd5b82516401000000008111828201881017156200008957600080fd5b82525081516020918201929091019080838360005b83811015620000b85781810151838201526020016200009e565b50505050905090810190601f168015620000e65780820380516001836020036101000a031916815260200191505b506040818152602083015192018051929491939192846401000000008211156200010f57600080fd5b9083019060208201858111156200012557600080fd5b82516401000000008111828201881017156200014057600080fd5b82525081516020918201929091019080838360005b838110156200016f57818101518382015260200162000155565b50505050905090810190601f1680156200019d5780820380516001836020036101000a031916815260200191505b50604052505050620001ae620003d5565b620001c2846001600160e01b03620002dc16565b805160008181556002918255604080840180516001908155825160e08101845281815260208088015181830190815293518286019081526080808a0151606085019081526001600160401b038e169185019190915260a0840188905260c084018890528951885260078352959096208251815460ff191690151517815593519284019290925593519482019490945590518051949550919390926200026f9260038501929101906200040a565b506080820151600482810180546001600160401b03199081166001600160401b039485161790915560a0850151600585015560c09094015160069093019290925560038054909316908616179091558251620002d1919060208501906200040a565b5050505050620004af565b620002e6620003d5565b60008060008060008651602088016040516020810160208101602081016020810160a08588886010600019fa6200031c57600080fd5b84519b5083519a50825199508151985080519750505050505050506060816001600160401b03811180156200035057600080fd5b506040519080825280601f01601f1916602001820160405280156200037c576020820181803683370190505b5090508115620003a85787516020890160208301848184846011600019fa620003a457600080fd5b5050505b6040805160a081018252968752602087019590955293850192909252606084015250608082015292915050565b6040518060a001604052806000801916815
2602001600080191681526020016000815260200160008152602001606081525090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106200044d57805160ff19168380011785556200047d565b828001600101855582156200047d579182015b828111156200047d57825182559160200191906001019062000460565b506200048b9291506200048f565b5090565b620004ac91905b808211156200048b576000815560010162000496565b90565b61163980620004bf6000396000f3fe608060405234801561001057600080fd5b50600436106100625760003560e01c8063374c2c26146100675780636a742c0914610108578063871ebe181461033d578063d96a2deb1461036e578063e8ffbe841461038f578063fae71ae8146105d4575b600080fd5b61006f610684565b604051808060200180602001838103835285818151815260200191508051906020019060200280838360005b838110156100b357818101518382015260200161009b565b50505050905001838103825284818151815260200191508051906020019060200280838360005b838110156100f25781810151838201526020016100da565b5050505090500194505050505060405180910390f35b61033b6004803603608081101561011e57600080fd5b810190602081018135600160201b81111561013857600080fd5b82018360208201111561014a57600080fd5b803590602001918460018302840111600160201b8311171561016b57600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b8111156101bd57600080fd5b8201836020820111156101cf57600080fd5b803590602001918460018302840111600160201b831117156101f057600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b81111561024257600080fd5b82018360208201111561025457600080fd5b803590602001918460018302840111600160201b8311171561027557600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b8111156102c757600080fd5b8201836020820111156102d957600080fd5b803590602001918460018302840111600160201b831117156102fa57600080fd5b91908080601f0160208091040260200160405190810
16040528093929190818152602001838380828437600092019190915250929550610789945050505050565b005b61035a6004803603602081101561035357600080fd5b50356107e5565b604080519115158252519081900360200190f35b6103766107fd565b6040805192835260208301919091528051918290030190f35b6105c2600480360360808110156103a557600080fd5b810190602081018135600160201b8111156103bf57600080fd5b8201836020820111156103d157600080fd5b803590602001918460018302840111600160201b831117156103f257600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b81111561044457600080fd5b82018360208201111561045657600080fd5b803590602001918460018302840111600160201b8311171561047757600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b8111156104c957600080fd5b8201836020820111156104db57600080fd5b803590602001918460018302840111600160201b831117156104fc57600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b81111561054e57600080fd5b82018360208201111561056057600080fd5b803590602001918460018302840111600160201b8311171561058157600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610815945050505050565b60408051918252519081900360200190f35b61033b600480360360608110156105ea57600080fd5b813591602081013591810190606081016040820135600160201b81111561061057600080fd5b82018360208201111561062257600080fd5b803590602001918460018302840111600160201b8311171561064357600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610b28945050505050565b6005546060908190818167ffffffffffffffff811180156106a457600080fd5b506040519080825280602002602001820160405280156106ce578160200160208202803683370190505b50905060005b828110156107295760076000600583815481106106ed57fe5b906000526020600020015
481526020019081526020016000206002015482828151811061071657fe5b60209081029190910101526001016106d4565b508060058080548060200260200160405190810160405280929190818152602001828054801561077857602002820191906000526020600020905b815481526020019060010190808311610764575b505050505090509350935050509091565b61079284610d8d565b61079b576107df565b8251156107b4576107ab83610d8d565b6107b4576107df565b8151156107cd576107c482610d8d565b6107cd576107df565b8051156107df576107dd81610d8d565b505b50505050565b60008181526007602052604090205460ff165b919050565b60008054808252600760205260409091206002015491565b600061081f611454565b61082886610f0e565b9050610832611489565b602082810151600090815260078252604090819020815160e081018352815460ff1615158152600180830154828601526002808401548386015260038401805486516101009482161594909402600019011691909104601f81018790048702830187019095528482529194929360608601939192918301828280156108f85780601f106108cd576101008083540402835291602001916108f8565b820191906000526020600020905b8154815290600101906020018083116108db57829003601f168201915b5050509183525050600482015467ffffffffffffffff1660208201526005820154604082015260069091015460609091015290506000806109398484611001565b945050505091506000600681111561094d57fe5b82600681111561095957fe5b146109ab576040805162461bcd60e51b815260206004820152601860248201527f43616e277420696d706f727420616e7920686561646572730000000000000000604482015290519081900360640190fd5b83604001518114156109c4576001945050505050610b20565b87516109d7576000945050505050610b20565b6109df611489565b6109e98585611171565b90506109f3611454565b6109fc8a610f0e565b90506000610a0a8284611001565b9450505050508160400151811415610a2c576002975050505050505050610b20565b8951610a42576000975050505050505050610b20565b610a4a611489565b610a548388611171565b9050610a5e611454565b610a678c610f0e565b90506000610a758284611001565b9450505050508160400151811415610a9a5760039a5050505050505050505050610b20565b8b51610ab35760009a5050505050505050505050610b20565b610abb611489565b610ac5838b611171565b9050610acf611454565b610ad88e610f0e565b90506000610ae68
284611001565b9450505050508160400151811415610b0e5760049d5050505050505050505050505050610b20565b60009d50505050505050505050505050505b949350505050565b6000828152600760205260409020600201548314610b775760405162461bcd60e51b815260040180806020018281038252602f8152602001806115d5602f913960400191505060405180910390fd5b60028054600354600480546040805160206101006001851615026000190190931696909604601f81018390048302870183019091528086529394600094610c28948a948a9467ffffffffffffffff90921693929091830182828015610c1d5780601f10610bf257610100808354040283529160200191610c1d565b820191906000526020600020905b815481529060010190602001808311610c0057829003601f168201915b5050505050876111d0565b600081815260076020526040902060028281558101546001559091505b828214610d8557506000818152600760209081526040808320600181015460069093529220549092908015610d07576005546000199182019181018214610cd357600060056001830381548110610c9857fe5b906000526020600020015490508060058481548110610cb357fe5b600091825260208083209091019290925591825260069052604090208290555b6005805480610cde57fe5b600082815260208082208301600019908101839055909201909255848252600690526040812055505b826006015483600201541415610d7e57600583015460009081526007602052604090206003805467ffffffffffffffff198116600167ffffffffffffffff92831681019092161782559082018054610d759260049291600261010092821615929092026000190116046114c4565b50505050610d85565b5050610c45565b505050505050565b600080610d98611454565b6000806000610da687611312565b9398509196509450925090506000856006811115610dc057fe5b14610dd3576000955050505050506107f8565b604084015181148015610e27576005805486516001820180845560009384527f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0909201558651825260066020526040909120555b6040805160e0810182526001808252602088810151818401908152898501518486019081526080808c01516060870190815267ffffffffffffffff8c169187019190915260a086018a905260c086018990528b51600090815260078552969096208551815460ff1916901515178155915193820193909355915160028301559251805192939192610ebe9260038501920190611549565b5060808201516004820
1805467ffffffffffffffff191667ffffffffffffffff90921691909117905560a0820151600582015560c090910151600690910155935160005550509015949350505050565b610f16611454565b60008060008060008651602088016040516020810160208101602081016020810160a08588886010600019fa610f4b57600080fd5b84519b5083519a508251995081519850805197505050505050505060608167ffffffffffffffff81118015610f7f57600080fd5b506040519080825280601f01601f191660200182016040528015610faa576020820181803683370190505b5090508115610fd45787516020890160208301848184846011600019fa610fd057600080fd5b5050505b6040805160a081018252968752602087019590955293850192909252606084015250608082015292915050565b600061100b611454565b83516000908152600760205260408120548190819060ff161561103d5750600193508592506000915081905080611167565b60015487604001511161105f5750600293508592506000915081905080611167565b8551158061107857506001876040015103866040015114155b156110925750600393508592506000915081905080611167565b60c0860151158015906110ac575085604001518660c00151145b156110d3578660200151600254146110d35750600493508592506000915081905080611167565b60808087015160a088015160c0890151928a01515191929091156111585767ffffffffffffffff838116141561111d57506005965088955060009450849350839250611167915050565b8960400151811061114257506006965088955060009450849350839250611167915050565b50508751606089015160408a0151600190930192015b60009750899650919450925090505b9295509295909350565b611179611489565b506040805160e08101825260018082528451602083015293820151909301908301526060818101519083015260808082015167ffffffffffffffff169083015260a0808201519083015260c0908101519082015290565b600060608686868686604051602001808681526020018581526020018467ffffffffffffffff1667ffffffffffffffff1681526020018060200180602001838103835285818151815260200191508051906020019080838360005b8381101561124357818101518382015260200161122b565b50505050905090810190601f1680156112705780820380516001836020036101000a031916815260200191505b50838103825284518152845160209182019186019080838360005b838110156112a357818101518382015260200161128b565b50505050905090810190601f1
680156112d05780820380516001836020036101000a031916815260200191505b50975050505050505050604051602081830303815290604052905080516020820160008083836012600019fa61130557600080fd5b5095979650505050505050565b600061131c611454565b6000806000611329611454565b61133287610f0e565b905061133c611489565b602082810151600090815260078252604090819020815160e081018352815460ff1615158152600180830154828601526002808401548386015260038401805486516101009482161594909402600019011691909104601f81018790048702830187019095528482529194929360608601939192918301828280156114025780601f106113d757610100808354040283529160200191611402565b820191906000526020600020905b8154815290600101906020018083116113e557829003601f168201915b5050509183525050600482015467ffffffffffffffff1660208201526005820154604082015260069091015460609091015290506114408282611001565b939c929b5090995097509095509350505050565b6040518060a0016040528060008019168152602001600080191681526020016000815260200160008152602001606081525090565b6040805160e0810182526000808252602082018190529181018290526060808201526080810182905260a0810182905260c081019190915290565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106114fd5780548555611539565b8280016001018555821561153957600052602060002091601f016020900482015b8281111561153957825482559160010191906001019061151e565b506115459291506115b7565b5090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061158a57805160ff1916838001178555611539565b82800160010185558215611539579182015b8281111561153957825182559160200191906001019061159c565b6115d191905b8082111561154557600081556001016115bd565b9056fe4d697373696e672066696e616c69747920746172676574206865616465722066726f6d207468652073746f72616765a2646970667358221220edcaec08f93f74ce5be00b81da5d6b2276138571a33f1cfdca50e5047f854e6e64736f6c63430006060033 \ No newline at end of file diff --git a/polkadot/bridges/relays/ethereum/res/substrate-bridge-metadata.txt b/polkadot/bridges/relays/ethereum/res/substrate-bridge-metadata.txt deleted file 
mode 100644 index 13b7daa9a8b8b317f4e9fad9b99bb4986dc85a91..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/res/substrate-bridge-metadata.txt +++ /dev/null @@ -1,5 +0,0 @@ -Last Change Date: 2020-07-30 -Solc version: 0.6.6+commit.6c089d02.Linux.g++ -Source hash (keccak256): 0xea5d6d744f69157adc2857166792aca139c0b5b186ba89c1011358fbcad90d7e -Source gist: https://github.com/svyatonik/substrate-bridge-sol/blob/6456d3e016c95cd5e6d5e817c23e9e69e739aa78/substrate-bridge.sol -Compiler flags used (command to produce the file): `docker run -i ethereum/solc:0.6.6 --optimize --bin - < substrate-bridge.sol` \ No newline at end of file diff --git a/polkadot/bridges/relays/ethereum/src/cli.yml b/polkadot/bridges/relays/ethereum/src/cli.yml deleted file mode 100644 index c6a5b08e1bb045b1e118e25c907031c088fb2c89..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/src/cli.yml +++ /dev/null @@ -1,166 +0,0 @@ -name: ethsub-bridge -version: "0.1.0" -author: Parity Technologies -about: Parity Ethereum (PoA) <-> Substrate bridge -subcommands: - - eth-to-sub: - about: Synchronize headers from Ethereum node to Substrate node. - args: - - eth-host: ð-host - long: eth-host - value_name: ETH_HOST - help: Connect to Ethereum node at given host. - takes_value: true - - eth-port: ð-port - long: eth-port - value_name: ETH_PORT - help: Connect to Ethereum node at given port. - takes_value: true - - sub-host: &sub-host - long: sub-host - value_name: SUB_HOST - help: Connect to Substrate node at given host. - takes_value: true - - sub-port: &sub-port - long: sub-port - value_name: SUB_PORT - help: Connect to Substrate node websocket server at given port. - takes_value: true - - sub-tx-mode: - long: sub-tx-mode - value_name: MODE - help: Submit headers using signed (default) or unsigned transactions. Third mode - backup - submits signed transactions only when we believe that sync has stalled. 
- takes_value: true - possible_values: - - signed - - unsigned - - backup - - sub-signer: &sub-signer - long: sub-signer - value_name: SUB_SIGNER - help: The SURI of secret key to use when transactions are submitted to the Substrate node. - - sub-signer-password: &sub-signer-password - long: sub-signer-password - value_name: SUB_SIGNER_PASSWORD - help: The password for the SURI of secret key to use when transactions are submitted to the Substrate node. - - sub-pallet-instance: &sub-pallet-instance - long: instance - short: i - value_name: PALLET_INSTANCE - help: The instance of the bridge pallet the relay should follow. - takes_value: true - case_insensitive: true - possible_values: - - Rialto - - Kovan - default_value: Rialto - - no-prometheus: &no-prometheus - long: no-prometheus - help: Do not expose a Prometheus metric endpoint. - - prometheus-host: &prometheus-host - long: prometheus-host - value_name: PROMETHEUS_HOST - help: Expose Prometheus endpoint at given interface. - - prometheus-port: &prometheus-port - long: prometheus-port - value_name: PROMETHEUS_PORT - help: Expose Prometheus endpoint at given port. - - sub-to-eth: - about: Synchronize headers from Substrate node to Ethereum node. - args: - - eth-host: *eth-host - - eth-port: *eth-port - - eth-contract: - long: eth-contract - value_name: ETH_CONTRACT - help: Address of deployed bridge contract. - takes_value: true - - eth-chain-id: ð-chain-id - long: eth-chain-id - value_name: ETH_CHAIN_ID - help: Chain ID to use for signing. - - eth-signer: ð-signer - long: eth-signer - value_name: ETH_SIGNER - help: Hex-encoded secret to use when transactions are submitted to the Ethereum node. - - sub-host: *sub-host - - sub-port: *sub-port - - no-prometheus: *no-prometheus - - prometheus-host: *prometheus-host - - prometheus-port: *prometheus-port - - eth-deploy-contract: - about: Deploy Bridge contract on Ethereum node. 
- args: - - eth-host: *eth-host - - eth-port: *eth-port - - eth-signer: *eth-signer - - eth-chain-id: *eth-chain-id - - eth-contract-code: - long: eth-contract-code - value_name: ETH_CONTRACT_CODE - help: Bytecode of bridge contract. - takes_value: true - - sub-host: *sub-host - - sub-port: *sub-port - - sub-authorities-set-id: - long: sub-authorities-set-id - value_name: SUB_AUTHORITIES_SET_ID - help: ID of initial GRANDPA authorities set. - takes_value: true - - sub-authorities-set: - long: sub-authorities-set - value_name: SUB_AUTHORITIES_SET - help: Encoded initial GRANDPA authorities set. - takes_value: true - - sub-initial-header: - long: sub-initial-header - value_name: SUB_INITIAL_HEADER - help: Encoded initial Substrate header. - takes_value: true - - eth-submit-exchange-tx: - about: Submit lock funds transaction to Ethereum node. - args: - - eth-host: *eth-host - - eth-port: *eth-port - - eth-nonce: - long: eth-nonce - value_name: ETH_NONCE - help: Nonce that have to be used when building transaction. If not specified, read from PoA node. - takes_value: true - - eth-signer: *eth-signer - - eth-chain-id: *eth-chain-id - - eth-amount: - long: eth-amount - value_name: ETH_AMOUNT - help: Amount of ETH to lock (in wei). - takes_value: true - - sub-recipient: - long: sub-recipient - value_name: SUB_RECIPIENT - help: Hex-encoded Public key of funds recipient in Substrate chain. - takes_value: true - - eth-exchange-sub: - about: Submit proof of PoA lock funds transaction to Substrate node. - args: - - eth-host: *eth-host - - eth-port: *eth-port - - eth-start-with-block: - long: eth-start-with-block - value_name: ETH_START_WITH_BLOCK - help: Auto-relay transactions starting with given block number. If not specified, starts with best finalized Ethereum block (known to Substrate node) transactions. - takes_value: true - conflicts_with: - - eth-tx-hash - - eth-tx-hash: - long: eth-tx-hash - value_name: ETH_TX_HASH - help: Hash of the lock funds transaction. 
- takes_value: true - - sub-host: *sub-host - - sub-port: *sub-port - - sub-signer: *sub-signer - - sub-signer-password: *sub-signer-password - - sub-pallet-instance: *sub-pallet-instance - - no-prometheus: *no-prometheus - - prometheus-host: *prometheus-host - - prometheus-port: *prometheus-port diff --git a/polkadot/bridges/relays/ethereum/src/ethereum_client.rs b/polkadot/bridges/relays/ethereum/src/ethereum_client.rs deleted file mode 100644 index 46c2c76feee08914e050632ad6d72b6ac36c03fc..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/src/ethereum_client.rs +++ /dev/null @@ -1,653 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::rpc_errors::RpcError; -use crate::substrate_sync_loop::QueuedRialtoHeader; - -use async_trait::async_trait; -use bp_eth_poa::signatures::secret_to_address; -use codec::{Decode, Encode}; -use ethabi::FunctionOutputDecoder; -use headers_relay::sync_types::SubmittedHeaders; -use relay_ethereum_client::{ - sign_and_submit_transaction, - types::{Address, CallRequest, HeaderId as EthereumHeaderId, Receipt, H256, U256}, - Client as EthereumClient, Error as EthereumNodeError, SigningParams as EthereumSigningParams, -}; -use relay_rialto_client::HeaderId as RialtoHeaderId; -use relay_utils::{HeaderId, MaybeConnectionError}; -use sp_runtime::Justification; -use std::collections::HashSet; - -// to encode/decode contract calls -ethabi_contract::use_contract!(bridge_contract, "res/substrate-bridge-abi.json"); - -type RpcResult = std::result::Result; - -/// A trait which contains methods that work by using multiple low-level RPCs, or more complicated -/// interactions involving, for example, an Ethereum contract. -#[async_trait] -pub trait EthereumHighLevelRpc { - /// Returns best Substrate block that PoA chain knows of. - async fn best_substrate_block(&self, contract_address: Address) -> RpcResult; - - /// Returns true if Substrate header is known to Ethereum node. - async fn substrate_header_known( - &self, - contract_address: Address, - id: RialtoHeaderId, - ) -> RpcResult<(RialtoHeaderId, bool)>; - - /// Submits Substrate headers to Ethereum contract. - async fn submit_substrate_headers( - &self, - params: EthereumSigningParams, - contract_address: Address, - headers: Vec, - ) -> SubmittedHeaders; - - /// Returns ids of incomplete Substrate headers. - async fn incomplete_substrate_headers(&self, contract_address: Address) -> RpcResult>; - - /// Complete Substrate header. 
- async fn complete_substrate_header( - &self, - params: EthereumSigningParams, - contract_address: Address, - id: RialtoHeaderId, - justification: Justification, - ) -> RpcResult; - - /// Submit ethereum transaction. - async fn submit_ethereum_transaction( - &self, - params: &EthereumSigningParams, - contract_address: Option
, - nonce: Option, - double_gas: bool, - encoded_call: Vec, - ) -> RpcResult<()>; - - /// Retrieve transactions receipts for given block. - async fn transaction_receipts( - &self, - id: EthereumHeaderId, - transactions: Vec, - ) -> RpcResult<(EthereumHeaderId, Vec)>; -} - -#[async_trait] -impl EthereumHighLevelRpc for EthereumClient { - async fn best_substrate_block(&self, contract_address: Address) -> RpcResult { - let (encoded_call, call_decoder) = bridge_contract::functions::best_known_header::call(); - let call_request = CallRequest { - to: Some(contract_address), - data: Some(encoded_call.into()), - ..Default::default() - }; - - let call_result = self.eth_call(call_request).await?; - let (number, raw_hash) = call_decoder.decode(&call_result.0)?; - let hash = rialto_runtime::Hash::decode(&mut &raw_hash[..])?; - - if number != number.low_u32().into() { - return Err(RpcError::Ethereum(EthereumNodeError::InvalidSubstrateBlockNumber)); - } - - Ok(HeaderId(number.low_u32(), hash)) - } - - async fn substrate_header_known( - &self, - contract_address: Address, - id: RialtoHeaderId, - ) -> RpcResult<(RialtoHeaderId, bool)> { - let (encoded_call, call_decoder) = bridge_contract::functions::is_known_header::call(id.1); - let call_request = CallRequest { - to: Some(contract_address), - data: Some(encoded_call.into()), - ..Default::default() - }; - - let call_result = self.eth_call(call_request).await?; - let is_known_block = call_decoder.decode(&call_result.0)?; - - Ok((id, is_known_block)) - } - - async fn submit_substrate_headers( - &self, - params: EthereumSigningParams, - contract_address: Address, - headers: Vec, - ) -> SubmittedHeaders { - // read nonce of signer - let address: Address = secret_to_address(¶ms.signer); - let nonce = match self.account_nonce(address).await { - Ok(nonce) => nonce, - Err(error) => { - return SubmittedHeaders { - submitted: Vec::new(), - incomplete: Vec::new(), - rejected: headers.iter().rev().map(|header| header.id()).collect(), - 
fatal_error: Some(error.into()), - } - } - }; - - // submit headers. Note that we're cloning self here. It is ok, because - // cloning `jsonrpsee::Client` only clones reference to background threads - submit_substrate_headers( - EthereumHeadersSubmitter { - client: self.clone(), - params, - contract_address, - nonce, - }, - headers, - ) - .await - } - - async fn incomplete_substrate_headers(&self, contract_address: Address) -> RpcResult> { - let (encoded_call, call_decoder) = bridge_contract::functions::incomplete_headers::call(); - let call_request = CallRequest { - to: Some(contract_address), - data: Some(encoded_call.into()), - ..Default::default() - }; - - let call_result = self.eth_call(call_request).await?; - - // Q: Is is correct to call these "incomplete_ids"? - let (incomplete_headers_numbers, incomplete_headers_hashes) = call_decoder.decode(&call_result.0)?; - let incomplete_ids = incomplete_headers_numbers - .into_iter() - .zip(incomplete_headers_hashes) - .filter_map(|(number, hash)| { - if number != number.low_u32().into() { - return None; - } - - Some(HeaderId(number.low_u32(), hash)) - }) - .collect(); - - Ok(incomplete_ids) - } - - async fn complete_substrate_header( - &self, - params: EthereumSigningParams, - contract_address: Address, - id: RialtoHeaderId, - justification: Justification, - ) -> RpcResult { - let _ = self - .submit_ethereum_transaction( - ¶ms, - Some(contract_address), - None, - false, - bridge_contract::functions::import_finality_proof::encode_input(id.0, id.1, justification), - ) - .await?; - - Ok(id) - } - - async fn submit_ethereum_transaction( - &self, - params: &EthereumSigningParams, - contract_address: Option
, - nonce: Option, - double_gas: bool, - encoded_call: Vec, - ) -> RpcResult<()> { - sign_and_submit_transaction(self, params, contract_address, nonce, double_gas, encoded_call) - .await - .map_err(Into::into) - } - - async fn transaction_receipts( - &self, - id: EthereumHeaderId, - transactions: Vec, - ) -> RpcResult<(EthereumHeaderId, Vec)> { - let mut transaction_receipts = Vec::with_capacity(transactions.len()); - for transaction in transactions { - let transaction_receipt = self.transaction_receipt(transaction).await?; - transaction_receipts.push(transaction_receipt); - } - Ok((id, transaction_receipts)) - } -} - -/// Max number of headers which can be sent to Solidity contract. -pub const HEADERS_BATCH: usize = 4; - -/// Substrate headers to send to the Ethereum light client. -/// -/// The Solidity contract can only accept a fixed number of headers in one go. -/// This struct is meant to encapsulate this limitation. -#[derive(Debug)] -#[cfg_attr(test, derive(Clone))] -pub struct HeadersBatch { - pub header1: QueuedRialtoHeader, - pub header2: Option, - pub header3: Option, - pub header4: Option, -} - -impl HeadersBatch { - /// Create new headers from given header & ids collections. - /// - /// This method will pop `HEADERS_BATCH` items from both collections - /// and construct `Headers` object and a vector of `RialtoHeaderId`s. 
- pub fn pop_from( - headers: &mut Vec, - ids: &mut Vec, - ) -> Result<(Self, Vec), ()> { - if headers.len() != ids.len() { - log::error!(target: "bridge", "Collection size mismatch ({} vs {})", headers.len(), ids.len()); - return Err(()); - } - - let header1 = headers.pop().ok_or(())?; - let header2 = headers.pop(); - let header3 = headers.pop(); - let header4 = headers.pop(); - - let mut submitting_ids = Vec::with_capacity(HEADERS_BATCH); - for _ in 0..HEADERS_BATCH { - submitting_ids.extend(ids.pop().iter()); - } - - Ok(( - Self { - header1, - header2, - header3, - header4, - }, - submitting_ids, - )) - } - - /// Returns unified array of headers. - /// - /// The first element is always `Some`. - fn headers(&self) -> [Option<&QueuedRialtoHeader>; HEADERS_BATCH] { - [ - Some(&self.header1), - self.header2.as_ref(), - self.header3.as_ref(), - self.header4.as_ref(), - ] - } - - /// Encodes all headers. If header is not present an empty vector will be returned. - pub fn encode(&self) -> [Vec; HEADERS_BATCH] { - let encode = |h: &QueuedRialtoHeader| h.header().encode(); - let headers = self.headers(); - [ - headers[0].map(encode).unwrap_or_default(), - headers[1].map(encode).unwrap_or_default(), - headers[2].map(encode).unwrap_or_default(), - headers[3].map(encode).unwrap_or_default(), - ] - } - /// Returns number of contained headers. - pub fn len(&self) -> usize { - let is_set = |h: &Option<&QueuedRialtoHeader>| if h.is_some() { 1 } else { 0 }; - self.headers().iter().map(is_set).sum() - } - - /// Remove headers starting from `idx` (0-based) from this collection. - /// - /// The collection will be left with `[0, idx)` headers. - /// Returns `Err` when `idx == 0`, since `Headers` must contain at least one header, - /// or when `idx > HEADERS_BATCH`. 
- pub fn split_off(&mut self, idx: usize) -> Result<(), ()> { - if idx == 0 || idx > HEADERS_BATCH { - return Err(()); - } - let mut vals: [_; HEADERS_BATCH] = [&mut None, &mut self.header2, &mut self.header3, &mut self.header4]; - for val in vals.iter_mut().skip(idx) { - **val = None; - } - Ok(()) - } -} - -/// Substrate headers submitter API. -#[async_trait] -trait HeadersSubmitter { - /// Returns Ok(0) if all given not-yet-imported headers are complete. - /// Returns Ok(index != 0) where index is 1-based index of first header that is incomplete. - /// - /// Returns Err(()) if contract has rejected headers. This means that the contract is - /// unable to import first header (e.g. it may already be imported). - async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult; - - /// Submit given headers to Ethereum node. - async fn submit_headers(&mut self, headers: HeadersBatch) -> RpcResult<()>; -} - -/// Implementation of Substrate headers submitter that sends headers to running Ethereum node. 
-struct EthereumHeadersSubmitter { - client: EthereumClient, - params: EthereumSigningParams, - contract_address: Address, - nonce: U256, -} - -#[async_trait] -impl HeadersSubmitter for EthereumHeadersSubmitter { - async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult { - let [h1, h2, h3, h4] = headers.encode(); - let (encoded_call, call_decoder) = bridge_contract::functions::is_incomplete_headers::call(h1, h2, h3, h4); - let call_request = CallRequest { - to: Some(self.contract_address), - data: Some(encoded_call.into()), - ..Default::default() - }; - - let call_result = self.client.eth_call(call_request).await?; - let incomplete_index: U256 = call_decoder.decode(&call_result.0)?; - if incomplete_index > HEADERS_BATCH.into() { - return Err(RpcError::Ethereum(EthereumNodeError::InvalidIncompleteIndex)); - } - - Ok(incomplete_index.low_u32() as _) - } - - async fn submit_headers(&mut self, headers: HeadersBatch) -> RpcResult<()> { - let [h1, h2, h3, h4] = headers.encode(); - let result = self - .client - .submit_ethereum_transaction( - &self.params, - Some(self.contract_address), - Some(self.nonce), - false, - bridge_contract::functions::import_headers::encode_input(h1, h2, h3, h4), - ) - .await; - - if result.is_ok() { - self.nonce += U256::one(); - } - - result - } -} - -/// Submit multiple Substrate headers. 
-async fn submit_substrate_headers( - mut header_submitter: impl HeadersSubmitter, - mut headers: Vec, -) -> SubmittedHeaders { - let mut submitted_headers = SubmittedHeaders::default(); - - let mut ids = headers.iter().map(|header| header.id()).rev().collect::>(); - headers.reverse(); - - while !headers.is_empty() { - let (headers, submitting_ids) = - HeadersBatch::pop_from(&mut headers, &mut ids).expect("Headers and ids are not empty; qed"); - - submitted_headers.fatal_error = - submit_substrate_headers_batch(&mut header_submitter, &mut submitted_headers, submitting_ids, headers) - .await; - - if submitted_headers.fatal_error.is_some() { - ids.reverse(); - submitted_headers.rejected.extend(ids); - break; - } - } - - submitted_headers -} - -/// Submit 4 Substrate headers in single PoA transaction. -async fn submit_substrate_headers_batch( - header_submitter: &mut impl HeadersSubmitter, - submitted_headers: &mut SubmittedHeaders, - mut ids: Vec, - mut headers: HeadersBatch, -) -> Option { - debug_assert_eq!(ids.len(), headers.len(),); - - // if parent of first header is either incomplete, or rejected, we assume that contract - // will reject this header as well - let parent_id = headers.header1.parent_id(); - if submitted_headers.rejected.contains(&parent_id) || submitted_headers.incomplete.contains(&parent_id) { - submitted_headers.rejected.extend(ids); - return None; - } - - // check if headers are incomplete - let incomplete_header_index = match header_submitter.is_headers_incomplete(&headers).await { - // All headers valid - Ok(0) => None, - Ok(incomplete_header_index) => Some(incomplete_header_index), - Err(error) => { - // contract has rejected all headers => we do not want to submit it - submitted_headers.rejected.extend(ids); - if error.is_connection_error() { - return Some(error); - } else { - return None; - } - } - }; - - // Modify `ids` and `headers` to only contain values that are going to be accepted. 
- let rejected = if let Some(idx) = incomplete_header_index { - let len = std::cmp::min(idx, ids.len()); - headers - .split_off(len) - .expect("len > 0, the case where all headers are valid is converted to None; qed"); - ids.split_off(len) - } else { - Vec::new() - }; - let submitted = ids; - let submit_result = header_submitter.submit_headers(headers).await; - match submit_result { - Ok(_) => { - if incomplete_header_index.is_some() { - submitted_headers.incomplete.extend(submitted.iter().last().cloned()); - } - submitted_headers.submitted.extend(submitted); - submitted_headers.rejected.extend(rejected); - None - } - Err(error) => { - submitted_headers.rejected.extend(submitted); - submitted_headers.rejected.extend(rejected); - Some(error) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_runtime::traits::Header; - - struct TestHeadersSubmitter { - incomplete: Vec, - failed: Vec, - } - - #[async_trait] - impl HeadersSubmitter for TestHeadersSubmitter { - async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult { - if self.incomplete.iter().any(|i| i.0 == headers.header1.id().0) { - Ok(1) - } else { - Ok(0) - } - } - - async fn submit_headers(&mut self, headers: HeadersBatch) -> RpcResult<()> { - if self.failed.iter().any(|i| i.0 == headers.header1.id().0) { - Err(RpcError::Ethereum(EthereumNodeError::InvalidSubstrateBlockNumber)) - } else { - Ok(()) - } - } - } - - fn header(number: rialto_runtime::BlockNumber) -> QueuedRialtoHeader { - QueuedRialtoHeader::new( - rialto_runtime::Header::new( - number, - Default::default(), - Default::default(), - if number == 0 { - Default::default() - } else { - header(number - 1).id().1 - }, - Default::default(), - ) - .into(), - ) - } - - #[test] - fn descendants_of_incomplete_headers_are_not_submitted() { - let submitted_headers = async_std::task::block_on(submit_substrate_headers( - TestHeadersSubmitter { - incomplete: vec![header(5).id()], - failed: vec![], - }, - vec![header(5), header(6)], 
- )); - assert_eq!(submitted_headers.submitted, vec![header(5).id()]); - assert_eq!(submitted_headers.incomplete, vec![header(5).id()]); - assert_eq!(submitted_headers.rejected, vec![header(6).id()]); - assert!(submitted_headers.fatal_error.is_none()); - } - - #[test] - fn headers_after_fatal_error_are_not_submitted() { - let submitted_headers = async_std::task::block_on(submit_substrate_headers( - TestHeadersSubmitter { - incomplete: vec![], - failed: vec![header(9).id()], - }, - vec![ - header(5), - header(6), - header(7), - header(8), - header(9), - header(10), - header(11), - ], - )); - assert_eq!( - submitted_headers.submitted, - vec![header(5).id(), header(6).id(), header(7).id(), header(8).id()] - ); - assert_eq!(submitted_headers.incomplete, vec![]); - assert_eq!( - submitted_headers.rejected, - vec![header(9).id(), header(10).id(), header(11).id(),] - ); - assert!(submitted_headers.fatal_error.is_some()); - } - - fn headers_batch() -> HeadersBatch { - let mut init_headers = vec![header(1), header(2), header(3), header(4), header(5)]; - init_headers.reverse(); - let mut init_ids = init_headers.iter().map(|h| h.id()).collect(); - let (headers, ids) = HeadersBatch::pop_from(&mut init_headers, &mut init_ids).unwrap(); - assert_eq!(init_headers, vec![header(5)]); - assert_eq!(init_ids, vec![header(5).id()]); - assert_eq!( - ids, - vec![header(1).id(), header(2).id(), header(3).id(), header(4).id()] - ); - headers - } - - #[test] - fn headers_batch_len() { - let headers = headers_batch(); - assert_eq!(headers.len(), 4); - } - - #[test] - fn headers_batch_encode() { - let headers = headers_batch(); - assert_eq!( - headers.encode(), - [ - header(1).header().encode(), - header(2).header().encode(), - header(3).header().encode(), - header(4).header().encode(), - ] - ); - } - - #[test] - fn headers_batch_split_off() { - // given - let mut headers = headers_batch(); - - // when - assert!(headers.split_off(0).is_err()); - assert_eq!(headers.header1, header(1)); - 
assert!(headers.header2.is_some()); - assert!(headers.header3.is_some()); - assert!(headers.header4.is_some()); - - // when - let mut h = headers.clone(); - h.split_off(1).unwrap(); - assert!(h.header2.is_none()); - assert!(h.header3.is_none()); - assert!(h.header4.is_none()); - - // when - let mut h = headers.clone(); - h.split_off(2).unwrap(); - assert!(h.header2.is_some()); - assert!(h.header3.is_none()); - assert!(h.header4.is_none()); - - // when - let mut h = headers.clone(); - h.split_off(3).unwrap(); - assert!(h.header2.is_some()); - assert!(h.header3.is_some()); - assert!(h.header4.is_none()); - - // when - let mut h = headers; - h.split_off(4).unwrap(); - assert!(h.header2.is_some()); - assert!(h.header3.is_some()); - assert!(h.header4.is_some()); - } -} diff --git a/polkadot/bridges/relays/ethereum/src/ethereum_deploy_contract.rs b/polkadot/bridges/relays/ethereum/src/ethereum_deploy_contract.rs deleted file mode 100644 index 25f8c873e590360f1ff0e8cecbd4624f20dd9a5b..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/src/ethereum_deploy_contract.rs +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::ethereum_client::{bridge_contract, EthereumHighLevelRpc}; -use crate::rpc_errors::RpcError; - -use codec::{Decode, Encode}; -use num_traits::Zero; -use relay_ethereum_client::{ - Client as EthereumClient, ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams, -}; -use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto}; -use relay_substrate_client::{ - Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams, OpaqueGrandpaAuthoritiesSet, -}; -use relay_utils::HeaderId; - -/// Ethereum synchronization parameters. -#[derive(Debug)] -pub struct EthereumDeployContractParams { - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Ethereum signing params. - pub eth_sign: EthereumSigningParams, - /// Ethereum contract bytecode. - pub eth_contract_code: Vec, - /// Substrate connection params. - pub sub_params: SubstrateConnectionParams, - /// Initial authorities set id. - pub sub_initial_authorities_set_id: Option, - /// Initial authorities set. - pub sub_initial_authorities_set: Option>, - /// Initial header. - pub sub_initial_header: Option>, -} - -/// Deploy Bridge contract on Ethereum chain. 
-pub fn run(params: EthereumDeployContractParams) { - let mut local_pool = futures::executor::LocalPool::new(); - - let EthereumDeployContractParams { - eth_params, - eth_sign, - sub_params, - sub_initial_authorities_set_id, - sub_initial_authorities_set, - sub_initial_header, - eth_contract_code, - } = params; - - let result = local_pool.run_until(async move { - let eth_client = EthereumClient::new(eth_params); - let sub_client = SubstrateClient::::new(sub_params).await.map_err(RpcError::Substrate)?; - - let (initial_header_id, initial_header) = prepare_initial_header(&sub_client, sub_initial_header).await?; - let initial_set_id = sub_initial_authorities_set_id.unwrap_or(0); - let initial_set = prepare_initial_authorities_set( - &sub_client, - initial_header_id.1, - sub_initial_authorities_set, - ).await?; - - log::info!( - target: "bridge", - "Deploying Ethereum contract.\r\n\tInitial header: {:?}\r\n\tInitial header id: {:?}\r\n\tInitial header encoded: {}\r\n\tInitial authorities set ID: {}\r\n\tInitial authorities set: {}", - initial_header, - initial_header_id, - hex::encode(&initial_header), - initial_set_id, - hex::encode(&initial_set), - ); - - deploy_bridge_contract( - ð_client, - ð_sign, - eth_contract_code, - initial_header, - initial_set_id, - initial_set, - ).await - }); - - if let Err(error) = result { - log::error!(target: "bridge", "{}", error); - } -} - -/// Prepare initial header. 
-async fn prepare_initial_header( - sub_client: &SubstrateClient, - sub_initial_header: Option>, -) -> Result<(RialtoHeaderId, Vec), String> { - match sub_initial_header { - Some(raw_initial_header) => match rialto_runtime::Header::decode(&mut &raw_initial_header[..]) { - Ok(initial_header) => Ok(( - HeaderId(initial_header.number, initial_header.hash()), - raw_initial_header, - )), - Err(error) => Err(format!("Error decoding initial header: {}", error)), - }, - None => { - let initial_header = sub_client.header_by_number(Zero::zero()).await; - initial_header - .map(|header| (HeaderId(Zero::zero(), header.hash()), header.encode())) - .map_err(|error| format!("Error reading Substrate genesis header: {:?}", error)) - } - } -} - -/// Prepare initial GRANDPA authorities set. -async fn prepare_initial_authorities_set( - sub_client: &SubstrateClient, - sub_initial_header_hash: rialto_runtime::Hash, - sub_initial_authorities_set: Option>, -) -> Result { - let initial_authorities_set = match sub_initial_authorities_set { - Some(initial_authorities_set) => Ok(initial_authorities_set), - None => sub_client.grandpa_authorities_set(sub_initial_header_hash).await, - }; - - initial_authorities_set.map_err(|error| format!("Error reading GRANDPA authorities set: {:?}", error)) -} - -/// Deploy bridge contract to Ethereum chain. 
-async fn deploy_bridge_contract( - eth_client: &EthereumClient, - params: &EthereumSigningParams, - contract_code: Vec, - initial_header: Vec, - initial_set_id: u64, - initial_authorities: Vec, -) -> Result<(), String> { - eth_client - .submit_ethereum_transaction( - params, - None, - None, - false, - bridge_contract::constructor(contract_code, initial_header, initial_set_id, initial_authorities), - ) - .await - .map_err(|error| format!("Error deploying contract: {:?}", error)) -} diff --git a/polkadot/bridges/relays/ethereum/src/ethereum_exchange.rs b/polkadot/bridges/relays/ethereum/src/ethereum_exchange.rs deleted file mode 100644 index 92ba211535129447115a35cff61e49d1002b1e13..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/src/ethereum_exchange.rs +++ /dev/null @@ -1,394 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relaying proofs of PoA -> Substrate exchange transactions. 
- -use crate::instances::BridgeInstance; -use crate::rialto_client::{SubmitEthereumExchangeTransactionProof, SubstrateHighLevelRpc}; -use crate::rpc_errors::RpcError; -use crate::substrate_types::into_substrate_ethereum_receipt; - -use async_trait::async_trait; -use bp_currency_exchange::MaybeLockFundsTransaction; -use exchange_relay::exchange::{ - relay_single_transaction_proof, SourceBlock, SourceClient, SourceTransaction, TargetClient, - TransactionProofPipeline, -}; -use exchange_relay::exchange_loop::{run as run_loop, InMemoryStorage}; -use relay_ethereum_client::{ - types::{ - HeaderId as EthereumHeaderId, HeaderWithTransactions as EthereumHeaderWithTransactions, - Transaction as EthereumTransaction, TransactionHash as EthereumTransactionHash, H256, HEADER_ID_PROOF, - }, - Client as EthereumClient, ConnectionParams as EthereumConnectionParams, -}; -use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{ - Chain as SubstrateChain, Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams, -}; -use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient, HeaderId}; -use rialto_runtime::exchange::EthereumTransactionInclusionProof; -use std::{sync::Arc, time::Duration}; - -/// Interval at which we ask Ethereum node for updates. -const ETHEREUM_TICK_INTERVAL: Duration = Duration::from_secs(10); - -/// Exchange relay mode. -#[derive(Debug)] -pub enum ExchangeRelayMode { - /// Relay single transaction and quit. - Single(EthereumTransactionHash), - /// Auto-relay transactions starting with given block. - Auto(Option), -} - -/// PoA exchange transaction relay params. -#[derive(Debug)] -pub struct EthereumExchangeParams { - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Substrate connection params. - pub sub_params: SubstrateConnectionParams, - /// Substrate signing params. - pub sub_sign: RialtoSigningParams, - /// Relay working mode. 
- pub mode: ExchangeRelayMode, - /// Metrics parameters. - pub metrics_params: Option, - /// Instance of the bridge pallet being synchronized. - pub instance: Arc, -} - -/// Ethereum to Substrate exchange pipeline. -struct EthereumToSubstrateExchange; - -impl TransactionProofPipeline for EthereumToSubstrateExchange { - const SOURCE_NAME: &'static str = "Ethereum"; - const TARGET_NAME: &'static str = "Substrate"; - - type Block = EthereumSourceBlock; - type TransactionProof = EthereumTransactionInclusionProof; -} - -/// Ethereum source block. -struct EthereumSourceBlock(EthereumHeaderWithTransactions); - -impl SourceBlock for EthereumSourceBlock { - type Hash = H256; - type Number = u64; - type Transaction = EthereumSourceTransaction; - - fn id(&self) -> EthereumHeaderId { - HeaderId( - self.0.number.expect(HEADER_ID_PROOF).as_u64(), - self.0.hash.expect(HEADER_ID_PROOF), - ) - } - - fn transactions(&self) -> Vec { - self.0 - .transactions - .iter() - .cloned() - .map(EthereumSourceTransaction) - .collect() - } -} - -/// Ethereum source transaction. -struct EthereumSourceTransaction(EthereumTransaction); - -impl SourceTransaction for EthereumSourceTransaction { - type Hash = EthereumTransactionHash; - - fn hash(&self) -> Self::Hash { - self.0.hash - } -} - -/// Ethereum node as transactions proof source. 
-#[derive(Clone)] -struct EthereumTransactionsSource { - client: EthereumClient, -} - -#[async_trait] -impl RelayClient for EthereumTransactionsSource { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - self.client.reconnect(); - Ok(()) - } -} - -#[async_trait] -impl SourceClient for EthereumTransactionsSource { - async fn tick(&self) { - async_std::task::sleep(ETHEREUM_TICK_INTERVAL).await; - } - - async fn block_by_hash(&self, hash: H256) -> Result { - self.client - .header_by_hash_with_transactions(hash) - .await - .map(EthereumSourceBlock) - .map_err(Into::into) - } - - async fn block_by_number(&self, number: u64) -> Result { - self.client - .header_by_number_with_transactions(number) - .await - .map(EthereumSourceBlock) - .map_err(Into::into) - } - - async fn transaction_block( - &self, - hash: &EthereumTransactionHash, - ) -> Result, RpcError> { - let eth_tx = match self.client.transaction_by_hash(*hash).await? { - Some(eth_tx) => eth_tx, - None => return Ok(None), - }; - - // we need transaction to be mined => check if it is included in the block - let (eth_header_id, eth_tx_index) = match (eth_tx.block_number, eth_tx.block_hash, eth_tx.transaction_index) { - (Some(block_number), Some(block_hash), Some(transaction_index)) => ( - HeaderId(block_number.as_u64(), block_hash), - transaction_index.as_u64() as _, - ), - _ => return Ok(None), - }; - - Ok(Some((eth_header_id, eth_tx_index))) - } - - async fn transaction_proof( - &self, - block: &EthereumSourceBlock, - tx_index: usize, - ) -> Result { - const TRANSACTION_HAS_RAW_FIELD_PROOF: &str = "RPC level checks that transactions from Ethereum\ - node are having `raw` field; qed"; - const BLOCK_HAS_HASH_FIELD_PROOF: &str = "RPC level checks that block has `hash` field; qed"; - - let mut transaction_proof = Vec::with_capacity(block.0.transactions.len()); - for tx in &block.0.transactions { - let raw_tx_receipt = self - .client - .transaction_receipt(tx.hash) - .await - 
.map(|receipt| into_substrate_ethereum_receipt(&receipt)) - .map(|receipt| receipt.rlp())?; - let raw_tx = tx.raw.clone().expect(TRANSACTION_HAS_RAW_FIELD_PROOF).0; - transaction_proof.push((raw_tx, raw_tx_receipt)); - } - - Ok(EthereumTransactionInclusionProof { - block: block.0.hash.expect(BLOCK_HAS_HASH_FIELD_PROOF), - index: tx_index as _, - proof: transaction_proof, - }) - } -} - -/// Substrate node as transactions proof target. -#[derive(Clone)] -struct SubstrateTransactionsTarget { - client: SubstrateClient, - sign_params: RialtoSigningParams, - bridge_instance: Arc, -} - -#[async_trait] -impl RelayClient for SubstrateTransactionsTarget { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - Ok(self.client.reconnect().await?) - } -} - -#[async_trait] -impl TargetClient for SubstrateTransactionsTarget { - async fn tick(&self) { - async_std::task::sleep(Rialto::AVERAGE_BLOCK_INTERVAL).await; - } - - async fn is_header_known(&self, id: &EthereumHeaderId) -> Result { - self.client.ethereum_header_known(*id).await - } - - async fn is_header_finalized(&self, id: &EthereumHeaderId) -> Result { - // we check if header is finalized by simple comparison of the header number and - // number of best finalized PoA header known to Substrate node. 
- // - // this may lead to failure in tx proof import if PoA reorganization has happened - // after we have checked that our tx has been included into given block - // - // the fix is easy, but since this code is mostly developed for demonstration purposes, - // I'm leaving this KISS-based design here - let best_finalized_ethereum_block = self.client.best_ethereum_finalized_block().await?; - Ok(id.0 <= best_finalized_ethereum_block.0) - } - - async fn best_finalized_header_id(&self) -> Result { - // we can't continue to relay exchange proofs if Substrate node is out of sync, because - // it may have already received (some of) proofs that we're going to relay - self.client.ensure_synced().await?; - - self.client.best_ethereum_finalized_block().await - } - - async fn filter_transaction_proof(&self, proof: &EthereumTransactionInclusionProof) -> Result { - // let's try to parse transaction locally - let (raw_tx, raw_tx_receipt) = &proof.proof[proof.index as usize]; - let parse_result = rialto_runtime::exchange::EthTransaction::parse(raw_tx); - if parse_result.is_err() { - return Ok(false); - } - - // now let's check if transaction is successful - match bp_eth_poa::Receipt::is_successful_raw_receipt(raw_tx_receipt) { - Ok(true) => (), - _ => return Ok(false), - } - - // seems that transaction is relayable - let's check if runtime is able to import it - // (we can't if e.g. header is pruned or there's some issue with tx data) - self.client.verify_exchange_transaction_proof(proof.clone()).await - } - - async fn submit_transaction_proof(&self, proof: EthereumTransactionInclusionProof) -> Result<(), RpcError> { - let (sign_params, bridge_instance) = (self.sign_params.clone(), self.bridge_instance.clone()); - self.client - .submit_exchange_transaction_proof(sign_params, bridge_instance, proof) - .await - } -} - -/// Relay exchange transaction proof(s) to Substrate node. 
-pub fn run(params: EthereumExchangeParams) { - match params.mode { - ExchangeRelayMode::Single(eth_tx_hash) => run_single_transaction_relay(params, eth_tx_hash), - ExchangeRelayMode::Auto(eth_start_with_block_number) => { - run_auto_transactions_relay_loop(params, eth_start_with_block_number) - } - }; -} - -/// Run single transaction proof relay and stop. -fn run_single_transaction_relay(params: EthereumExchangeParams, eth_tx_hash: H256) { - let mut local_pool = futures::executor::LocalPool::new(); - - let EthereumExchangeParams { - eth_params, - sub_params, - sub_sign, - instance, - .. - } = params; - - let result = local_pool.run_until(async move { - let eth_client = EthereumClient::new(eth_params); - let sub_client = SubstrateClient::::new(sub_params) - .await - .map_err(RpcError::Substrate)?; - - let source = EthereumTransactionsSource { client: eth_client }; - let target = SubstrateTransactionsTarget { - client: sub_client, - sign_params: sub_sign, - bridge_instance: instance, - }; - - relay_single_transaction_proof(&source, &target, eth_tx_hash).await - }); - - match result { - Ok(_) => { - log::info!( - target: "bridge", - "Ethereum transaction {} proof has been successfully submitted to Substrate node", - eth_tx_hash, - ); - } - Err(err) => { - log::error!( - target: "bridge", - "Error submitting Ethereum transaction {} proof to Substrate node: {}", - eth_tx_hash, - err, - ); - } - } -} - -/// Run auto-relay loop. -fn run_auto_transactions_relay_loop(params: EthereumExchangeParams, eth_start_with_block_number: Option) { - let EthereumExchangeParams { - eth_params, - sub_params, - sub_sign, - metrics_params, - instance, - .. 
- } = params; - - let do_run_loop = move || -> Result<(), String> { - let eth_client = EthereumClient::new(eth_params); - let sub_client = async_std::task::block_on(SubstrateClient::::new(sub_params)) - .map_err(|err| format!("Error starting Substrate client: {:?}", err))?; - - let eth_start_with_block_number = match eth_start_with_block_number { - Some(eth_start_with_block_number) => eth_start_with_block_number, - None => { - async_std::task::block_on(sub_client.best_ethereum_finalized_block()) - .map_err(|err| { - format!( - "Error retrieving best finalized Ethereum block from Substrate node: {:?}", - err - ) - })? - .0 - } - }; - - run_loop( - InMemoryStorage::new(eth_start_with_block_number), - EthereumTransactionsSource { client: eth_client }, - SubstrateTransactionsTarget { - client: sub_client, - sign_params: sub_sign, - bridge_instance: instance, - }, - metrics_params, - futures::future::pending(), - ); - - Ok(()) - }; - - if let Err(err) = do_run_loop() { - log::error!( - target: "bridge", - "Error auto-relaying Ethereum transactions proofs to Substrate node: {}", - err, - ); - } -} diff --git a/polkadot/bridges/relays/ethereum/src/ethereum_exchange_submit.rs b/polkadot/bridges/relays/ethereum/src/ethereum_exchange_submit.rs deleted file mode 100644 index d2842b78a4a2e34191a3c526e6d4b3c5e61d9c3b..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/src/ethereum_exchange_submit.rs +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Submitting Ethereum -> Substrate exchange transactions. - -use bp_eth_poa::{ - signatures::{secret_to_address, SignTransaction}, - UnsignedTransaction, -}; -use relay_ethereum_client::{ - types::{CallRequest, U256}, - Client as EthereumClient, ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams, -}; -use rialto_runtime::exchange::LOCK_FUNDS_ADDRESS; - -/// Ethereum exchange transaction params. -#[derive(Debug)] -pub struct EthereumExchangeSubmitParams { - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Ethereum signing params. - pub eth_sign: EthereumSigningParams, - /// Ethereum signer nonce. - pub eth_nonce: Option, - /// Amount of Ethereum tokens to lock. - pub eth_amount: U256, - /// Funds recipient on Substrate side. - pub sub_recipient: [u8; 32], -} - -/// Submit single Ethereum -> Substrate exchange transaction. 
-pub fn run(params: EthereumExchangeSubmitParams) { - let mut local_pool = futures::executor::LocalPool::new(); - - let EthereumExchangeSubmitParams { - eth_params, - eth_sign, - eth_nonce, - eth_amount, - sub_recipient, - } = params; - - let result: Result<_, String> = local_pool.run_until(async move { - let eth_client = EthereumClient::new(eth_params); - - let eth_signer_address = secret_to_address(ð_sign.signer); - let sub_recipient_encoded = sub_recipient; - let nonce = match eth_nonce { - Some(eth_nonce) => eth_nonce, - None => eth_client - .account_nonce(eth_signer_address) - .await - .map_err(|err| format!("error fetching acount nonce: {:?}", err))?, - }; - let gas = eth_client - .estimate_gas(CallRequest { - from: Some(eth_signer_address), - to: Some(LOCK_FUNDS_ADDRESS.into()), - value: Some(eth_amount), - data: Some(sub_recipient_encoded.to_vec().into()), - ..Default::default() - }) - .await - .map_err(|err| format!("error estimating gas requirements: {:?}", err))?; - let eth_tx_unsigned = UnsignedTransaction { - nonce, - gas_price: eth_sign.gas_price, - gas, - to: Some(LOCK_FUNDS_ADDRESS.into()), - value: eth_amount, - payload: sub_recipient_encoded.to_vec(), - }; - let eth_tx_signed = eth_tx_unsigned - .clone() - .sign_by(ð_sign.signer, Some(eth_sign.chain_id)); - eth_client - .submit_transaction(eth_tx_signed) - .await - .map_err(|err| format!("error submitting transaction: {:?}", err))?; - - Ok(eth_tx_unsigned) - }); - - match result { - Ok(eth_tx_unsigned) => { - log::info!( - target: "bridge", - "Exchange transaction has been submitted to Ethereum node: {:?}", - eth_tx_unsigned, - ); - } - Err(err) => { - log::error!( - target: "bridge", - "Error submitting exchange transaction to Ethereum node: {}", - err, - ); - } - } -} diff --git a/polkadot/bridges/relays/ethereum/src/ethereum_sync_loop.rs b/polkadot/bridges/relays/ethereum/src/ethereum_sync_loop.rs deleted file mode 100644 index 
c8741c2fe18a6b2c3b2a13490d308ef411843860..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/src/ethereum_sync_loop.rs +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Ethereum PoA -> Rialto-Substrate synchronization. - -use crate::ethereum_client::EthereumHighLevelRpc; -use crate::instances::BridgeInstance; -use crate::rialto_client::{SubmitEthereumHeaders, SubstrateHighLevelRpc}; -use crate::rpc_errors::RpcError; -use crate::substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts}; - -use async_trait::async_trait; -use codec::Encode; -use headers_relay::{ - sync::{HeadersSyncParams, TargetTransactionMode}, - sync_loop::{SourceClient, TargetClient}, - sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders}, -}; -use relay_ethereum_client::{ - types::{HeaderHash, HeaderId as EthereumHeaderId, Receipt, SyncHeader as Header}, - Client as EthereumClient, ConnectionParams as EthereumConnectionParams, -}; -use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{ - Chain as SubstrateChain, Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams, -}; -use relay_utils::{metrics::MetricsParams, 
relay_loop::Client as RelayClient}; - -use std::fmt::Debug; -use std::{collections::HashSet, sync::Arc, time::Duration}; - -pub mod consts { - use super::*; - - /// Interval at which we check new Ethereum headers when we are synced/almost synced. - pub const ETHEREUM_TICK_INTERVAL: Duration = Duration::from_secs(10); - /// Max number of headers in single submit transaction. - pub const MAX_HEADERS_IN_SINGLE_SUBMIT: usize = 32; - /// Max total size of headers in single submit transaction. This only affects signed - /// submissions, when several headers are submitted at once. 4096 is the maximal **expected** - /// size of the Ethereum header + transactions receipts (if they're required). - pub const MAX_HEADERS_SIZE_IN_SINGLE_SUBMIT: usize = MAX_HEADERS_IN_SINGLE_SUBMIT * 4096; - /// Max Ethereum headers we want to have in all 'before-submitted' states. - pub const MAX_FUTURE_HEADERS_TO_DOWNLOAD: usize = 128; - /// Max Ethereum headers count we want to have in 'submitted' state. - pub const MAX_SUBMITTED_HEADERS: usize = 128; - /// Max depth of in-memory headers in all states. Past this depth they will be forgotten (pruned). - pub const PRUNE_DEPTH: u32 = 4096; -} - -/// Ethereum synchronization parameters. -#[derive(Debug)] -pub struct EthereumSyncParams { - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Substrate connection params. - pub sub_params: SubstrateConnectionParams, - /// Substrate signing params. - pub sub_sign: RialtoSigningParams, - /// Synchronization parameters. - pub sync_params: HeadersSyncParams, - /// Metrics parameters. - pub metrics_params: Option, - /// Instance of the bridge pallet being synchronized. - pub instance: Arc, -} - -/// Ethereum synchronization pipeline. 
-#[derive(Clone, Copy, Debug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct EthereumHeadersSyncPipeline; - -impl HeadersSyncPipeline for EthereumHeadersSyncPipeline { - const SOURCE_NAME: &'static str = "Ethereum"; - const TARGET_NAME: &'static str = "Substrate"; - - type Hash = HeaderHash; - type Number = u64; - type Header = Header; - type Extra = Vec; - type Completion = (); - - fn estimate_size(source: &QueuedHeader) -> usize { - into_substrate_ethereum_header(source.header()).encode().len() - + into_substrate_ethereum_receipts(source.extra()) - .map(|extra| extra.encode().len()) - .unwrap_or(0) - } -} - -/// Queued ethereum header ID. -pub type QueuedEthereumHeader = QueuedHeader; - -/// Ethereum client as headers source. -#[derive(Clone)] -struct EthereumHeadersSource { - /// Ethereum node client. - client: EthereumClient, -} - -impl EthereumHeadersSource { - fn new(client: EthereumClient) -> Self { - Self { client } - } -} - -#[async_trait] -impl RelayClient for EthereumHeadersSource { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - self.client.reconnect(); - Ok(()) - } -} - -#[async_trait] -impl SourceClient for EthereumHeadersSource { - async fn best_block_number(&self) -> Result { - // we **CAN** continue to relay headers if Ethereum node is out of sync, because - // Substrate node may be missing headers that are already available at the Ethereum - - self.client.best_block_number().await.map_err(Into::into) - } - - async fn header_by_hash(&self, hash: HeaderHash) -> Result { - self.client - .header_by_hash(hash) - .await - .map(Into::into) - .map_err(Into::into) - } - - async fn header_by_number(&self, number: u64) -> Result { - self.client - .header_by_number(number) - .await - .map(Into::into) - .map_err(Into::into) - } - - async fn header_completion(&self, id: EthereumHeaderId) -> Result<(EthereumHeaderId, Option<()>), RpcError> { - Ok((id, None)) - } - - async fn header_extra( - &self, - id: 
EthereumHeaderId, - header: QueuedEthereumHeader, - ) -> Result<(EthereumHeaderId, Vec), RpcError> { - self.client - .transaction_receipts(id, header.header().transactions.clone()) - .await - } -} - -#[derive(Clone)] -struct SubstrateHeadersTarget { - /// Substrate node client. - client: SubstrateClient, - /// Whether we want to submit signed (true), or unsigned (false) transactions. - sign_transactions: bool, - /// Substrate signing params. - sign_params: RialtoSigningParams, - /// Bridge instance used in Ethereum to Substrate sync. - bridge_instance: Arc, -} - -impl SubstrateHeadersTarget { - fn new( - client: SubstrateClient, - sign_transactions: bool, - sign_params: RialtoSigningParams, - bridge_instance: Arc, - ) -> Self { - Self { - client, - sign_transactions, - sign_params, - bridge_instance, - } - } -} - -#[async_trait] -impl RelayClient for SubstrateHeadersTarget { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - Ok(self.client.reconnect().await?) 
- } -} - -#[async_trait] -impl TargetClient for SubstrateHeadersTarget { - async fn best_header_id(&self) -> Result { - // we can't continue to relay headers if Substrate node is out of sync, because - // it may have already received (some of) headers that we're going to relay - self.client.ensure_synced().await?; - - self.client.best_ethereum_block().await - } - - async fn is_known_header(&self, id: EthereumHeaderId) -> Result<(EthereumHeaderId, bool), RpcError> { - Ok((id, self.client.ethereum_header_known(id).await?)) - } - - async fn submit_headers(&self, headers: Vec) -> SubmittedHeaders { - let (sign_params, bridge_instance, sign_transactions) = ( - self.sign_params.clone(), - self.bridge_instance.clone(), - self.sign_transactions, - ); - self.client - .submit_ethereum_headers(sign_params, bridge_instance, headers, sign_transactions) - .await - } - - async fn incomplete_headers_ids(&self) -> Result, RpcError> { - Ok(HashSet::new()) - } - - #[allow(clippy::unit_arg)] - async fn complete_header(&self, id: EthereumHeaderId, _completion: ()) -> Result { - Ok(id) - } - - async fn requires_extra(&self, header: QueuedEthereumHeader) -> Result<(EthereumHeaderId, bool), RpcError> { - // we can minimize number of receipts_check calls by checking header - // logs bloom here, but it may give us false positives (when authorities - // source is contract, we never need any logs) - let id = header.header().id(); - let sub_eth_header = into_substrate_ethereum_header(header.header()); - Ok((id, self.client.ethereum_receipts_required(sub_eth_header).await?)) - } -} - -/// Run Ethereum headers synchronization. 
-pub fn run(params: EthereumSyncParams) -> Result<(), RpcError> { - let EthereumSyncParams { - eth_params, - sub_params, - sub_sign, - sync_params, - metrics_params, - instance, - } = params; - - let eth_client = EthereumClient::new(eth_params); - let sub_client = async_std::task::block_on(async { SubstrateClient::::new(sub_params).await })?; - - let sign_sub_transactions = match sync_params.target_tx_mode { - TargetTransactionMode::Signed | TargetTransactionMode::Backup => true, - TargetTransactionMode::Unsigned => false, - }; - - let source = EthereumHeadersSource::new(eth_client); - let target = SubstrateHeadersTarget::new(sub_client, sign_sub_transactions, sub_sign, instance); - - headers_relay::sync_loop::run( - source, - consts::ETHEREUM_TICK_INTERVAL, - target, - Rialto::AVERAGE_BLOCK_INTERVAL, - (), - sync_params, - metrics_params, - futures::future::pending(), - ); - - Ok(()) -} diff --git a/polkadot/bridges/relays/ethereum/src/instances.rs b/polkadot/bridges/relays/ethereum/src/instances.rs deleted file mode 100644 index 7f29c26d8c362f360e349f8b1f34b06296c8d98e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/src/instances.rs +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! 
The PoA Bridge Pallet provides a way to include multiple instances of itself in a runtime. When -//! synchronizing a Substrate chain which can include multiple instances of the bridge pallet we -//! must somehow decide which of the instances to sync. -//! -//! Note that each instance of the bridge pallet is coupled with an instance of the currency exchange -//! pallet. We must also have a way to create `Call`s for the correct currency exchange instance. -//! -//! This module helps by preparing the correct `Call`s for each of the different pallet instances. - -use crate::ethereum_sync_loop::QueuedEthereumHeader; -use crate::substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts}; - -use rialto_runtime::exchange::EthereumTransactionInclusionProof as Proof; -use rialto_runtime::Call; - -/// Interface for `Calls` which are needed to correctly sync the bridge. -/// -/// Each instance of the bridge and currency exchange pallets in the bridge runtime requires similar -/// but slightly different `Call` in order to be synchronized. -pub trait BridgeInstance: Send + Sync + std::fmt::Debug { - /// Used to build a `Call` for importing signed headers to a Substrate runtime. - fn build_signed_header_call(&self, headers: Vec) -> Call; - /// Used to build a `Call` for importing an unsigned header to a Substrate runtime. - fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call; - /// Used to build a `Call` for importing peer transactions to a Substrate runtime. - fn build_currency_exchange_call(&self, proof: Proof) -> Call; -} - -/// Corresponds to the Rialto instance used in the bridge runtime. 
-#[derive(Default, Clone, Debug)] -pub struct RialtoPoA; - -impl BridgeInstance for RialtoPoA { - fn build_signed_header_call(&self, headers: Vec) -> Call { - let pallet_call = rialto_runtime::BridgeEthPoACall::import_signed_headers( - headers - .into_iter() - .map(|header| { - ( - into_substrate_ethereum_header(&header.header()), - into_substrate_ethereum_receipts(header.extra()), - ) - }) - .collect(), - ); - - rialto_runtime::Call::BridgeRialtoPoA(pallet_call) - } - - fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call { - let pallet_call = rialto_runtime::BridgeEthPoACall::import_unsigned_header( - into_substrate_ethereum_header(&header.header()), - into_substrate_ethereum_receipts(header.extra()), - ); - - rialto_runtime::Call::BridgeRialtoPoA(pallet_call) - } - - fn build_currency_exchange_call(&self, proof: Proof) -> Call { - let pallet_call = rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction(proof); - rialto_runtime::Call::BridgeRialtoCurrencyExchange(pallet_call) - } -} - -/// Corresponds to the Kovan instance used in the bridge runtime. 
-#[derive(Default, Clone, Debug)] -pub struct Kovan; - -impl BridgeInstance for Kovan { - fn build_signed_header_call(&self, headers: Vec) -> Call { - let pallet_call = rialto_runtime::BridgeEthPoACall::import_signed_headers( - headers - .into_iter() - .map(|header| { - ( - into_substrate_ethereum_header(header.header()), - into_substrate_ethereum_receipts(header.extra()), - ) - }) - .collect(), - ); - - rialto_runtime::Call::BridgeKovan(pallet_call) - } - - fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call { - let pallet_call = rialto_runtime::BridgeEthPoACall::import_unsigned_header( - into_substrate_ethereum_header(header.header()), - into_substrate_ethereum_receipts(header.extra()), - ); - - rialto_runtime::Call::BridgeKovan(pallet_call) - } - - fn build_currency_exchange_call(&self, proof: Proof) -> Call { - let pallet_call = rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction(proof); - rialto_runtime::Call::BridgeKovanCurrencyExchange(pallet_call) - } -} diff --git a/polkadot/bridges/relays/ethereum/src/main.rs b/polkadot/bridges/relays/ethereum/src/main.rs deleted file mode 100644 index b75c0f44bb856a959fd8ff88decf291a8007d1bb..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/src/main.rs +++ /dev/null @@ -1,409 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![recursion_limit = "1024"] - -mod ethereum_client; -mod ethereum_deploy_contract; -mod ethereum_exchange; -mod ethereum_exchange_submit; -mod ethereum_sync_loop; -mod instances; -mod rialto_client; -mod rpc_errors; -mod substrate_sync_loop; -mod substrate_types; - -use ethereum_deploy_contract::EthereumDeployContractParams; -use ethereum_exchange::EthereumExchangeParams; -use ethereum_exchange_submit::EthereumExchangeSubmitParams; -use ethereum_sync_loop::EthereumSyncParams; -use headers_relay::sync::TargetTransactionMode; -use hex_literal::hex; -use instances::{BridgeInstance, Kovan, RialtoPoA}; -use relay_utils::{initialize::initialize_relay, metrics::MetricsParams}; -use secp256k1::SecretKey; -use sp_core::crypto::Pair; -use substrate_sync_loop::SubstrateSyncParams; - -use headers_relay::sync::HeadersSyncParams; -use relay_ethereum_client::{ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams}; -use relay_rialto_client::SigningParams as RialtoSigningParams; -use relay_substrate_client::ConnectionParams as SubstrateConnectionParams; -use std::sync::Arc; - -fn main() { - initialize_relay(); - - let yaml = clap::load_yaml!("cli.yml"); - let matches = clap::App::from_yaml(yaml).get_matches(); - match matches.subcommand() { - ("eth-to-sub", Some(eth_to_sub_matches)) => { - log::info!(target: "bridge", "Starting ETH âž¡ SUB relay."); - if ethereum_sync_loop::run(match ethereum_sync_params(ð_to_sub_matches) { - Ok(ethereum_sync_params) => ethereum_sync_params, - Err(err) => { - log::error!(target: "bridge", "Error parsing parameters: {}", err); - return; - } - }) - .is_err() - { - log::error!(target: "bridge", "Unable to get Substrate genesis block for Ethereum sync."); - }; - } - ("sub-to-eth", Some(sub_to_eth_matches)) => { - log::info!(target: "bridge", "Starting SUB âž¡ ETH relay."); - if 
substrate_sync_loop::run(match substrate_sync_params(&sub_to_eth_matches) { - Ok(substrate_sync_params) => substrate_sync_params, - Err(err) => { - log::error!(target: "bridge", "Error parsing parameters: {}", err); - return; - } - }) - .is_err() - { - log::error!(target: "bridge", "Unable to get Substrate genesis block for Substrate sync."); - }; - } - ("eth-deploy-contract", Some(eth_deploy_matches)) => { - log::info!(target: "bridge", "Deploying ETH contracts."); - ethereum_deploy_contract::run(match ethereum_deploy_contract_params(ð_deploy_matches) { - Ok(ethereum_deploy_params) => ethereum_deploy_params, - Err(err) => { - log::error!(target: "bridge", "Error during contract deployment: {}", err); - return; - } - }); - } - ("eth-submit-exchange-tx", Some(eth_exchange_submit_matches)) => { - log::info!(target: "bridge", "Submitting ETH âž¡ SUB exchange transaction."); - ethereum_exchange_submit::run(match ethereum_exchange_submit_params(ð_exchange_submit_matches) { - Ok(eth_exchange_submit_params) => eth_exchange_submit_params, - Err(err) => { - log::error!(target: "bridge", "Error submitting Eethereum exchange transaction: {}", err); - return; - } - }); - } - ("eth-exchange-sub", Some(eth_exchange_matches)) => { - log::info!(target: "bridge", "Starting ETH âž¡ SUB exchange transactions relay."); - ethereum_exchange::run(match ethereum_exchange_params(ð_exchange_matches) { - Ok(eth_exchange_params) => eth_exchange_params, - Err(err) => { - log::error!(target: "bridge", "Error relaying Ethereum transactions proofs: {}", err); - return; - } - }); - } - ("", _) => { - log::error!(target: "bridge", "No subcommand specified"); - } - _ => unreachable!("all possible subcommands are checked above; qed"), - } -} - -fn ethereum_connection_params(matches: &clap::ArgMatches) -> Result { - let mut params = EthereumConnectionParams::default(); - if let Some(eth_host) = matches.value_of("eth-host") { - params.host = eth_host.into(); - } - if let Some(eth_port) = 
matches.value_of("eth-port") { - params.port = eth_port - .parse() - .map_err(|e| format!("Failed to parse eth-port: {}", e))?; - } - Ok(params) -} - -fn ethereum_signing_params(matches: &clap::ArgMatches) -> Result { - let mut params = EthereumSigningParams::default(); - if let Some(eth_signer) = matches.value_of("eth-signer") { - params.signer = - SecretKey::parse_slice(&hex::decode(eth_signer).map_err(|e| format!("Failed to parse eth-signer: {}", e))?) - .map_err(|e| format!("Invalid eth-signer: {}", e))?; - } - if let Some(eth_chain_id) = matches.value_of("eth-chain-id") { - params.chain_id = eth_chain_id - .parse::() - .map_err(|e| format!("Failed to parse eth-chain-id: {}", e))?; - } - Ok(params) -} - -fn substrate_connection_params(matches: &clap::ArgMatches) -> Result { - let mut params = SubstrateConnectionParams::default(); - if let Some(sub_host) = matches.value_of("sub-host") { - params.host = sub_host.into(); - } - if let Some(sub_port) = matches.value_of("sub-port") { - params.port = sub_port - .parse() - .map_err(|e| format!("Failed to parse sub-port: {}", e))?; - } - Ok(params) -} - -fn rialto_signing_params(matches: &clap::ArgMatches) -> Result { - let mut params = RialtoSigningParams::default(); - if let Some(sub_signer) = matches.value_of("sub-signer") { - let sub_signer_password = matches.value_of("sub-signer-password"); - params.signer = sp_core::sr25519::Pair::from_string(sub_signer, sub_signer_password) - .map_err(|e| format!("Failed to parse sub-signer: {:?}", e))?; - } - Ok(params) -} - -fn ethereum_sync_params(matches: &clap::ArgMatches) -> Result { - use crate::ethereum_sync_loop::consts::*; - - let mut sync_params = HeadersSyncParams { - max_future_headers_to_download: MAX_FUTURE_HEADERS_TO_DOWNLOAD, - max_headers_in_submitted_status: MAX_SUBMITTED_HEADERS, - max_headers_in_single_submit: MAX_HEADERS_IN_SINGLE_SUBMIT, - max_headers_size_in_single_submit: MAX_HEADERS_SIZE_IN_SINGLE_SUBMIT, - prune_depth: PRUNE_DEPTH, - target_tx_mode: 
TargetTransactionMode::Signed, - }; - - match matches.value_of("sub-tx-mode") { - Some("signed") => sync_params.target_tx_mode = TargetTransactionMode::Signed, - Some("unsigned") => { - sync_params.target_tx_mode = TargetTransactionMode::Unsigned; - - // tx pool won't accept too much unsigned transactions - sync_params.max_headers_in_submitted_status = 10; - } - Some("backup") => sync_params.target_tx_mode = TargetTransactionMode::Backup, - Some(mode) => return Err(format!("Invalid sub-tx-mode: {}", mode)), - None => sync_params.target_tx_mode = TargetTransactionMode::Signed, - } - - let params = EthereumSyncParams { - eth_params: ethereum_connection_params(matches)?, - sub_params: substrate_connection_params(matches)?, - sub_sign: rialto_signing_params(matches)?, - metrics_params: metrics_params(matches)?, - instance: instance_params(matches)?, - sync_params, - }; - - log::debug!(target: "bridge", "Ethereum sync params: {:?}", params); - - Ok(params) -} - -fn substrate_sync_params(matches: &clap::ArgMatches) -> Result { - use crate::substrate_sync_loop::consts::*; - - let eth_contract_address: relay_ethereum_client::types::Address = - if let Some(eth_contract) = matches.value_of("eth-contract") { - eth_contract.parse().map_err(|e| format!("{}", e))? 
- } else { - "731a10897d267e19b34503ad902d0a29173ba4b1" - .parse() - .expect("address is hardcoded, thus valid; qed") - }; - - let params = SubstrateSyncParams { - sub_params: substrate_connection_params(matches)?, - eth_params: ethereum_connection_params(matches)?, - eth_sign: ethereum_signing_params(matches)?, - metrics_params: metrics_params(matches)?, - sync_params: HeadersSyncParams { - max_future_headers_to_download: MAX_FUTURE_HEADERS_TO_DOWNLOAD, - max_headers_in_submitted_status: MAX_SUBMITTED_HEADERS, - max_headers_in_single_submit: MAX_SUBMITTED_HEADERS, - max_headers_size_in_single_submit: std::usize::MAX, - prune_depth: PRUNE_DEPTH, - target_tx_mode: TargetTransactionMode::Signed, - }, - eth_contract_address, - }; - - log::debug!(target: "bridge", "Substrate sync params: {:?}", params); - - Ok(params) -} - -fn ethereum_deploy_contract_params(matches: &clap::ArgMatches) -> Result { - let eth_contract_code = parse_hex_argument(matches, "eth-contract-code")?.unwrap_or_else(|| { - hex::decode(include_str!("../res/substrate-bridge-bytecode.hex")).expect("code is hardcoded, thus valid; qed") - }); - let sub_initial_authorities_set_id = match matches.value_of("sub-authorities-set-id") { - Some(sub_initial_authorities_set_id) => Some( - sub_initial_authorities_set_id - .parse() - .map_err(|e| format!("Failed to parse sub-authorities-set-id: {}", e))?, - ), - None => None, - }; - let sub_initial_authorities_set = parse_hex_argument(matches, "sub-authorities-set")?; - let sub_initial_header = parse_hex_argument(matches, "sub-initial-header")?; - - let params = EthereumDeployContractParams { - eth_params: ethereum_connection_params(matches)?, - eth_sign: ethereum_signing_params(matches)?, - sub_params: substrate_connection_params(matches)?, - sub_initial_authorities_set_id, - sub_initial_authorities_set, - sub_initial_header, - eth_contract_code, - }; - - log::debug!(target: "bridge", "Deploy params: {:?}", params); - - Ok(params) -} - -fn 
ethereum_exchange_submit_params(matches: &clap::ArgMatches) -> Result { - let eth_nonce = if let Some(eth_nonce) = matches.value_of("eth-nonce") { - Some( - relay_ethereum_client::types::U256::from_dec_str(ð_nonce) - .map_err(|e| format!("Failed to parse eth-nonce: {}", e))?, - ) - } else { - None - }; - - let eth_amount = if let Some(eth_amount) = matches.value_of("eth-amount") { - eth_amount - .parse() - .map_err(|e| format!("Failed to parse eth-amount: {}", e))? - } else { - // This is in Wei, represents 1 ETH - 1_000_000_000_000_000_000_u64.into() - }; - - // This is the well-known Substrate account of Ferdie - let default_recepient = hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c"); - - let sub_recipient = if let Some(sub_recipient) = matches.value_of("sub-recipient") { - hex::decode(&sub_recipient) - .map_err(|err| err.to_string()) - .and_then(|vsub_recipient| { - let expected_len = default_recepient.len(); - if expected_len != vsub_recipient.len() { - Err(format!("invalid length. Expected {} bytes", expected_len)) - } else { - let mut sub_recipient = default_recepient; - sub_recipient.copy_from_slice(&vsub_recipient[..expected_len]); - Ok(sub_recipient) - } - }) - .map_err(|e| format!("Failed to parse sub-recipient: {}", e))? 
- } else { - default_recepient - }; - - let params = EthereumExchangeSubmitParams { - eth_params: ethereum_connection_params(matches)?, - eth_sign: ethereum_signing_params(matches)?, - eth_nonce, - eth_amount, - sub_recipient, - }; - - log::debug!(target: "bridge", "Submit Ethereum exchange tx params: {:?}", params); - - Ok(params) -} - -fn ethereum_exchange_params(matches: &clap::ArgMatches) -> Result { - let mode = match matches.value_of("eth-tx-hash") { - Some(eth_tx_hash) => ethereum_exchange::ExchangeRelayMode::Single( - eth_tx_hash - .parse() - .map_err(|e| format!("Failed to parse eth-tx-hash: {}", e))?, - ), - None => ethereum_exchange::ExchangeRelayMode::Auto(match matches.value_of("eth-start-with-block") { - Some(eth_start_with_block) => Some( - eth_start_with_block - .parse() - .map_err(|e| format!("Failed to parse eth-start-with-block: {}", e))?, - ), - None => None, - }), - }; - - let params = EthereumExchangeParams { - eth_params: ethereum_connection_params(matches)?, - sub_params: substrate_connection_params(matches)?, - sub_sign: rialto_signing_params(matches)?, - metrics_params: metrics_params(matches)?, - instance: instance_params(matches)?, - mode, - }; - - log::debug!(target: "bridge", "Ethereum exchange params: {:?}", params); - - Ok(params) -} - -fn metrics_params(matches: &clap::ArgMatches) -> Result, String> { - if matches.is_present("no-prometheus") { - return Ok(None); - } - - let mut metrics_params = MetricsParams::default(); - - if let Some(prometheus_host) = matches.value_of("prometheus-host") { - metrics_params.host = prometheus_host.into(); - } - if let Some(prometheus_port) = matches.value_of("prometheus-port") { - metrics_params.port = prometheus_port - .parse() - .map_err(|e| format!("Failed to parse prometheus-port: {}", e))?; - } - - Ok(Some(metrics_params)) -} - -fn instance_params(matches: &clap::ArgMatches) -> Result, String> { - let instance = if let Some(instance) = matches.value_of("sub-pallet-instance") { - match 
instance.to_lowercase().as_str() { - "rialto" => Arc::new(RialtoPoA) as Arc, - "kovan" => Arc::new(Kovan), - _ => return Err("Unsupported bridge pallet instance".to_string()), - } - } else { - unreachable!("CLI config enforces a default instance, can never be None") - }; - - Ok(instance) -} - -fn parse_hex_argument(matches: &clap::ArgMatches, arg: &str) -> Result>, String> { - match matches.value_of(arg) { - Some(value) => Ok(Some( - hex::decode(value).map_err(|e| format!("Failed to parse {}: {}", arg, e))?, - )), - None => Ok(None), - } -} - -#[cfg(test)] -mod tests { - - // Details: https://github.com/paritytech/parity-bridges-common/issues/118 - #[test] - fn async_std_sleep_works() { - let mut local_pool = futures::executor::LocalPool::new(); - local_pool.run_until(async move { - async_std::task::sleep(std::time::Duration::from_secs(1)).await; - }); - } -} diff --git a/polkadot/bridges/relays/ethereum/src/rialto_client.rs b/polkadot/bridges/relays/ethereum/src/rialto_client.rs deleted file mode 100644 index 861ef8efeb97059e96163d0a55c9e39ebd594864..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/src/rialto_client.rs +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::ethereum_sync_loop::QueuedEthereumHeader; -use crate::instances::BridgeInstance; -use crate::rpc_errors::RpcError; - -use async_trait::async_trait; -use bp_eth_poa::AuraHeader as SubstrateEthereumHeader; -use codec::{Decode, Encode}; -use headers_relay::sync_types::SubmittedHeaders; -use relay_ethereum_client::types::HeaderId as EthereumHeaderId; -use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{Client as SubstrateClient, TransactionSignScheme}; -use relay_utils::HeaderId; -use sp_core::{crypto::Pair, Bytes}; -use std::{collections::VecDeque, sync::Arc}; - -const ETH_API_IMPORT_REQUIRES_RECEIPTS: &str = "RialtoPoAHeaderApi_is_import_requires_receipts"; -const ETH_API_IS_KNOWN_BLOCK: &str = "RialtoPoAHeaderApi_is_known_block"; -const ETH_API_BEST_BLOCK: &str = "RialtoPoAHeaderApi_best_block"; -const ETH_API_BEST_FINALIZED_BLOCK: &str = "RialtoPoAHeaderApi_finalized_block"; -const EXCH_API_FILTER_TRANSACTION_PROOF: &str = "RialtoCurrencyExchangeApi_filter_transaction_proof"; - -type RpcResult = std::result::Result; - -/// A trait which contains methods that work by using multiple low-level RPCs, or more complicated -/// interactions involving, for example, an Ethereum bridge module. -#[async_trait] -pub trait SubstrateHighLevelRpc { - /// Returns best Ethereum block that Substrate runtime knows of. - async fn best_ethereum_block(&self) -> RpcResult; - /// Returns best finalized Ethereum block that Substrate runtime knows of. - async fn best_ethereum_finalized_block(&self) -> RpcResult; - /// Returns whether or not transactions receipts are required for Ethereum header submission. - async fn ethereum_receipts_required(&self, header: SubstrateEthereumHeader) -> RpcResult; - /// Returns whether or not the given Ethereum header is known to the Substrate runtime. 
- async fn ethereum_header_known(&self, header_id: EthereumHeaderId) -> RpcResult; -} - -#[async_trait] -impl SubstrateHighLevelRpc for SubstrateClient { - async fn best_ethereum_block(&self) -> RpcResult { - let call = ETH_API_BEST_BLOCK.to_string(); - let data = Bytes(Vec::new()); - - let encoded_response = self.state_call(call, data, None).await?; - let decoded_response: (u64, bp_eth_poa::H256) = Decode::decode(&mut &encoded_response.0[..])?; - - let best_header_id = HeaderId(decoded_response.0, decoded_response.1); - Ok(best_header_id) - } - - async fn best_ethereum_finalized_block(&self) -> RpcResult { - let call = ETH_API_BEST_FINALIZED_BLOCK.to_string(); - let data = Bytes(Vec::new()); - - let encoded_response = self.state_call(call, data, None).await?; - let decoded_response: (u64, bp_eth_poa::H256) = Decode::decode(&mut &encoded_response.0[..])?; - - let best_header_id = HeaderId(decoded_response.0, decoded_response.1); - Ok(best_header_id) - } - - async fn ethereum_receipts_required(&self, header: SubstrateEthereumHeader) -> RpcResult { - let call = ETH_API_IMPORT_REQUIRES_RECEIPTS.to_string(); - let data = Bytes(header.encode()); - - let encoded_response = self.state_call(call, data, None).await?; - let receipts_required: bool = Decode::decode(&mut &encoded_response.0[..])?; - - Ok(receipts_required) - } - - // The Substrate module could prune old headers. So this function could return false even - // if header is synced. And we'll mark corresponding Ethereum header as Orphan. - // - // But when we read the best header from Substrate next time, we will know that - // there's a better header. This Orphan will either be marked as synced, or - // eventually pruned. 
- async fn ethereum_header_known(&self, header_id: EthereumHeaderId) -> RpcResult { - let call = ETH_API_IS_KNOWN_BLOCK.to_string(); - let data = Bytes(header_id.1.encode()); - - let encoded_response = self.state_call(call, data, None).await?; - let is_known_block: bool = Decode::decode(&mut &encoded_response.0[..])?; - - Ok(is_known_block) - } -} - -/// A trait for RPC calls which are used to submit Ethereum headers to a Substrate -/// runtime. These are typically calls which use a combination of other low-level RPC -/// calls. -#[async_trait] -pub trait SubmitEthereumHeaders { - /// Submits Ethereum header to Substrate runtime. - async fn submit_ethereum_headers( - &self, - params: RialtoSigningParams, - instance: Arc, - headers: Vec, - sign_transactions: bool, - ) -> SubmittedHeaders; - - /// Submits signed Ethereum header to Substrate runtime. - async fn submit_signed_ethereum_headers( - &self, - params: RialtoSigningParams, - instance: Arc, - headers: Vec, - ) -> SubmittedHeaders; - - /// Submits unsigned Ethereum header to Substrate runtime. 
- async fn submit_unsigned_ethereum_headers( - &self, - instance: Arc, - headers: Vec, - ) -> SubmittedHeaders; -} - -#[async_trait] -impl SubmitEthereumHeaders for SubstrateClient { - async fn submit_ethereum_headers( - &self, - params: RialtoSigningParams, - instance: Arc, - headers: Vec, - sign_transactions: bool, - ) -> SubmittedHeaders { - if sign_transactions { - self.submit_signed_ethereum_headers(params, instance, headers).await - } else { - self.submit_unsigned_ethereum_headers(instance, headers).await - } - } - - async fn submit_signed_ethereum_headers( - &self, - params: RialtoSigningParams, - instance: Arc, - headers: Vec, - ) -> SubmittedHeaders { - let ids = headers.iter().map(|header| header.id()).collect(); - let submission_result = async { - let account_id = params.signer.public().as_array_ref().clone().into(); - let nonce = self.next_account_index(account_id).await?; - - let call = instance.build_signed_header_call(headers); - let transaction = Rialto::sign_transaction(self, ¶ms.signer, nonce, call); - - let _ = self.submit_extrinsic(Bytes(transaction.encode())).await?; - Ok(()) - } - .await; - - match submission_result { - Ok(_) => SubmittedHeaders { - submitted: ids, - incomplete: Vec::new(), - rejected: Vec::new(), - fatal_error: None, - }, - Err(error) => SubmittedHeaders { - submitted: Vec::new(), - incomplete: Vec::new(), - rejected: ids, - fatal_error: Some(error), - }, - } - } - - async fn submit_unsigned_ethereum_headers( - &self, - instance: Arc, - headers: Vec, - ) -> SubmittedHeaders { - let mut ids = headers.iter().map(|header| header.id()).collect::>(); - let mut submitted_headers = SubmittedHeaders::default(); - - for header in headers { - let id = ids.pop_front().expect("both collections have same size; qed"); - - let call = instance.build_unsigned_header_call(header); - let transaction = create_unsigned_submit_transaction(call); - - match self.submit_extrinsic(Bytes(transaction.encode())).await { - Ok(_) => 
submitted_headers.submitted.push(id), - Err(error) => { - submitted_headers.rejected.push(id); - submitted_headers.rejected.extend(ids); - submitted_headers.fatal_error = Some(error.into()); - break; - } - } - } - - submitted_headers - } -} - -/// A trait for RPC calls which are used to submit proof of Ethereum exchange transaction to a -/// Substrate runtime. These are typically calls which use a combination of other low-level RPC -/// calls. -#[async_trait] -pub trait SubmitEthereumExchangeTransactionProof { - /// Pre-verify Ethereum exchange transaction proof. - async fn verify_exchange_transaction_proof( - &self, - proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, - ) -> RpcResult; - /// Submits Ethereum exchange transaction proof to Substrate runtime. - async fn submit_exchange_transaction_proof( - &self, - params: RialtoSigningParams, - instance: Arc, - proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, - ) -> RpcResult<()>; -} - -#[async_trait] -impl SubmitEthereumExchangeTransactionProof for SubstrateClient { - async fn verify_exchange_transaction_proof( - &self, - proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, - ) -> RpcResult { - let call = EXCH_API_FILTER_TRANSACTION_PROOF.to_string(); - let data = Bytes(proof.encode()); - - let encoded_response = self.state_call(call, data, None).await?; - let is_allowed: bool = Decode::decode(&mut &encoded_response.0[..])?; - - Ok(is_allowed) - } - - async fn submit_exchange_transaction_proof( - &self, - params: RialtoSigningParams, - instance: Arc, - proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, - ) -> RpcResult<()> { - let account_id = params.signer.public().as_array_ref().clone().into(); - let nonce = self.next_account_index(account_id).await?; - - let call = instance.build_currency_exchange_call(proof); - let transaction = Rialto::sign_transaction(self, ¶ms.signer, nonce, call); - - let _ = 
self.submit_extrinsic(Bytes(transaction.encode())).await?; - Ok(()) - } -} - -/// Create unsigned Substrate transaction for submitting Ethereum header. -fn create_unsigned_submit_transaction(call: rialto_runtime::Call) -> rialto_runtime::UncheckedExtrinsic { - rialto_runtime::UncheckedExtrinsic::new_unsigned(call) -} diff --git a/polkadot/bridges/relays/ethereum/src/rpc_errors.rs b/polkadot/bridges/relays/ethereum/src/rpc_errors.rs deleted file mode 100644 index 9f7f14cf9a1f5c8150ab49cee0c045421bc6de4a..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/src/rpc_errors.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use relay_ethereum_client::Error as EthereumNodeError; -use relay_substrate_client::Error as SubstrateNodeError; -use relay_utils::MaybeConnectionError; - -/// Contains common errors that can occur when -/// interacting with a Substrate or Ethereum node -/// through RPC. -#[derive(Debug)] -pub enum RpcError { - /// The arguments to the RPC method failed to serialize. - Serialization(serde_json::Error), - /// An error occured when interacting with an Ethereum node. - Ethereum(EthereumNodeError), - /// An error occured when interacting with a Substrate node. 
- Substrate(SubstrateNodeError), -} - -impl From for String { - fn from(err: RpcError) -> Self { - match err { - RpcError::Serialization(e) => e.to_string(), - RpcError::Ethereum(e) => e.to_string(), - RpcError::Substrate(e) => e.to_string(), - } - } -} - -impl From for RpcError { - fn from(err: serde_json::Error) -> Self { - Self::Serialization(err) - } -} - -impl From for RpcError { - fn from(err: EthereumNodeError) -> Self { - Self::Ethereum(err) - } -} - -impl From for RpcError { - fn from(err: SubstrateNodeError) -> Self { - Self::Substrate(err) - } -} - -impl From for RpcError { - fn from(err: ethabi::Error) -> Self { - Self::Ethereum(EthereumNodeError::ResponseParseFailed(format!("{}", err))) - } -} - -impl MaybeConnectionError for RpcError { - fn is_connection_error(&self) -> bool { - match self { - RpcError::Ethereum(ref error) => error.is_connection_error(), - RpcError::Substrate(ref error) => error.is_connection_error(), - _ => false, - } - } -} - -impl From for RpcError { - fn from(err: codec::Error) -> Self { - Self::Substrate(SubstrateNodeError::ResponseParseFailed(err)) - } -} diff --git a/polkadot/bridges/relays/ethereum/src/substrate_sync_loop.rs b/polkadot/bridges/relays/ethereum/src/substrate_sync_loop.rs deleted file mode 100644 index a1cea3f42423071c37bb29c7e937f5d5f7987cb4..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/src/substrate_sync_loop.rs +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rialto-Substrate -> Ethereum PoA synchronization. - -use crate::ethereum_client::EthereumHighLevelRpc; -use crate::rpc_errors::RpcError; - -use async_trait::async_trait; -use codec::Encode; -use headers_relay::{ - sync::HeadersSyncParams, - sync_loop::TargetClient, - sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders}, -}; -use relay_ethereum_client::{ - types::Address, Client as EthereumClient, ConnectionParams as EthereumConnectionParams, - SigningParams as EthereumSigningParams, -}; -use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SyncHeader as RialtoSyncHeader}; -use relay_substrate_client::{ - headers_source::HeadersSource, Chain as SubstrateChain, Client as SubstrateClient, - ConnectionParams as SubstrateConnectionParams, -}; -use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient}; -use sp_runtime::Justification; - -use std::fmt::Debug; -use std::{collections::HashSet, time::Duration}; - -pub mod consts { - use super::Duration; - - /// Interval at which we check new Ethereum blocks. - pub const ETHEREUM_TICK_INTERVAL: Duration = Duration::from_secs(5); - /// Max Ethereum headers we want to have in all 'before-submitted' states. - pub const MAX_FUTURE_HEADERS_TO_DOWNLOAD: usize = 8; - /// Max Ethereum headers count we want to have in 'submitted' state. - pub const MAX_SUBMITTED_HEADERS: usize = 4; - /// Max depth of in-memory headers in all states. Past this depth they will be forgotten (pruned). - pub const PRUNE_DEPTH: u32 = 256; -} - -/// Substrate synchronization parameters. 
-#[derive(Debug)] -pub struct SubstrateSyncParams { - /// Substrate connection params. - pub sub_params: SubstrateConnectionParams, - /// Ethereum connection params. - pub eth_params: EthereumConnectionParams, - /// Ethereum signing params. - pub eth_sign: EthereumSigningParams, - /// Ethereum bridge contract address. - pub eth_contract_address: Address, - /// Synchronization parameters. - pub sync_params: HeadersSyncParams, - /// Metrics parameters. - pub metrics_params: Option, -} - -/// Substrate synchronization pipeline. -#[derive(Clone, Copy, Debug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct SubstrateHeadersSyncPipeline; - -impl HeadersSyncPipeline for SubstrateHeadersSyncPipeline { - const SOURCE_NAME: &'static str = "Substrate"; - const TARGET_NAME: &'static str = "Ethereum"; - - type Hash = rialto_runtime::Hash; - type Number = rialto_runtime::BlockNumber; - type Header = RialtoSyncHeader; - type Extra = (); - type Completion = Justification; - - fn estimate_size(source: &QueuedHeader) -> usize { - source.header().encode().len() - } -} - -/// Queued substrate header ID. -pub type QueuedRialtoHeader = QueuedHeader; - -/// Rialto node as headers source. -type SubstrateHeadersSource = HeadersSource; - -/// Ethereum client as Substrate headers target. -#[derive(Clone)] -struct EthereumHeadersTarget { - /// Ethereum node client. - client: EthereumClient, - /// Bridge contract address. - contract: Address, - /// Ethereum signing params. 
- sign_params: EthereumSigningParams, -} - -impl EthereumHeadersTarget { - fn new(client: EthereumClient, contract: Address, sign_params: EthereumSigningParams) -> Self { - Self { - client, - contract, - sign_params, - } - } -} - -#[async_trait] -impl RelayClient for EthereumHeadersTarget { - type Error = RpcError; - - async fn reconnect(&mut self) -> Result<(), RpcError> { - self.client.reconnect(); - Ok(()) - } -} - -#[async_trait] -impl TargetClient for EthereumHeadersTarget { - async fn best_header_id(&self) -> Result { - // we can't continue to relay headers if Ethereum node is out of sync, because - // it may have already received (some of) headers that we're going to relay - self.client.ensure_synced().await?; - - self.client.best_substrate_block(self.contract).await - } - - async fn is_known_header(&self, id: RialtoHeaderId) -> Result<(RialtoHeaderId, bool), RpcError> { - self.client.substrate_header_known(self.contract, id).await - } - - async fn submit_headers(&self, headers: Vec) -> SubmittedHeaders { - self.client - .submit_substrate_headers(self.sign_params.clone(), self.contract, headers) - .await - } - - async fn incomplete_headers_ids(&self) -> Result, RpcError> { - self.client.incomplete_substrate_headers(self.contract).await - } - - async fn complete_header(&self, id: RialtoHeaderId, completion: Justification) -> Result { - self.client - .complete_substrate_header(self.sign_params.clone(), self.contract, id, completion) - .await - } - - async fn requires_extra(&self, header: QueuedRialtoHeader) -> Result<(RialtoHeaderId, bool), RpcError> { - Ok((header.header().id(), false)) - } -} - -/// Run Substrate headers synchronization. 
-pub fn run(params: SubstrateSyncParams) -> Result<(), RpcError> { - let SubstrateSyncParams { - sub_params, - eth_params, - eth_sign, - eth_contract_address, - sync_params, - metrics_params, - } = params; - - let eth_client = EthereumClient::new(eth_params); - let sub_client = async_std::task::block_on(async { SubstrateClient::::new(sub_params).await })?; - - let target = EthereumHeadersTarget::new(eth_client, eth_contract_address, eth_sign); - let source = SubstrateHeadersSource::new(sub_client); - - headers_relay::sync_loop::run( - source, - Rialto::AVERAGE_BLOCK_INTERVAL, - target, - consts::ETHEREUM_TICK_INTERVAL, - (), - sync_params, - metrics_params, - futures::future::pending(), - ); - - Ok(()) -} diff --git a/polkadot/bridges/relays/ethereum/src/substrate_types.rs b/polkadot/bridges/relays/ethereum/src/substrate_types.rs deleted file mode 100644 index b88f383139385500316f3150ca964a3c28d8a53e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/ethereum/src/substrate_types.rs +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Converting between Ethereum headers and bridge module types. 
- -use bp_eth_poa::{ - AuraHeader as SubstrateEthereumHeader, LogEntry as SubstrateEthereumLogEntry, Receipt as SubstrateEthereumReceipt, - TransactionOutcome as SubstrateEthereumTransactionOutcome, -}; -use relay_ethereum_client::types::{ - Header as EthereumHeader, Receipt as EthereumReceipt, HEADER_ID_PROOF as ETHEREUM_HEADER_ID_PROOF, -}; - -/// Convert Ethereum header into Ethereum header for Substrate. -pub fn into_substrate_ethereum_header(header: &EthereumHeader) -> SubstrateEthereumHeader { - SubstrateEthereumHeader { - parent_hash: header.parent_hash, - timestamp: header.timestamp.as_u64(), - number: header.number.expect(ETHEREUM_HEADER_ID_PROOF).as_u64(), - author: header.author, - transactions_root: header.transactions_root, - uncles_hash: header.uncles_hash, - extra_data: header.extra_data.0.clone(), - state_root: header.state_root, - receipts_root: header.receipts_root, - log_bloom: header.logs_bloom.unwrap_or_default().data().into(), - gas_used: header.gas_used, - gas_limit: header.gas_limit, - difficulty: header.difficulty, - seal: header.seal_fields.iter().map(|s| s.0.clone()).collect(), - } -} - -/// Convert Ethereum transactions receipts into Ethereum transactions receipts for Substrate. -pub fn into_substrate_ethereum_receipts( - receipts: &Option>, -) -> Option> { - receipts - .as_ref() - .map(|receipts| receipts.iter().map(into_substrate_ethereum_receipt).collect()) -} - -/// Convert Ethereum transactions receipt into Ethereum transactions receipt for Substrate. 
-pub fn into_substrate_ethereum_receipt(receipt: &EthereumReceipt) -> SubstrateEthereumReceipt { - SubstrateEthereumReceipt { - gas_used: receipt.cumulative_gas_used, - log_bloom: receipt.logs_bloom.data().into(), - logs: receipt - .logs - .iter() - .map(|log_entry| SubstrateEthereumLogEntry { - address: log_entry.address, - topics: log_entry.topics.clone(), - data: log_entry.data.0.clone(), - }) - .collect(), - outcome: match (receipt.status, receipt.root) { - (Some(status), None) => SubstrateEthereumTransactionOutcome::StatusCode(status.as_u64() as u8), - (None, Some(root)) => SubstrateEthereumTransactionOutcome::StateRoot(root), - _ => SubstrateEthereumTransactionOutcome::Unknown, - }, - } -} diff --git a/polkadot/bridges/relays/exchange-relay/Cargo.toml b/polkadot/bridges/relays/exchange-relay/Cargo.toml deleted file mode 100644 index 62e7a029bbb2c996f49a2bfae213e9434ae07901..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/exchange-relay/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "exchange-relay" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -async-std = "1.6.5" -async-trait = "0.1.40" -backoff = "0.2" -futures = "0.3.5" -log = "0.4.11" -num-traits = "0.2" -parking_lot = "0.11.0" -relay-utils = { path = "../utils" } diff --git a/polkadot/bridges/relays/exchange-relay/src/exchange.rs b/polkadot/bridges/relays/exchange-relay/src/exchange.rs deleted file mode 100644 index cdf9c1a9f3588976acdb7e33b65131860592ee70..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/exchange-relay/src/exchange.rs +++ /dev/null @@ -1,916 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relaying proofs of exchange transaction. - -use async_trait::async_trait; -use relay_utils::{ - relay_loop::Client as RelayClient, FailedClient, MaybeConnectionError, StringifiedMaybeConnectionError, -}; -use std::{ - fmt::{Debug, Display}, - string::ToString, -}; - -/// Transaction proof pipeline. -pub trait TransactionProofPipeline { - /// Name of the transaction proof source. - const SOURCE_NAME: &'static str; - /// Name of the transaction proof target. - const TARGET_NAME: &'static str; - - /// Block type. - type Block: SourceBlock; - /// Transaction inclusion proof type. - type TransactionProof; -} - -/// Block that is participating in exchange. -pub trait SourceBlock { - /// Block hash type. - type Hash: Clone + Debug + Display; - /// Block number type. - type Number: Debug - + Display - + Clone - + Copy - + Into - + std::cmp::Ord - + std::ops::Add - + num_traits::One; - /// Block transaction. - type Transaction: SourceTransaction; - - /// Return hash of the block. - fn id(&self) -> relay_utils::HeaderId; - /// Return block transactions iterator. - fn transactions(&self) -> Vec; -} - -/// Transaction that is participating in exchange. -pub trait SourceTransaction { - /// Transaction hash type. - type Hash: Debug + Display; - - /// Return transaction hash. 
- fn hash(&self) -> Self::Hash; -} - -/// Block hash for given pipeline. -pub type BlockHashOf

= <

::Block as SourceBlock>::Hash; - -/// Block number for given pipeline. -pub type BlockNumberOf

= <

::Block as SourceBlock>::Number; - -/// Transaction hash for given pipeline. -pub type TransactionOf

= <

::Block as SourceBlock>::Transaction; - -/// Transaction hash for given pipeline. -pub type TransactionHashOf

= as SourceTransaction>::Hash; - -/// Header id. -pub type HeaderId

= relay_utils::HeaderId, BlockNumberOf

>; - -/// Source client API. -#[async_trait] -pub trait SourceClient: RelayClient { - /// Sleep until exchange-related data is (probably) updated. - async fn tick(&self); - /// Get block by hash. - async fn block_by_hash(&self, hash: BlockHashOf

) -> Result; - /// Get canonical block by number. - async fn block_by_number(&self, number: BlockNumberOf

) -> Result; - /// Return block + index where transaction has been **mined**. May return `Ok(None)` if transaction - /// is unknown to the source node. - async fn transaction_block(&self, hash: &TransactionHashOf

) - -> Result, usize)>, Self::Error>; - /// Prepare transaction proof. - async fn transaction_proof(&self, block: &P::Block, tx_index: usize) -> Result; -} - -/// Target client API. -#[async_trait] -pub trait TargetClient: RelayClient { - /// Sleep until exchange-related data is (probably) updated. - async fn tick(&self); - /// Returns `Ok(true)` if header is known to the target node. - async fn is_header_known(&self, id: &HeaderId

) -> Result; - /// Returns `Ok(true)` if header is finalized by the target node. - async fn is_header_finalized(&self, id: &HeaderId

) -> Result; - /// Returns best finalized header id. - async fn best_finalized_header_id(&self) -> Result, Self::Error>; - /// Returns `Ok(true)` if transaction proof is need to be relayed. - async fn filter_transaction_proof(&self, proof: &P::TransactionProof) -> Result; - /// Submits transaction proof to the target node. - async fn submit_transaction_proof(&self, proof: P::TransactionProof) -> Result<(), Self::Error>; -} - -/// Block transaction statistics. -#[derive(Debug, Default)] -#[cfg_attr(test, derive(PartialEq))] -pub struct RelayedBlockTransactions { - /// Total number of transactions processed (either relayed or ignored) so far. - pub processed: usize, - /// Total number of transactions successfully relayed so far. - pub relayed: usize, - /// Total number of transactions that we have failed to relay so far. - pub failed: usize, -} - -/// Relay all suitable transactions from single block. -/// -/// If connection error occurs, returns Err with number of successfully processed transactions. -/// If some other error occurs, it is ignored and other transactions are processed. -/// -/// All transaction-level traces are written by this function. This function is not tracing -/// any information about block. -pub async fn relay_block_transactions( - source_client: &impl SourceClient

, - target_client: &impl TargetClient

, - source_block: &P::Block, - mut relayed_transactions: RelayedBlockTransactions, -) -> Result { - let transactions_to_process = source_block - .transactions() - .into_iter() - .enumerate() - .skip(relayed_transactions.processed); - for (source_tx_index, source_tx) in transactions_to_process { - let result = async { - let source_tx_id = format!("{}/{}", source_block.id().1, source_tx_index); - let source_tx_proof = - prepare_transaction_proof(source_client, &source_tx_id, source_block, source_tx_index) - .await - .map_err(|e| (FailedClient::Source, e))?; - - let needs_to_be_relayed = - target_client - .filter_transaction_proof(&source_tx_proof) - .await - .map_err(|err| { - ( - FailedClient::Target, - StringifiedMaybeConnectionError::new( - err.is_connection_error(), - format!("Transaction filtering has failed with {:?}", err), - ), - ) - })?; - - if !needs_to_be_relayed { - return Ok(false); - } - - relay_ready_transaction_proof(target_client, &source_tx_id, source_tx_proof) - .await - .map(|_| true) - .map_err(|e| (FailedClient::Target, e)) - } - .await; - - // We have two options here: - // 1) retry with the same transaction later; - // 2) report error and proceed with next transaction. - // - // Option#1 may seems better, but: - // 1) we do not track if transaction is mined (without an error) by the target node; - // 2) error could be irrecoverable (e.g. when block is already pruned by bridge module or tx - // has invalid format) && we'll end up in infinite loop of retrying the same transaction proof. - // - // So we're going with option#2 here (the only exception are connection errors). 
- match result { - Ok(false) => { - relayed_transactions.processed += 1; - } - Ok(true) => { - log::info!( - target: "bridge", - "{} transaction {} proof has been successfully submitted to {} node", - P::SOURCE_NAME, - source_tx.hash(), - P::TARGET_NAME, - ); - - relayed_transactions.processed += 1; - relayed_transactions.relayed += 1; - } - Err((failed_client, err)) => { - log::error!( - target: "bridge", - "Error relaying {} transaction {} proof to {} node: {}. {}", - P::SOURCE_NAME, - source_tx.hash(), - P::TARGET_NAME, - err.to_string(), - if err.is_connection_error() { - "Going to retry after delay..." - } else { - "You may need to submit proof of this transaction manually" - }, - ); - - if err.is_connection_error() { - return Err((failed_client, relayed_transactions)); - } - - relayed_transactions.processed += 1; - relayed_transactions.failed += 1; - } - } - } - - Ok(relayed_transactions) -} - -/// Relay single transaction proof. -pub async fn relay_single_transaction_proof( - source_client: &impl SourceClient

, - target_client: &impl TargetClient

, - source_tx_hash: TransactionHashOf

, -) -> Result<(), String> { - // wait for transaction and header on source node - let (source_header_id, source_tx_index) = wait_transaction_mined(source_client, &source_tx_hash).await?; - let source_block = source_client.block_by_hash(source_header_id.1.clone()).await; - let source_block = source_block.map_err(|err| { - format!( - "Error retrieving block {} from {} node: {:?}", - source_header_id.1, - P::SOURCE_NAME, - err, - ) - })?; - - // wait for transaction and header on target node - wait_header_imported(target_client, &source_header_id).await?; - wait_header_finalized(target_client, &source_header_id).await?; - - // and finally - prepare and submit transaction proof to target node - let source_tx_id = format!("{}", source_tx_hash); - relay_ready_transaction_proof( - target_client, - &source_tx_id, - prepare_transaction_proof(source_client, &source_tx_id, &source_block, source_tx_index) - .await - .map_err(|err| err.to_string())?, - ) - .await - .map_err(|err| err.to_string()) -} - -/// Prepare transaction proof. -async fn prepare_transaction_proof( - source_client: &impl SourceClient

, - source_tx_id: &str, - source_block: &P::Block, - source_tx_index: usize, -) -> Result { - source_client - .transaction_proof(source_block, source_tx_index) - .await - .map_err(|err| { - StringifiedMaybeConnectionError::new( - err.is_connection_error(), - format!( - "Error building transaction {} proof on {} node: {:?}", - source_tx_id, - P::SOURCE_NAME, - err, - ), - ) - }) -} - -/// Relay prepared proof of transaction. -async fn relay_ready_transaction_proof( - target_client: &impl TargetClient

, - source_tx_id: &str, - source_tx_proof: P::TransactionProof, -) -> Result<(), StringifiedMaybeConnectionError> { - target_client - .submit_transaction_proof(source_tx_proof) - .await - .map_err(|err| { - StringifiedMaybeConnectionError::new( - err.is_connection_error(), - format!( - "Error submitting transaction {} proof to {} node: {:?}", - source_tx_id, - P::TARGET_NAME, - err, - ), - ) - }) -} - -/// Wait until transaction is mined by source node. -async fn wait_transaction_mined( - source_client: &impl SourceClient

, - source_tx_hash: &TransactionHashOf

, -) -> Result<(HeaderId

, usize), String> { - loop { - let source_header_and_tx = source_client.transaction_block(&source_tx_hash).await.map_err(|err| { - format!( - "Error retrieving transaction {} from {} node: {:?}", - source_tx_hash, - P::SOURCE_NAME, - err, - ) - })?; - match source_header_and_tx { - Some((source_header_id, source_tx)) => { - log::info!( - target: "bridge", - "Transaction {} is retrieved from {} node. Continuing...", - source_tx_hash, - P::SOURCE_NAME, - ); - - return Ok((source_header_id, source_tx)); - } - None => { - log::info!( - target: "bridge", - "Waiting for transaction {} to be mined by {} node...", - source_tx_hash, - P::SOURCE_NAME, - ); - - source_client.tick().await; - } - } - } -} - -/// Wait until target node imports required header. -async fn wait_header_imported( - target_client: &impl TargetClient

, - source_header_id: &HeaderId

, -) -> Result<(), String> { - loop { - let is_header_known = target_client.is_header_known(&source_header_id).await.map_err(|err| { - format!( - "Failed to check existence of header {}/{} on {} node: {:?}", - source_header_id.0, - source_header_id.1, - P::TARGET_NAME, - err, - ) - })?; - match is_header_known { - true => { - log::info!( - target: "bridge", - "Header {}/{} is known to {} node. Continuing.", - source_header_id.0, - source_header_id.1, - P::TARGET_NAME, - ); - - return Ok(()); - } - false => { - log::info!( - target: "bridge", - "Waiting for header {}/{} to be imported by {} node...", - source_header_id.0, - source_header_id.1, - P::TARGET_NAME, - ); - - target_client.tick().await; - } - } - } -} - -/// Wait until target node finalizes required header. -async fn wait_header_finalized( - target_client: &impl TargetClient

, - source_header_id: &HeaderId

, -) -> Result<(), String> { - loop { - let is_header_finalized = target_client - .is_header_finalized(&source_header_id) - .await - .map_err(|err| { - format!( - "Failed to check finality of header {}/{} on {} node: {:?}", - source_header_id.0, - source_header_id.1, - P::TARGET_NAME, - err, - ) - })?; - match is_header_finalized { - true => { - log::info!( - target: "bridge", - "Header {}/{} is finalizd by {} node. Continuing.", - source_header_id.0, - source_header_id.1, - P::TARGET_NAME, - ); - - return Ok(()); - } - false => { - log::info!( - target: "bridge", - "Waiting for header {}/{} to be finalized by {} node...", - source_header_id.0, - source_header_id.1, - P::TARGET_NAME, - ); - - target_client.tick().await; - } - } - } -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - - use parking_lot::Mutex; - use relay_utils::HeaderId; - use std::{ - collections::{HashMap, HashSet}, - sync::Arc, - }; - - pub fn test_block_id() -> TestHeaderId { - HeaderId(1, 1) - } - - pub fn test_next_block_id() -> TestHeaderId { - HeaderId(2, 2) - } - - pub fn test_transaction_hash(tx_index: u64) -> TestTransactionHash { - 200 + tx_index - } - - pub fn test_transaction(tx_index: u64) -> TestTransaction { - TestTransaction(test_transaction_hash(tx_index)) - } - - pub fn test_block() -> TestBlock { - TestBlock(test_block_id(), vec![test_transaction(0)]) - } - - pub fn test_next_block() -> TestBlock { - TestBlock(test_next_block_id(), vec![test_transaction(1)]) - } - - pub type TestBlockNumber = u64; - pub type TestBlockHash = u64; - pub type TestTransactionHash = u64; - pub type TestHeaderId = HeaderId; - - #[derive(Debug, Clone, PartialEq)] - pub struct TestError(pub bool); - - impl MaybeConnectionError for TestError { - fn is_connection_error(&self) -> bool { - self.0 - } - } - - pub struct TestTransactionProofPipeline; - - impl TransactionProofPipeline for TestTransactionProofPipeline { - const SOURCE_NAME: &'static str = "TestSource"; - const TARGET_NAME: &'static str 
= "TestTarget"; - - type Block = TestBlock; - type TransactionProof = TestTransactionProof; - } - - #[derive(Debug, Clone)] - pub struct TestBlock(pub TestHeaderId, pub Vec); - - impl SourceBlock for TestBlock { - type Hash = TestBlockHash; - type Number = TestBlockNumber; - type Transaction = TestTransaction; - - fn id(&self) -> TestHeaderId { - self.0 - } - - fn transactions(&self) -> Vec { - self.1.clone() - } - } - - #[derive(Debug, Clone)] - pub struct TestTransaction(pub TestTransactionHash); - - impl SourceTransaction for TestTransaction { - type Hash = TestTransactionHash; - - fn hash(&self) -> Self::Hash { - self.0 - } - } - - #[derive(Debug, Clone, PartialEq)] - pub struct TestTransactionProof(pub TestTransactionHash); - - #[derive(Clone)] - pub struct TestTransactionsSource { - pub on_tick: Arc, - pub data: Arc>, - } - - pub struct TestTransactionsSourceData { - pub block: Result, - pub transaction_block: Result, TestError>, - pub proofs_to_fail: HashMap, - } - - impl TestTransactionsSource { - pub fn new(on_tick: Box) -> Self { - Self { - on_tick: Arc::new(on_tick), - data: Arc::new(Mutex::new(TestTransactionsSourceData { - block: Ok(test_block()), - transaction_block: Ok(Some((test_block_id(), 0))), - proofs_to_fail: HashMap::new(), - })), - } - } - } - - #[async_trait] - impl RelayClient for TestTransactionsSource { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - Ok(()) - } - } - - #[async_trait] - impl SourceClient for TestTransactionsSource { - async fn tick(&self) { - (self.on_tick)(&mut *self.data.lock()) - } - - async fn block_by_hash(&self, _: TestBlockHash) -> Result { - self.data.lock().block.clone() - } - - async fn block_by_number(&self, _: TestBlockNumber) -> Result { - self.data.lock().block.clone() - } - - async fn transaction_block(&self, _: &TestTransactionHash) -> Result, TestError> { - self.data.lock().transaction_block.clone() - } - - async fn transaction_proof(&self, block: &TestBlock, index: 
usize) -> Result { - let tx_hash = block.1[index].hash(); - let proof_error = self.data.lock().proofs_to_fail.get(&tx_hash).cloned(); - if let Some(err) = proof_error { - return Err(err); - } - - Ok(TestTransactionProof(tx_hash)) - } - } - - #[derive(Clone)] - pub struct TestTransactionsTarget { - pub on_tick: Arc, - pub data: Arc>, - } - - pub struct TestTransactionsTargetData { - pub is_header_known: Result, - pub is_header_finalized: Result, - pub best_finalized_header_id: Result, - pub transactions_to_accept: HashSet, - pub submitted_proofs: Vec, - } - - impl TestTransactionsTarget { - pub fn new(on_tick: Box) -> Self { - Self { - on_tick: Arc::new(on_tick), - data: Arc::new(Mutex::new(TestTransactionsTargetData { - is_header_known: Ok(true), - is_header_finalized: Ok(true), - best_finalized_header_id: Ok(test_block_id()), - transactions_to_accept: vec![test_transaction_hash(0)].into_iter().collect(), - submitted_proofs: Vec::new(), - })), - } - } - } - - #[async_trait] - impl RelayClient for TestTransactionsTarget { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - Ok(()) - } - } - - #[async_trait] - impl TargetClient for TestTransactionsTarget { - async fn tick(&self) { - (self.on_tick)(&mut *self.data.lock()) - } - - async fn is_header_known(&self, _: &TestHeaderId) -> Result { - self.data.lock().is_header_known.clone() - } - - async fn is_header_finalized(&self, _: &TestHeaderId) -> Result { - self.data.lock().is_header_finalized.clone() - } - - async fn best_finalized_header_id(&self) -> Result { - self.data.lock().best_finalized_header_id.clone() - } - - async fn filter_transaction_proof(&self, proof: &TestTransactionProof) -> Result { - Ok(self.data.lock().transactions_to_accept.contains(&proof.0)) - } - - async fn submit_transaction_proof(&self, proof: TestTransactionProof) -> Result<(), TestError> { - self.data.lock().submitted_proofs.push(proof); - Ok(()) - } - } - - fn ensure_relay_single_success(source: 
&TestTransactionsSource, target: &TestTransactionsTarget) { - assert_eq!( - async_std::task::block_on(relay_single_transaction_proof(source, target, test_transaction_hash(0),)), - Ok(()), - ); - assert_eq!( - target.data.lock().submitted_proofs, - vec![TestTransactionProof(test_transaction_hash(0))], - ); - } - - fn ensure_relay_single_failure(source: TestTransactionsSource, target: TestTransactionsTarget) { - assert!(async_std::task::block_on(relay_single_transaction_proof( - &source, - &target, - test_transaction_hash(0), - )) - .is_err(),); - assert!(target.data.lock().submitted_proofs.is_empty()); - } - - #[test] - fn ready_transaction_proof_relayed_immediately() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - ensure_relay_single_success(&source, &target) - } - - #[test] - fn relay_transaction_proof_waits_for_transaction_to_be_mined() { - let source = TestTransactionsSource::new(Box::new(|source_data| { - assert_eq!(source_data.transaction_block, Ok(None)); - source_data.transaction_block = Ok(Some((test_block_id(), 0))); - })); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - // transaction is not yet mined, but will be available after first wait (tick) - source.data.lock().transaction_block = Ok(None); - - ensure_relay_single_success(&source, &target) - } - - #[test] - fn relay_transaction_fails_when_transaction_retrieval_fails() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - source.data.lock().transaction_block = Err(TestError(false)); - - ensure_relay_single_failure(source, target) - } - - #[test] - fn relay_transaction_fails_when_proof_retrieval_fails() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks 
allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - source - .data - .lock() - .proofs_to_fail - .insert(test_transaction_hash(0), TestError(false)); - - ensure_relay_single_failure(source, target) - } - - #[test] - fn relay_transaction_proof_waits_for_header_to_be_imported() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|target_data| { - assert_eq!(target_data.is_header_known, Ok(false)); - target_data.is_header_known = Ok(true); - })); - - // header is not yet imported, but will be available after first wait (tick) - target.data.lock().is_header_known = Ok(false); - - ensure_relay_single_success(&source, &target) - } - - #[test] - fn relay_transaction_proof_fails_when_is_header_known_fails() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - target.data.lock().is_header_known = Err(TestError(false)); - - ensure_relay_single_failure(source, target) - } - - #[test] - fn relay_transaction_proof_waits_for_header_to_be_finalized() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|target_data| { - assert_eq!(target_data.is_header_finalized, Ok(false)); - target_data.is_header_finalized = Ok(true); - })); - - // header is not yet finalized, but will be available after first wait (tick) - target.data.lock().is_header_finalized = Ok(false); - - ensure_relay_single_success(&source, &target) - } - - #[test] - fn relay_transaction_proof_fails_when_is_header_finalized_fails() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - 
target.data.lock().is_header_finalized = Err(TestError(false)); - - ensure_relay_single_failure(source, target) - } - - #[test] - fn relay_transaction_proof_fails_when_target_node_rejects_proof() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - target - .data - .lock() - .transactions_to_accept - .remove(&test_transaction_hash(0)); - - ensure_relay_single_success(&source, &target) - } - - fn test_relay_block_transactions( - source: &TestTransactionsSource, - target: &TestTransactionsTarget, - pre_relayed: RelayedBlockTransactions, - ) -> Result { - async_std::task::block_on(relay_block_transactions( - source, - target, - &TestBlock( - test_block_id(), - vec![test_transaction(0), test_transaction(1), test_transaction(2)], - ), - pre_relayed, - )) - .map_err(|(_, transactions)| transactions) - } - - #[test] - fn relay_block_transactions_process_all_transactions() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - // let's only accept tx#1 - target - .data - .lock() - .transactions_to_accept - .remove(&test_transaction_hash(0)); - target - .data - .lock() - .transactions_to_accept - .insert(test_transaction_hash(1)); - - let relayed_transactions = test_relay_block_transactions(&source, &target, Default::default()); - assert_eq!( - relayed_transactions, - Ok(RelayedBlockTransactions { - processed: 3, - relayed: 1, - failed: 0, - }), - ); - assert_eq!( - target.data.lock().submitted_proofs, - vec![TestTransactionProof(test_transaction_hash(1))], - ); - } - - #[test] - fn relay_block_transactions_ignores_transaction_failure() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks 
allowed"))); - - // let's reject proof for tx#0 - source - .data - .lock() - .proofs_to_fail - .insert(test_transaction_hash(0), TestError(false)); - - let relayed_transactions = test_relay_block_transactions(&source, &target, Default::default()); - assert_eq!( - relayed_transactions, - Ok(RelayedBlockTransactions { - processed: 3, - relayed: 0, - failed: 1, - }), - ); - assert_eq!(target.data.lock().submitted_proofs, vec![],); - } - - #[test] - fn relay_block_transactions_fails_on_connection_error() { - let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); - - // fail with connection error when preparing proof for tx#1 - source - .data - .lock() - .proofs_to_fail - .insert(test_transaction_hash(1), TestError(true)); - - let relayed_transactions = test_relay_block_transactions(&source, &target, Default::default()); - assert_eq!( - relayed_transactions, - Err(RelayedBlockTransactions { - processed: 1, - relayed: 1, - failed: 0, - }), - ); - assert_eq!( - target.data.lock().submitted_proofs, - vec![TestTransactionProof(test_transaction_hash(0))], - ); - - // now do not fail on tx#2 - source.data.lock().proofs_to_fail.clear(); - // and also relay tx#3 - target - .data - .lock() - .transactions_to_accept - .insert(test_transaction_hash(2)); - - let relayed_transactions = test_relay_block_transactions(&source, &target, relayed_transactions.unwrap_err()); - assert_eq!( - relayed_transactions, - Ok(RelayedBlockTransactions { - processed: 3, - relayed: 2, - failed: 0, - }), - ); - assert_eq!( - target.data.lock().submitted_proofs, - vec![ - TestTransactionProof(test_transaction_hash(0)), - TestTransactionProof(test_transaction_hash(2)) - ], - ); - } -} diff --git a/polkadot/bridges/relays/exchange-relay/src/exchange_loop.rs b/polkadot/bridges/relays/exchange-relay/src/exchange_loop.rs deleted file mode 100644 index 
06f4d3f40ab015c58a3e009bfe8f3cfc875c8f8e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/exchange-relay/src/exchange_loop.rs +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relaying proofs of exchange transactions. - -use crate::exchange::{ - relay_block_transactions, BlockNumberOf, RelayedBlockTransactions, SourceClient, TargetClient, - TransactionProofPipeline, -}; -use crate::exchange_loop_metrics::ExchangeLoopMetrics; - -use backoff::backoff::Backoff; -use futures::{future::FutureExt, select}; -use num_traits::One; -use relay_utils::{ - metrics::{start as metrics_start, GlobalMetrics, MetricsParams}, - retry_backoff, FailedClient, MaybeConnectionError, -}; -use std::future::Future; - -/// Transactions proofs relay state. -#[derive(Debug)] -pub struct TransactionProofsRelayState { - /// Number of last header we have processed so far. - pub best_processed_header_number: BlockNumber, -} - -/// Transactions proofs relay storage. -pub trait TransactionProofsRelayStorage: Clone { - /// Associated block number. - type BlockNumber; - - /// Get relay state. - fn state(&self) -> TransactionProofsRelayState; - /// Update relay state. 
- fn set_state(&mut self, state: &TransactionProofsRelayState); -} - -/// In-memory storage for auto-relay loop. -#[derive(Debug, Clone)] -pub struct InMemoryStorage { - best_processed_header_number: BlockNumber, -} - -impl InMemoryStorage { - /// Created new in-memory storage with given best processed block number. - pub fn new(best_processed_header_number: BlockNumber) -> Self { - InMemoryStorage { - best_processed_header_number, - } - } -} - -impl TransactionProofsRelayStorage for InMemoryStorage { - type BlockNumber = BlockNumber; - - fn state(&self) -> TransactionProofsRelayState { - TransactionProofsRelayState { - best_processed_header_number: self.best_processed_header_number, - } - } - - fn set_state(&mut self, state: &TransactionProofsRelayState) { - self.best_processed_header_number = state.best_processed_header_number; - } -} - -/// Run proofs synchronization. -pub fn run( - storage: impl TransactionProofsRelayStorage>, - source_client: impl SourceClient

, - target_client: impl TargetClient

, - metrics_params: Option, - exit_signal: impl Future, -) { - let exit_signal = exit_signal.shared(); - let metrics_global = GlobalMetrics::default(); - let metrics_exch = ExchangeLoopMetrics::default(); - let metrics_enabled = metrics_params.is_some(); - metrics_start( - format!("{}_to_{}_Exchange", P::SOURCE_NAME, P::TARGET_NAME), - metrics_params, - &metrics_global, - &metrics_exch, - ); - - relay_utils::relay_loop::run( - relay_utils::relay_loop::RECONNECT_DELAY, - source_client, - target_client, - |source_client, target_client| { - run_until_connection_lost( - storage.clone(), - source_client, - target_client, - if metrics_enabled { - Some(metrics_global.clone()) - } else { - None - }, - if metrics_enabled { - Some(metrics_exch.clone()) - } else { - None - }, - exit_signal.clone(), - ) - }, - ); -} - -/// Run proofs synchronization. -async fn run_until_connection_lost( - mut storage: impl TransactionProofsRelayStorage>, - source_client: impl SourceClient

, - target_client: impl TargetClient

, - metrics_global: Option, - metrics_exch: Option, - exit_signal: impl Future, -) -> Result<(), FailedClient> { - let mut retry_backoff = retry_backoff(); - let mut state = storage.state(); - let mut current_finalized_block = None; - - let exit_signal = exit_signal.fuse(); - - futures::pin_mut!(exit_signal); - - loop { - let iteration_result = run_loop_iteration( - &mut storage, - &source_client, - &target_client, - &mut state, - &mut current_finalized_block, - metrics_exch.as_ref(), - ) - .await; - - if let Some(ref metrics_global) = metrics_global { - metrics_global.update().await; - } - - if let Err((is_connection_error, failed_client)) = iteration_result { - if is_connection_error { - return Err(failed_client); - } - - let retry_timeout = retry_backoff - .next_backoff() - .unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY); - select! { - _ = async_std::task::sleep(retry_timeout).fuse() => {}, - _ = exit_signal => return Ok(()), - } - } else { - retry_backoff.reset(); - - select! { - _ = source_client.tick().fuse() => {}, - _ = exit_signal => return Ok(()), - } - } - } -} - -/// Run exchange loop until we need to break. -async fn run_loop_iteration( - storage: &mut impl TransactionProofsRelayStorage>, - source_client: &impl SourceClient

, - target_client: &impl TargetClient

, - state: &mut TransactionProofsRelayState>, - current_finalized_block: &mut Option<(P::Block, RelayedBlockTransactions)>, - exchange_loop_metrics: Option<&ExchangeLoopMetrics>, -) -> Result<(), (bool, FailedClient)> { - let best_finalized_header_id = match target_client.best_finalized_header_id().await { - Ok(best_finalized_header_id) => { - log::debug!( - target: "bridge", - "Got best finalized {} block from {} node: {:?}", - P::SOURCE_NAME, - P::TARGET_NAME, - best_finalized_header_id, - ); - - best_finalized_header_id - } - Err(err) => { - log::error!( - target: "bridge", - "Failed to retrieve best {} header id from {} node: {:?}. Going to retry...", - P::SOURCE_NAME, - P::TARGET_NAME, - err, - ); - - return Err((err.is_connection_error(), FailedClient::Target)); - } - }; - - loop { - // if we already have some finalized block body, try to relay its transactions - if let Some((block, relayed_transactions)) = current_finalized_block.take() { - let result = relay_block_transactions(source_client, target_client, &block, relayed_transactions).await; - - match result { - Ok(relayed_transactions) => { - log::info!( - target: "bridge", - "Relay has processed {} block #{}. Total/Relayed/Failed transactions: {}/{}/{}", - P::SOURCE_NAME, - state.best_processed_header_number, - relayed_transactions.processed, - relayed_transactions.relayed, - relayed_transactions.failed, - ); - - state.best_processed_header_number = state.best_processed_header_number + One::one(); - storage.set_state(state); - - if let Some(ref exchange_loop_metrics) = exchange_loop_metrics { - exchange_loop_metrics.update::

( - state.best_processed_header_number, - best_finalized_header_id.0, - relayed_transactions, - ); - } - - // we have just updated state => proceed to next block retrieval - } - Err((failed_client, relayed_transactions)) => { - *current_finalized_block = Some((block, relayed_transactions)); - return Err((true, failed_client)); - } - } - } - - // we may need to retrieve finalized block body from source node - if best_finalized_header_id.0 > state.best_processed_header_number { - let next_block_number = state.best_processed_header_number + One::one(); - let result = source_client.block_by_number(next_block_number).await; - - match result { - Ok(block) => { - *current_finalized_block = Some((block, RelayedBlockTransactions::default())); - - // we have received new finalized block => go back to relay its transactions - continue; - } - Err(err) => { - log::error!( - target: "bridge", - "Failed to retrieve canonical block #{} from {} node: {:?}. Going to retry...", - next_block_number, - P::SOURCE_NAME, - err, - ); - - return Err((err.is_connection_error(), FailedClient::Source)); - } - } - } - - // there are no any transactions we need to relay => wait for new data - return Ok(()); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::exchange::tests::{ - test_next_block, test_next_block_id, test_transaction_hash, TestTransactionProof, TestTransactionsSource, - TestTransactionsTarget, - }; - use futures::{future::FutureExt, stream::StreamExt}; - - #[test] - fn exchange_loop_is_able_to_relay_proofs() { - let storage = InMemoryStorage { - best_processed_header_number: 0, - }; - let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no target ticks allowed"))); - let target_data = target.data.clone(); - let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded(); - - let source = TestTransactionsSource::new(Box::new(move |data| { - let transaction1_relayed = target_data - .lock() - .submitted_proofs - 
.contains(&TestTransactionProof(test_transaction_hash(0))); - let transaction2_relayed = target_data - .lock() - .submitted_proofs - .contains(&TestTransactionProof(test_transaction_hash(1))); - match (transaction1_relayed, transaction2_relayed) { - (true, true) => exit_sender.unbounded_send(()).unwrap(), - (true, false) => { - data.block = Ok(test_next_block()); - target_data.lock().best_finalized_header_id = Ok(test_next_block_id()); - target_data - .lock() - .transactions_to_accept - .insert(test_transaction_hash(1)); - } - _ => (), - } - })); - - run( - storage, - source, - target, - None, - exit_receiver.into_future().map(|(_, _)| ()), - ); - } -} diff --git a/polkadot/bridges/relays/exchange-relay/src/exchange_loop_metrics.rs b/polkadot/bridges/relays/exchange-relay/src/exchange_loop_metrics.rs deleted file mode 100644 index bf8f0243b693af37e1c48c8782dd67c2c78a4ce4..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/exchange-relay/src/exchange_loop_metrics.rs +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Metrics for currency-exchange relay loop. 
- -use crate::exchange::{BlockNumberOf, RelayedBlockTransactions, TransactionProofPipeline}; -use relay_utils::metrics::{register, Counter, CounterVec, GaugeVec, Metrics, Opts, Registry, U64}; - -/// Exchange transactions relay metrics. -#[derive(Clone)] -pub struct ExchangeLoopMetrics { - /// Best finalized block numbers - "processed" and "known". - best_block_numbers: GaugeVec, - /// Number of processed blocks ("total"). - processed_blocks: Counter, - /// Number of processed transactions ("total", "relayed" and "failed"). - processed_transactions: CounterVec, -} - -impl Metrics for ExchangeLoopMetrics { - fn register(&self, registry: &Registry) -> Result<(), String> { - register(self.best_block_numbers.clone(), registry).map_err(|e| e.to_string())?; - register(self.processed_blocks.clone(), registry).map_err(|e| e.to_string())?; - register(self.processed_transactions.clone(), registry).map_err(|e| e.to_string())?; - Ok(()) - } -} - -impl Default for ExchangeLoopMetrics { - fn default() -> Self { - ExchangeLoopMetrics { - best_block_numbers: GaugeVec::new( - Opts::new("best_block_numbers", "Best finalized block numbers"), - &["type"], - ) - .expect("metric is static and thus valid; qed"), - processed_blocks: Counter::new("processed_blocks", "Total number of processed blocks") - .expect("metric is static and thus valid; qed"), - processed_transactions: CounterVec::new( - Opts::new("processed_transactions", "Total number of processed transactions"), - &["type"], - ) - .expect("metric is static and thus valid; qed"), - } - } -} - -impl ExchangeLoopMetrics { - /// Update metrics when single block is relayed. - pub fn update( - &self, - best_processed_block_number: BlockNumberOf

, - best_known_block_number: BlockNumberOf

, - relayed_transactions: RelayedBlockTransactions, - ) { - self.best_block_numbers - .with_label_values(&["processed"]) - .set(best_processed_block_number.into()); - self.best_block_numbers - .with_label_values(&["known"]) - .set(best_known_block_number.into()); - - self.processed_blocks.inc(); - - self.processed_transactions - .with_label_values(&["total"]) - .inc_by(relayed_transactions.processed as _); - self.processed_transactions - .with_label_values(&["relayed"]) - .inc_by(relayed_transactions.relayed as _); - self.processed_transactions - .with_label_values(&["failed"]) - .inc_by(relayed_transactions.failed as _); - } -} diff --git a/polkadot/bridges/relays/exchange-relay/src/lib.rs b/polkadot/bridges/relays/exchange-relay/src/lib.rs deleted file mode 100644 index f975ef2aa0ff4240172b86a07c6b5dfac803d501..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/exchange-relay/src/lib.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relaying [`currency-exchange`](../pallet_bridge_currency_exchange/index.html) application -//! specific data. Currency exchange application allows exchanging tokens between bridged chains. -//! This module provides entrypoints for crafting and submitting (single and multiple) -//! 
proof-of-exchange-at-source-chain transaction(s) to target chain. - -#![warn(missing_docs)] - -pub mod exchange; -pub mod exchange_loop; -pub mod exchange_loop_metrics; diff --git a/polkadot/bridges/relays/headers-relay/Cargo.toml b/polkadot/bridges/relays/headers-relay/Cargo.toml deleted file mode 100644 index 31d3166a99781a013c8211d7b68586e778e63ffb..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/headers-relay/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "headers-relay" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -async-std = "1.6.5" -async-trait = "0.1.40" -backoff = "0.2" -futures = "0.3.5" -linked-hash-map = "0.5.3" -log = "0.4.11" -num-traits = "0.2" -parking_lot = "0.11.0" -relay-utils = { path = "../utils" } diff --git a/polkadot/bridges/relays/headers-relay/src/headers.rs b/polkadot/bridges/relays/headers-relay/src/headers.rs deleted file mode 100644 index 99ccc197afc18f88420d60427165c905647850a2..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/headers-relay/src/headers.rs +++ /dev/null @@ -1,1721 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! 
Headers queue - the intermediate buffer that is filled when headers are read -//! from the source chain. Headers are removed from the queue once they become -//! known to the target chain. Inside, there are several sub-queues, where headers -//! may stay until source/target chain state isn't updated. When a header reaches the -//! `ready` sub-queue, it may be submitted to the target chain. - -use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SourceHeader}; - -use linked_hash_map::LinkedHashMap; -use num_traits::{One, Zero}; -use relay_utils::HeaderId; -use std::{ - collections::{btree_map::Entry as BTreeMapEntry, hash_map::Entry as HashMapEntry, BTreeMap, HashMap, HashSet}, - time::{Duration, Instant}, -}; - -type HeadersQueue

= - BTreeMap<

::Number, HashMap<

::Hash, QueuedHeader

>>; -type SyncedChildren

= - BTreeMap<

::Number, HashMap<

::Hash, HashSet>>>; -type KnownHeaders

= - BTreeMap<

::Number, HashMap<

::Hash, HeaderStatus>>; - -/// We're trying to fetch completion data for single header at this interval. -const RETRY_FETCH_COMPLETION_INTERVAL: Duration = Duration::from_secs(20); - -/// Headers queue. -#[derive(Debug)] -pub struct QueuedHeaders { - /// Headers that are received from source node, but we (native sync code) have - /// never seen their parents. So we need to check if we can/should submit this header. - maybe_orphan: HeadersQueue

, - /// Headers that are received from source node, and we (native sync code) have - /// checked that Substrate runtime doesn't know their parents. So we need to submit parents - /// first. - orphan: HeadersQueue

, - /// Headers that are ready to be submitted to target node, but we need to check - /// whether submission requires extra data to be provided. - maybe_extra: HeadersQueue

, - /// Headers that are ready to be submitted to target node, but we need to retrieve - /// extra data first. - extra: HeadersQueue

, - /// Headers that are ready to be submitted to target node. - ready: HeadersQueue

, - /// Headers that are ready to be submitted to target node, but their ancestor is incomplete. - /// Thus we're waiting for these ancestors to be completed first. - /// Note that the incomplete header itself is synced and it isn't in this queue. - incomplete: HeadersQueue

, - /// Headers that are (we believe) currently submitted to target node by our, - /// not-yet mined transactions. - submitted: HeadersQueue

, - /// Synced headers childrens. We need it to support case when header is synced, but some of - /// its parents are incomplete. - synced_children: SyncedChildren

, - /// Pointers to all headers that we ever seen and we believe we can touch in the future. - known_headers: KnownHeaders

, - /// Headers that are waiting for completion data from source node. Mapped (and auto-sorted - /// by) to the last fetch time. - incomplete_headers: LinkedHashMap, Option>, - /// Headers that are waiting to be completed at target node. Auto-sorted by insertion time. - completion_data: LinkedHashMap, P::Completion>, - /// Best synced block number. - best_synced_number: P::Number, - /// Pruned blocks border. We do not store or accept any blocks with number less than - /// this number. - prune_border: P::Number, -} - -/// Header completion data. -#[derive(Debug)] -struct HeaderCompletion { - /// Last time when we tried to upload completion data to target node, if ever. - pub last_upload_time: Option, - /// Completion data. - pub completion: Completion, -} - -impl Default for QueuedHeaders

{ - fn default() -> Self { - QueuedHeaders { - maybe_orphan: HeadersQueue::new(), - orphan: HeadersQueue::new(), - maybe_extra: HeadersQueue::new(), - extra: HeadersQueue::new(), - ready: HeadersQueue::new(), - incomplete: HeadersQueue::new(), - submitted: HeadersQueue::new(), - synced_children: SyncedChildren::

::new(), - known_headers: KnownHeaders::

::new(), - incomplete_headers: LinkedHashMap::new(), - completion_data: LinkedHashMap::new(), - best_synced_number: Zero::zero(), - prune_border: Zero::zero(), - } - } -} - -impl QueuedHeaders

{ - /// Returns prune border. - #[cfg(test)] - pub fn prune_border(&self) -> P::Number { - self.prune_border - } - - /// Returns number of headers that are currently in given queue. - pub fn headers_in_status(&self, status: HeaderStatus) -> usize { - match status { - HeaderStatus::Unknown | HeaderStatus::Synced => 0, - HeaderStatus::MaybeOrphan => self - .maybe_orphan - .values() - .fold(0, |total, headers| total + headers.len()), - HeaderStatus::Orphan => self.orphan.values().fold(0, |total, headers| total + headers.len()), - HeaderStatus::MaybeExtra => self - .maybe_extra - .values() - .fold(0, |total, headers| total + headers.len()), - HeaderStatus::Extra => self.extra.values().fold(0, |total, headers| total + headers.len()), - HeaderStatus::Ready => self.ready.values().fold(0, |total, headers| total + headers.len()), - HeaderStatus::Incomplete => self.incomplete.values().fold(0, |total, headers| total + headers.len()), - HeaderStatus::Submitted => self.submitted.values().fold(0, |total, headers| total + headers.len()), - } - } - - /// Returns number of headers that are currently in the queue. - pub fn total_headers(&self) -> usize { - self.maybe_orphan - .values() - .fold(0, |total, headers| total + headers.len()) - + self.orphan.values().fold(0, |total, headers| total + headers.len()) - + self - .maybe_extra - .values() - .fold(0, |total, headers| total + headers.len()) - + self.extra.values().fold(0, |total, headers| total + headers.len()) - + self.ready.values().fold(0, |total, headers| total + headers.len()) - + self.incomplete.values().fold(0, |total, headers| total + headers.len()) - } - - /// Returns number of best block in the queue. 
- pub fn best_queued_number(&self) -> P::Number { - std::cmp::max( - self.maybe_orphan.keys().next_back().cloned().unwrap_or_else(Zero::zero), - std::cmp::max( - self.orphan.keys().next_back().cloned().unwrap_or_else(Zero::zero), - std::cmp::max( - self.maybe_extra.keys().next_back().cloned().unwrap_or_else(Zero::zero), - std::cmp::max( - self.extra.keys().next_back().cloned().unwrap_or_else(Zero::zero), - std::cmp::max( - self.ready.keys().next_back().cloned().unwrap_or_else(Zero::zero), - std::cmp::max( - self.incomplete.keys().next_back().cloned().unwrap_or_else(Zero::zero), - self.submitted.keys().next_back().cloned().unwrap_or_else(Zero::zero), - ), - ), - ), - ), - ), - ) - } - - /// Returns number of best synced block we have ever seen. It is either less - /// than `best_queued_number()`, or points to last synced block if queue is empty. - pub fn best_synced_number(&self) -> P::Number { - self.best_synced_number - } - - /// Returns synchronization status of the header. - pub fn status(&self, id: &HeaderIdOf

) -> HeaderStatus { - self.known_headers - .get(&id.0) - .and_then(|x| x.get(&id.1)) - .cloned() - .unwrap_or(HeaderStatus::Unknown) - } - - /// Get oldest header from given queue. - pub fn header(&self, status: HeaderStatus) -> Option<&QueuedHeader

> { - match status { - HeaderStatus::Unknown | HeaderStatus::Synced => None, - HeaderStatus::MaybeOrphan => oldest_header(&self.maybe_orphan), - HeaderStatus::Orphan => oldest_header(&self.orphan), - HeaderStatus::MaybeExtra => oldest_header(&self.maybe_extra), - HeaderStatus::Extra => oldest_header(&self.extra), - HeaderStatus::Ready => oldest_header(&self.ready), - HeaderStatus::Incomplete => oldest_header(&self.incomplete), - HeaderStatus::Submitted => oldest_header(&self.submitted), - } - } - - /// Get oldest headers from given queue until functor will return false. - pub fn headers( - &self, - status: HeaderStatus, - f: impl FnMut(&QueuedHeader

) -> bool, - ) -> Option>> { - match status { - HeaderStatus::Unknown | HeaderStatus::Synced => None, - HeaderStatus::MaybeOrphan => oldest_headers(&self.maybe_orphan, f), - HeaderStatus::Orphan => oldest_headers(&self.orphan, f), - HeaderStatus::MaybeExtra => oldest_headers(&self.maybe_extra, f), - HeaderStatus::Extra => oldest_headers(&self.extra, f), - HeaderStatus::Ready => oldest_headers(&self.ready, f), - HeaderStatus::Incomplete => oldest_headers(&self.incomplete, f), - HeaderStatus::Submitted => oldest_headers(&self.submitted, f), - } - } - - /// Appends new header, received from the source node, to the queue. - pub fn header_response(&mut self, header: P::Header) { - let id = header.id(); - let status = self.status(&id); - if status != HeaderStatus::Unknown { - log::debug!( - target: "bridge", - "Ignoring new {} header: {:?}. Status is {:?}.", - P::SOURCE_NAME, - id, - status, - ); - return; - } - - if id.0 < self.prune_border { - log::debug!( - target: "bridge", - "Ignoring ancient new {} header: {:?}.", - P::SOURCE_NAME, - id, - ); - return; - } - - let parent_id = header.parent_id(); - let parent_status = self.status(&parent_id); - let header = QueuedHeader::new(header); - - let status = match parent_status { - HeaderStatus::Unknown | HeaderStatus::MaybeOrphan => { - insert_header(&mut self.maybe_orphan, id, header); - HeaderStatus::MaybeOrphan - } - HeaderStatus::Orphan => { - insert_header(&mut self.orphan, id, header); - HeaderStatus::Orphan - } - HeaderStatus::MaybeExtra - | HeaderStatus::Extra - | HeaderStatus::Ready - | HeaderStatus::Incomplete - | HeaderStatus::Submitted - | HeaderStatus::Synced => { - insert_header(&mut self.maybe_extra, id, header); - HeaderStatus::MaybeExtra - } - }; - - self.known_headers.entry(id.0).or_default().insert(id.1, status); - log::debug!( - target: "bridge", - "Queueing new {} header: {:?}. Queue: {:?}.", - P::SOURCE_NAME, - id, - status, - ); - } - - /// Receive best header from the target node. 
- pub fn target_best_header_response(&mut self, id: &HeaderIdOf

) { - self.header_synced(id) - } - - /// Receive target node response for MaybeOrphan request. - pub fn maybe_orphan_response(&mut self, id: &HeaderIdOf

, response: bool) { - if !response { - move_header_descendants::

( - &mut [&mut self.maybe_orphan], - &mut self.orphan, - &mut self.known_headers, - HeaderStatus::Orphan, - &id, - ); - return; - } - - move_header_descendants::

( - &mut [&mut self.maybe_orphan, &mut self.orphan], - &mut self.maybe_extra, - &mut self.known_headers, - HeaderStatus::MaybeExtra, - &id, - ); - } - - /// Receive target node response for MaybeExtra request. - pub fn maybe_extra_response(&mut self, id: &HeaderIdOf

, response: bool) { - let (destination_status, destination_queue) = if response { - (HeaderStatus::Extra, &mut self.extra) - } else if self.is_parent_incomplete(id) { - (HeaderStatus::Incomplete, &mut self.incomplete) - } else { - (HeaderStatus::Ready, &mut self.ready) - }; - - move_header( - &mut self.maybe_extra, - destination_queue, - &mut self.known_headers, - destination_status, - &id, - |header| header, - ); - } - - /// Receive extra from source node. - pub fn extra_response(&mut self, id: &HeaderIdOf

, extra: P::Extra) { - let (destination_status, destination_queue) = if self.is_parent_incomplete(id) { - (HeaderStatus::Incomplete, &mut self.incomplete) - } else { - (HeaderStatus::Ready, &mut self.ready) - }; - - // move header itself from extra to ready queue - move_header( - &mut self.extra, - destination_queue, - &mut self.known_headers, - destination_status, - id, - |header| header.set_extra(extra), - ); - } - - /// Receive completion response from source node. - pub fn completion_response(&mut self, id: &HeaderIdOf

, completion: Option) { - let completion = match completion { - Some(completion) => completion, - None => { - log::debug!( - target: "bridge", - "{} Node is still missing completion data for header: {:?}. Will retry later.", - P::SOURCE_NAME, - id, - ); - - return; - } - }; - - // do not remove from `incomplete_headers` here, because otherwise we'll miss - // completion 'notification' - // this could lead to duplicate completion retrieval (if completion transaction isn't mined - // for too long) - // - // instead, we're moving entry to the end of the queue, so that completion data won't be - // refetched instantly - if self.incomplete_headers.remove(id).is_some() { - log::debug!( - target: "bridge", - "Received completion data from {} for header: {:?}", - P::SOURCE_NAME, - id, - ); - - self.completion_data.insert(*id, completion); - self.incomplete_headers.insert(*id, Some(Instant::now())); - } - } - - /// When header is submitted to target node. - pub fn headers_submitted(&mut self, ids: Vec>) { - for id in ids { - move_header( - &mut self.ready, - &mut self.submitted, - &mut self.known_headers, - HeaderStatus::Submitted, - &id, - |header| header, - ); - } - } - - /// When header completion data is sent to target node. - pub fn header_completed(&mut self, id: &HeaderIdOf

) { - if self.completion_data.remove(id).is_some() { - log::debug!( - target: "bridge", - "Sent completion data to {} for header: {:?}", - P::TARGET_NAME, - id, - ); - - // transaction can be dropped by target chain nodes => it would never be mined - // - // in current implementation the sync loop would wait for some time && if best - // **source** header won't change on **target** node, then the sync will be restarted - // => we'll resubmit the same completion data again (the same is true for submitted - // headers) - // - // the other option would be to track emitted transactions at least on target node, - // but it won't give us 100% guarantee anyway - // - // => we're just dropping completion data just after it has been submitted - } - } - - /// Marks given headers incomplete. - pub fn add_incomplete_headers(&mut self, make_header_incomplete: bool, new_incomplete_headers: Vec>) { - for new_incomplete_header in new_incomplete_headers { - if make_header_incomplete { - self.header_synced(&new_incomplete_header); - } - - let move_origins = select_synced_children::

(&self.synced_children, &new_incomplete_header); - let move_origins = move_origins.into_iter().chain(std::iter::once(new_incomplete_header)); - for move_origin in move_origins { - move_header_descendants::

( - &mut [&mut self.ready, &mut self.submitted], - &mut self.incomplete, - &mut self.known_headers, - HeaderStatus::Incomplete, - &move_origin, - ); - } - - if make_header_incomplete { - log::debug!( - target: "bridge", - "Scheduling completion data retrieval for header: {:?}", - new_incomplete_header, - ); - - self.incomplete_headers.insert(new_incomplete_header, None); - } - } - } - - /// When incomplete headers ids are receved from target node. - pub fn incomplete_headers_response(&mut self, ids: HashSet>) { - // all new incomplete headers are marked Synced and all their descendants - // are moved from Ready/Submitted to Incomplete queue - let new_incomplete_headers = ids - .iter() - .filter(|id| !self.incomplete_headers.contains_key(id) && !self.completion_data.contains_key(id)) - .cloned() - .collect::>(); - self.add_incomplete_headers(true, new_incomplete_headers); - - // for all headers that were incompleted previously, but now are completed, we move - // all descendants from incomplete to ready - let just_completed_headers = self - .incomplete_headers - .keys() - .chain(self.completion_data.keys()) - .filter(|id| !ids.contains(id)) - .cloned() - .collect::>(); - for just_completed_header in just_completed_headers { - // sub2eth rejects H if H.Parent is incomplete - // sub2sub allows 'syncing' headers like that - // => let's check if there are some synced children of just completed header - let move_origins = select_synced_children::

(&self.synced_children, &just_completed_header); - let move_origins = move_origins.into_iter().chain(std::iter::once(just_completed_header)); - for move_origin in move_origins { - move_header_descendants::

( - &mut [&mut self.incomplete], - &mut self.ready, - &mut self.known_headers, - HeaderStatus::Ready, - &move_origin, - ); - } - - log::debug!( - target: "bridge", - "Completion data is no longer required for header: {:?}", - just_completed_header, - ); - - self.incomplete_headers.remove(&just_completed_header); - self.completion_data.remove(&just_completed_header); - } - } - - /// Returns true if given header requires completion data. - pub fn requires_completion_data(&self, id: &HeaderIdOf

) -> bool { - self.incomplete_headers.contains_key(id) - } - - /// Returns id of the header for which we want to fetch completion data. - pub fn incomplete_header(&mut self) -> Option> { - queued_incomplete_header(&mut self.incomplete_headers, |last_fetch_time| { - let retry = match *last_fetch_time { - Some(last_fetch_time) => last_fetch_time.elapsed() > RETRY_FETCH_COMPLETION_INTERVAL, - None => true, - }; - - if retry { - *last_fetch_time = Some(Instant::now()); - } - - retry - }) - .map(|(id, _)| id) - } - - /// Returns header completion data to upload to target node. - pub fn header_to_complete(&mut self) -> Option<(HeaderIdOf

, &P::Completion)> { - queued_incomplete_header(&mut self.completion_data, |_| true) - } - - /// Prune and never accept headers before this block. - pub fn prune(&mut self, prune_border: P::Number) { - if prune_border <= self.prune_border { - return; - } - - prune_queue(&mut self.maybe_orphan, prune_border); - prune_queue(&mut self.orphan, prune_border); - prune_queue(&mut self.maybe_extra, prune_border); - prune_queue(&mut self.extra, prune_border); - prune_queue(&mut self.ready, prune_border); - prune_queue(&mut self.submitted, prune_border); - prune_queue(&mut self.incomplete, prune_border); - self.synced_children = self.synced_children.split_off(&prune_border); - prune_known_headers::

(&mut self.known_headers, prune_border); - self.prune_border = prune_border; - } - - /// Forgets all ever known headers. - pub fn clear(&mut self) { - self.maybe_orphan.clear(); - self.orphan.clear(); - self.maybe_extra.clear(); - self.extra.clear(); - self.ready.clear(); - self.incomplete.clear(); - self.submitted.clear(); - self.synced_children.clear(); - self.known_headers.clear(); - self.best_synced_number = Zero::zero(); - self.prune_border = Zero::zero(); - } - - /// Returns true if parent of this header is either incomplete or waiting for - /// its own incomplete ancestor to be completed. - fn is_parent_incomplete(&self, id: &HeaderIdOf

) -> bool { - let status = self.status(id); - let header = match status { - HeaderStatus::MaybeOrphan => header(&self.maybe_orphan, id), - HeaderStatus::Orphan => header(&self.orphan, id), - HeaderStatus::MaybeExtra => header(&self.maybe_extra, id), - HeaderStatus::Extra => header(&self.extra, id), - HeaderStatus::Ready => header(&self.ready, id), - HeaderStatus::Incomplete => header(&self.incomplete, id), - HeaderStatus::Submitted => header(&self.submitted, id), - HeaderStatus::Unknown => return false, - HeaderStatus::Synced => return false, - }; - - match header { - Some(header) => { - let parent_id = header.header().parent_id(); - self.incomplete_headers.contains_key(&parent_id) - || self.completion_data.contains_key(&parent_id) - || self.status(&parent_id) == HeaderStatus::Incomplete - } - None => false, - } - } - - /// When we receive new Synced header from target node. - fn header_synced(&mut self, id: &HeaderIdOf

) { - // update best synced block number - self.best_synced_number = std::cmp::max(self.best_synced_number, id.0); - - // all ancestors of this header are now synced => let's remove them from - // queues - let mut current = *id; - let mut id_processed = false; - let mut previous_current = None; - loop { - let header = match self.status(¤t) { - HeaderStatus::Unknown => break, - HeaderStatus::MaybeOrphan => remove_header(&mut self.maybe_orphan, ¤t), - HeaderStatus::Orphan => remove_header(&mut self.orphan, ¤t), - HeaderStatus::MaybeExtra => remove_header(&mut self.maybe_extra, ¤t), - HeaderStatus::Extra => remove_header(&mut self.extra, ¤t), - HeaderStatus::Ready => remove_header(&mut self.ready, ¤t), - HeaderStatus::Incomplete => remove_header(&mut self.incomplete, ¤t), - HeaderStatus::Submitted => remove_header(&mut self.submitted, ¤t), - HeaderStatus::Synced => break, - } - .expect("header has a given status; given queue has the header; qed"); - - // remember ids of all the children of the current header - let synced_children_entry = self - .synced_children - .entry(current.0) - .or_default() - .entry(current.1) - .or_default(); - let all_queues = [ - &self.maybe_orphan, - &self.orphan, - &self.maybe_extra, - &self.extra, - &self.ready, - &self.incomplete, - &self.submitted, - ]; - for queue in &all_queues { - let children_from_queue = queue - .get(&(current.0 + One::one())) - .map(|potential_children| { - potential_children - .values() - .filter(|potential_child| potential_child.header().parent_id() == current) - .map(|child| child.id()) - .collect::>() - }) - .unwrap_or_default(); - synced_children_entry.extend(children_from_queue); - } - if let Some(previous_current) = previous_current { - synced_children_entry.insert(previous_current); - } - - set_header_status::

(&mut self.known_headers, ¤t, HeaderStatus::Synced); - - previous_current = Some(current); - current = header.parent_id(); - id_processed = true; - } - - // remember that the header itself is synced - // (condition is here to avoid duplicate log messages) - if !id_processed { - set_header_status::

(&mut self.known_headers, &id, HeaderStatus::Synced); - } - - // now let's move all descendants from maybe_orphan && orphan queues to - // maybe_extra queue - move_header_descendants::

( - &mut [&mut self.maybe_orphan, &mut self.orphan], - &mut self.maybe_extra, - &mut self.known_headers, - HeaderStatus::MaybeExtra, - id, - ); - } -} - -/// Insert header to the queue. -fn insert_header(queue: &mut HeadersQueue

, id: HeaderIdOf

, header: QueuedHeader

) { - queue.entry(id.0).or_default().insert(id.1, header); -} - -/// Remove header from the queue. -fn remove_header(queue: &mut HeadersQueue

, id: &HeaderIdOf

) -> Option> { - let mut headers_at = match queue.entry(id.0) { - BTreeMapEntry::Occupied(headers_at) => headers_at, - BTreeMapEntry::Vacant(_) => return None, - }; - - let header = headers_at.get_mut().remove(&id.1); - if headers_at.get().is_empty() { - headers_at.remove(); - } - header -} - -/// Get header from the queue. -fn header<'a, P: HeadersSyncPipeline>(queue: &'a HeadersQueue

, id: &HeaderIdOf

) -> Option<&'a QueuedHeader

> { - queue.get(&id.0).and_then(|by_hash| by_hash.get(&id.1)) -} - -/// Move header from source to destination queue. -/// -/// Returns ID of parent header, if header has been moved, or None otherwise. -fn move_header( - source_queue: &mut HeadersQueue

, - destination_queue: &mut HeadersQueue

, - known_headers: &mut KnownHeaders

, - destination_status: HeaderStatus, - id: &HeaderIdOf

, - prepare: impl FnOnce(QueuedHeader

) -> QueuedHeader

, -) -> Option> { - let header = match remove_header(source_queue, id) { - Some(header) => prepare(header), - None => return None, - }; - - let parent_id = header.header().parent_id(); - destination_queue.entry(id.0).or_default().insert(id.1, header); - set_header_status::

(known_headers, id, destination_status); - - Some(parent_id) -} - -/// Move all descendant headers from the source to destination queue. -fn move_header_descendants( - source_queues: &mut [&mut HeadersQueue

], - destination_queue: &mut HeadersQueue

, - known_headers: &mut KnownHeaders

, - destination_status: HeaderStatus, - id: &HeaderIdOf

, -) { - let mut current_number = id.0 + One::one(); - let mut current_parents = HashSet::new(); - current_parents.insert(id.1); - - while !current_parents.is_empty() { - let mut next_parents = HashSet::new(); - for source_queue in source_queues.iter_mut() { - let mut source_entry = match source_queue.entry(current_number) { - BTreeMapEntry::Occupied(source_entry) => source_entry, - BTreeMapEntry::Vacant(_) => continue, - }; - - let mut headers_to_move = Vec::new(); - let children_at_number = source_entry.get().keys().cloned().collect::>(); - for key in children_at_number { - let entry = match source_entry.get_mut().entry(key) { - HashMapEntry::Occupied(entry) => entry, - HashMapEntry::Vacant(_) => unreachable!("iterating existing keys; qed"), - }; - - if current_parents.contains(&entry.get().header().parent_id().1) { - let header_to_move = entry.remove(); - let header_to_move_id = header_to_move.id(); - headers_to_move.push((header_to_move_id, header_to_move)); - set_header_status::

(known_headers, &header_to_move_id, destination_status); - } - } - - if source_entry.get().is_empty() { - source_entry.remove(); - } - - next_parents.extend(headers_to_move.iter().map(|(id, _)| id.1)); - - destination_queue - .entry(current_number) - .or_default() - .extend(headers_to_move.into_iter().map(|(id, h)| (id.1, h))) - } - - current_number = current_number + One::one(); - std::mem::swap(&mut current_parents, &mut next_parents); - } -} - -/// Selects (recursive) all synced children of given header. -fn select_synced_children( - synced_children: &SyncedChildren

, - id: &HeaderIdOf

, -) -> Vec> { - let mut result = Vec::new(); - let mut current_parents = HashSet::new(); - current_parents.insert(*id); - - while !current_parents.is_empty() { - let mut next_parents = HashSet::new(); - for current_parent in ¤t_parents { - let current_parent_synced_children = synced_children - .get(¤t_parent.0) - .and_then(|by_number_entry| by_number_entry.get(¤t_parent.1)); - if let Some(current_parent_synced_children) = current_parent_synced_children { - for current_parent_synced_child in current_parent_synced_children { - result.push(*current_parent_synced_child); - next_parents.insert(*current_parent_synced_child); - } - } - } - - let _ = std::mem::replace(&mut current_parents, next_parents); - } - - result -} - -/// Return oldest header from the queue. -fn oldest_header(queue: &HeadersQueue

) -> Option<&QueuedHeader

> { - queue.values().flat_map(|h| h.values()).next() -} - -/// Return oldest headers from the queue until functor will return false. -fn oldest_headers( - queue: &HeadersQueue

, - mut f: impl FnMut(&QueuedHeader

) -> bool, -) -> Option>> { - let result = queue - .values() - .flat_map(|h| h.values()) - .take_while(|h| f(h)) - .collect::>(); - if result.is_empty() { - None - } else { - Some(result) - } -} - -/// Forget all headers with number less than given. -fn prune_queue(queue: &mut HeadersQueue

, prune_border: P::Number) { - *queue = queue.split_off(&prune_border); -} - -/// Forget all known headers with number less than given. -fn prune_known_headers(known_headers: &mut KnownHeaders

, prune_border: P::Number) { - let new_known_headers = known_headers.split_off(&prune_border); - for (pruned_number, pruned_headers) in &*known_headers { - for pruned_hash in pruned_headers.keys() { - log::debug!(target: "bridge", "Pruning header {:?}.", HeaderId(*pruned_number, *pruned_hash)); - } - } - *known_headers = new_known_headers; -} - -/// Change header status. -fn set_header_status( - known_headers: &mut KnownHeaders

, - id: &HeaderIdOf

, - status: HeaderStatus, -) { - log::debug!( - target: "bridge", - "{} header {:?} is now {:?}", - P::SOURCE_NAME, - id, - status, - ); - *known_headers.entry(id.0).or_default().entry(id.1).or_insert(status) = status; -} - -/// Returns queued incomplete header with maximal elapsed time since last update. -fn queued_incomplete_header( - map: &mut LinkedHashMap, - filter: impl FnMut(&mut T) -> bool, -) -> Option<(Id, &T)> { - // TODO (#84): headers that have been just appended to the end of the queue would have to wait until - // all previous headers will be retried - - let retry_old_header = map - .front() - .map(|(key, _)| key.clone()) - .and_then(|key| map.get_mut(&key).map(filter)) - .unwrap_or(false); - if retry_old_header { - let (header_key, header) = map.pop_front().expect("we have checked that front() exists; qed"); - map.insert(header_key, header); - return map.back().map(|(id, data)| (id.clone(), data)); - } - - None -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use crate::sync_loop_tests::{TestHash, TestHeader, TestHeaderId, TestHeadersSyncPipeline, TestNumber}; - use crate::sync_types::QueuedHeader; - - pub(crate) fn header(number: TestNumber) -> QueuedHeader { - QueuedHeader::new(TestHeader { - number, - hash: hash(number), - parent_hash: hash(number - 1), - }) - } - - pub(crate) fn hash(number: TestNumber) -> TestHash { - number - } - - pub(crate) fn id(number: TestNumber) -> TestHeaderId { - HeaderId(number, hash(number)) - } - - #[test] - fn total_headers_works() { - // total headers just sums up number of headers in every queue - let mut queue = QueuedHeaders::::default(); - queue.maybe_orphan.entry(1).or_default().insert( - hash(1), - QueuedHeader::::new(Default::default()), - ); - queue.maybe_orphan.entry(1).or_default().insert( - hash(2), - QueuedHeader::::new(Default::default()), - ); - queue.maybe_orphan.entry(2).or_default().insert( - hash(3), - QueuedHeader::::new(Default::default()), - ); - 
queue.orphan.entry(3).or_default().insert( - hash(4), - QueuedHeader::::new(Default::default()), - ); - queue.maybe_extra.entry(4).or_default().insert( - hash(5), - QueuedHeader::::new(Default::default()), - ); - queue.ready.entry(5).or_default().insert( - hash(6), - QueuedHeader::::new(Default::default()), - ); - queue.incomplete.entry(6).or_default().insert( - hash(7), - QueuedHeader::::new(Default::default()), - ); - assert_eq!(queue.total_headers(), 7); - } - - #[test] - fn best_queued_number_works() { - // initially there are headers in MaybeOrphan queue only - let mut queue = QueuedHeaders::::default(); - queue.maybe_orphan.entry(1).or_default().insert( - hash(1), - QueuedHeader::::new(Default::default()), - ); - queue.maybe_orphan.entry(1).or_default().insert( - hash(2), - QueuedHeader::::new(Default::default()), - ); - queue.maybe_orphan.entry(3).or_default().insert( - hash(3), - QueuedHeader::::new(Default::default()), - ); - assert_eq!(queue.best_queued_number(), 3); - // and then there's better header in Orphan - queue.orphan.entry(10).or_default().insert( - hash(10), - QueuedHeader::::new(Default::default()), - ); - assert_eq!(queue.best_queued_number(), 10); - // and then there's better header in MaybeExtra - queue.maybe_extra.entry(20).or_default().insert( - hash(20), - QueuedHeader::::new(Default::default()), - ); - assert_eq!(queue.best_queued_number(), 20); - // and then there's better header in Ready - queue.ready.entry(30).or_default().insert( - hash(30), - QueuedHeader::::new(Default::default()), - ); - assert_eq!(queue.best_queued_number(), 30); - // and then there's better header in MaybeOrphan again - queue.maybe_orphan.entry(40).or_default().insert( - hash(40), - QueuedHeader::::new(Default::default()), - ); - assert_eq!(queue.best_queued_number(), 40); - // and then there's some header in Incomplete - queue.incomplete.entry(50).or_default().insert( - hash(50), - QueuedHeader::::new(Default::default()), - ); - 
assert_eq!(queue.best_queued_number(), 50); - } - - #[test] - fn status_works() { - // all headers are unknown initially - let mut queue = QueuedHeaders::::default(); - assert_eq!(queue.status(&id(10)), HeaderStatus::Unknown); - // and status is read from the KnownHeaders - queue - .known_headers - .entry(10) - .or_default() - .insert(hash(10), HeaderStatus::Ready); - assert_eq!(queue.status(&id(10)), HeaderStatus::Ready); - } - - #[test] - fn header_works() { - // initially we have oldest header #10 - let mut queue = QueuedHeaders::::default(); - queue.maybe_orphan.entry(10).or_default().insert(hash(1), header(100)); - assert_eq!( - queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, - hash(100) - ); - // inserting #20 changes nothing - queue.maybe_orphan.entry(20).or_default().insert(hash(1), header(101)); - assert_eq!( - queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, - hash(100) - ); - // inserting #5 makes it oldest - queue.maybe_orphan.entry(5).or_default().insert(hash(1), header(102)); - assert_eq!( - queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, - hash(102) - ); - } - - #[test] - fn header_response_works() { - // when parent is Synced, we insert to MaybeExtra - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Synced); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeExtra); - - // when parent is Ready, we insert to MaybeExtra - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Ready); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeExtra); - - // when parent is Receipts, we insert to MaybeExtra - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), 
HeaderStatus::Extra); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeExtra); - - // when parent is MaybeExtra, we insert to MaybeExtra - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeExtra); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeExtra); - - // when parent is Orphan, we insert to Orphan - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Orphan); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::Orphan); - - // when parent is MaybeOrphan, we insert to MaybeOrphan - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeOrphan); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeOrphan); - - // when parent is unknown, we insert to MaybeOrphan - let mut queue = QueuedHeaders::::default(); - queue.header_response(header(101).header().clone()); - assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeOrphan); - } - - #[test] - fn ancestors_are_synced_on_substrate_best_header_response() { - // let's say someone else has submitted transaction to bridge that changes - // its best block to #100. 
At this time we have: - // #100 in MaybeOrphan - // #99 in Orphan - // #98 in MaybeExtra - // #97 in Receipts - // #96 in Ready - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeOrphan); - queue - .maybe_orphan - .entry(100) - .or_default() - .insert(hash(100), header(100)); - queue - .known_headers - .entry(99) - .or_default() - .insert(hash(99), HeaderStatus::Orphan); - queue.orphan.entry(99).or_default().insert(hash(99), header(99)); - queue - .known_headers - .entry(98) - .or_default() - .insert(hash(98), HeaderStatus::MaybeExtra); - queue.maybe_extra.entry(98).or_default().insert(hash(98), header(98)); - queue - .known_headers - .entry(97) - .or_default() - .insert(hash(97), HeaderStatus::Extra); - queue.extra.entry(97).or_default().insert(hash(97), header(97)); - queue - .known_headers - .entry(96) - .or_default() - .insert(hash(96), HeaderStatus::Ready); - queue.ready.entry(96).or_default().insert(hash(96), header(96)); - queue.target_best_header_response(&id(100)); - - // then the #100 and all ancestors of #100 (#96..#99) are treated as synced - assert!(queue.maybe_orphan.is_empty()); - assert!(queue.orphan.is_empty()); - assert!(queue.maybe_extra.is_empty()); - assert!(queue.extra.is_empty()); - assert!(queue.ready.is_empty()); - assert_eq!(queue.known_headers.len(), 5); - assert!(queue - .known_headers - .values() - .all(|s| s.values().all(|s| *s == HeaderStatus::Synced))); - - // children of synced headers are stored - assert_eq!( - vec![id(97)], - queue.synced_children[&96][&hash(96)] - .iter() - .cloned() - .collect::>() - ); - assert_eq!( - vec![id(98)], - queue.synced_children[&97][&hash(97)] - .iter() - .cloned() - .collect::>() - ); - assert_eq!( - vec![id(99)], - queue.synced_children[&98][&hash(98)] - .iter() - .cloned() - .collect::>() - ); - assert_eq!( - vec![id(100)], - queue.synced_children[&99][&hash(99)] - .iter() - .cloned() - .collect::>() - ); - 
assert_eq!(0, queue.synced_children[&100][&hash(100)].len()); - } - - #[test] - fn descendants_are_moved_on_substrate_best_header_response() { - // let's say someone else has submitted transaction to bridge that changes - // its best block to #100. At this time we have: - // #101 in Orphan - // #102 in MaybeOrphan - // #103 in Orphan - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Orphan); - queue.orphan.entry(101).or_default().insert(hash(101), header(101)); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::MaybeOrphan); - queue - .maybe_orphan - .entry(102) - .or_default() - .insert(hash(102), header(102)); - queue - .known_headers - .entry(103) - .or_default() - .insert(hash(103), HeaderStatus::Orphan); - queue.orphan.entry(103).or_default().insert(hash(103), header(103)); - queue.target_best_header_response(&id(100)); - - // all descendants are moved to MaybeExtra - assert!(queue.maybe_orphan.is_empty()); - assert!(queue.orphan.is_empty()); - assert_eq!(queue.maybe_extra.len(), 3); - assert_eq!(queue.known_headers[&101][&hash(101)], HeaderStatus::MaybeExtra); - assert_eq!(queue.known_headers[&102][&hash(102)], HeaderStatus::MaybeExtra); - assert_eq!(queue.known_headers[&103][&hash(103)], HeaderStatus::MaybeExtra); - } - - #[test] - fn positive_maybe_orphan_response_works() { - // let's say we have: - // #100 in MaybeOrphan - // #101 in Orphan - // #102 in MaybeOrphan - // and we have asked for MaybeOrphan status of #100.parent (i.e. 
#99) - // and the response is: YES, #99 is known to the Substrate runtime - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeOrphan); - queue - .maybe_orphan - .entry(100) - .or_default() - .insert(hash(100), header(100)); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Orphan); - queue.orphan.entry(101).or_default().insert(hash(101), header(101)); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::MaybeOrphan); - queue - .maybe_orphan - .entry(102) - .or_default() - .insert(hash(102), header(102)); - queue.maybe_orphan_response(&id(99), true); - - // then all headers (#100..#103) are moved to the MaybeExtra queue - assert!(queue.orphan.is_empty()); - assert!(queue.maybe_orphan.is_empty()); - assert_eq!(queue.maybe_extra.len(), 3); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::MaybeExtra); - assert_eq!(queue.known_headers[&101][&hash(101)], HeaderStatus::MaybeExtra); - assert_eq!(queue.known_headers[&102][&hash(102)], HeaderStatus::MaybeExtra); - } - - #[test] - fn negative_maybe_orphan_response_works() { - // let's say we have: - // #100 in MaybeOrphan - // #101 in MaybeOrphan - // and we have asked for MaybeOrphan status of #100.parent (i.e. 
#99) - // and the response is: NO, #99 is NOT known to the Substrate runtime - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeOrphan); - queue - .maybe_orphan - .entry(100) - .or_default() - .insert(hash(100), header(100)); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::MaybeOrphan); - queue - .maybe_orphan - .entry(101) - .or_default() - .insert(hash(101), header(101)); - queue.maybe_orphan_response(&id(99), false); - - // then all headers (#100..#101) are moved to the Orphan queue - assert!(queue.maybe_orphan.is_empty()); - assert_eq!(queue.orphan.len(), 2); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Orphan); - assert_eq!(queue.known_headers[&101][&hash(101)], HeaderStatus::Orphan); - } - - #[test] - fn positive_maybe_extra_response_works() { - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeExtra); - queue.maybe_extra.entry(100).or_default().insert(hash(100), header(100)); - queue.maybe_extra_response(&id(100), true); - assert!(queue.maybe_extra.is_empty()); - assert_eq!(queue.extra.len(), 1); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Extra); - } - - #[test] - fn negative_maybe_extra_response_works() { - // when parent header is complete - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::MaybeExtra); - queue.maybe_extra.entry(100).or_default().insert(hash(100), header(100)); - queue.maybe_extra_response(&id(100), false); - assert!(queue.maybe_extra.is_empty()); - assert_eq!(queue.ready.len(), 1); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Ready); - - // when parent header is incomplete - queue.incomplete_headers.insert(id(200), None); - queue - .known_headers - .entry(201) - .or_default() 
- .insert(hash(201), HeaderStatus::MaybeExtra); - queue.maybe_extra.entry(201).or_default().insert(hash(201), header(201)); - queue.maybe_extra_response(&id(201), false); - assert!(queue.maybe_extra.is_empty()); - assert_eq!(queue.incomplete.len(), 1); - assert_eq!(queue.known_headers[&201][&hash(201)], HeaderStatus::Incomplete); - } - - #[test] - fn receipts_response_works() { - // when parent header is complete - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Extra); - queue.extra.entry(100).or_default().insert(hash(100), header(100)); - queue.extra_response(&id(100), 100_100); - assert!(queue.extra.is_empty()); - assert_eq!(queue.ready.len(), 1); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Ready); - - // when parent header is incomplete - queue.incomplete_headers.insert(id(200), None); - queue - .known_headers - .entry(201) - .or_default() - .insert(hash(201), HeaderStatus::Extra); - queue.extra.entry(201).or_default().insert(hash(201), header(201)); - queue.extra_response(&id(201), 201_201); - assert!(queue.extra.is_empty()); - assert_eq!(queue.incomplete.len(), 1); - assert_eq!(queue.known_headers[&201][&hash(201)], HeaderStatus::Incomplete); - } - - #[test] - fn header_submitted_works() { - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Ready); - queue.ready.entry(100).or_default().insert(hash(100), header(100)); - queue.headers_submitted(vec![id(100)]); - assert!(queue.ready.is_empty()); - assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Submitted); - } - - #[test] - fn incomplete_header_works() { - let mut queue = QueuedHeaders::::default(); - - // nothing to complete if queue is empty - assert_eq!(queue.incomplete_header(), None); - - // when there's new header to complete => ask for completion data - queue.incomplete_headers.insert(id(100), None); 
- assert_eq!(queue.incomplete_header(), Some(id(100))); - - // we have just asked for completion data => nothing to request - assert_eq!(queue.incomplete_header(), None); - - // enough time have passed => ask again - queue.incomplete_headers.clear(); - queue.incomplete_headers.insert( - id(100), - Some(Instant::now() - RETRY_FETCH_COMPLETION_INTERVAL - RETRY_FETCH_COMPLETION_INTERVAL), - ); - assert_eq!(queue.incomplete_header(), Some(id(100))); - } - - #[test] - fn completion_response_works() { - let mut queue = QueuedHeaders::::default(); - queue.incomplete_headers.insert(id(100), None); - queue.incomplete_headers.insert(id(200), Some(Instant::now())); - queue.incomplete_headers.insert(id(300), Some(Instant::now())); - - // when header isn't incompete, nothing changes - queue.completion_response(&id(400), None); - assert_eq!(queue.incomplete_headers.len(), 3); - assert_eq!(queue.completion_data.len(), 0); - assert_eq!(queue.header_to_complete(), None); - - // when response is None, nothing changes - queue.completion_response(&id(100), None); - assert_eq!(queue.incomplete_headers.len(), 3); - assert_eq!(queue.completion_data.len(), 0); - assert_eq!(queue.header_to_complete(), None); - - // when response is Some, we're scheduling completion - queue.completion_response(&id(200), Some(200_200)); - assert_eq!(queue.completion_data.len(), 1); - assert!(queue.completion_data.contains_key(&id(200))); - assert_eq!(queue.header_to_complete(), Some((id(200), &200_200))); - assert_eq!( - queue.incomplete_headers.keys().collect::>(), - vec![&id(100), &id(300), &id(200)], - ); - } - - #[test] - fn header_completed_works() { - let mut queue = QueuedHeaders::::default(); - queue.completion_data.insert(id(100), 100_100); - - // when unknown header is completed - queue.header_completed(&id(200)); - assert_eq!(queue.completion_data.len(), 1); - - // when known header is completed - queue.header_completed(&id(100)); - assert_eq!(queue.completion_data.len(), 0); - } - - #[test] - fn 
incomplete_headers_response_works() { - let mut queue = QueuedHeaders::::default(); - - // when we have already submitted #101 and #102 is ready - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Submitted); - queue.submitted.entry(101).or_default().insert(hash(101), header(101)); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::Ready); - queue.submitted.entry(102).or_default().insert(hash(102), header(102)); - - // AND now we know that the #100 is incomplete - queue.incomplete_headers_response(vec![id(100)].into_iter().collect()); - - // => #101 and #102 are moved to the Incomplete and #100 is now synced - assert_eq!(queue.status(&id(100)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(101)), HeaderStatus::Incomplete); - assert_eq!(queue.status(&id(102)), HeaderStatus::Incomplete); - assert_eq!(queue.submitted.len(), 0); - assert_eq!(queue.ready.len(), 0); - assert!(queue.incomplete.entry(101).or_default().contains_key(&hash(101))); - assert!(queue.incomplete.entry(102).or_default().contains_key(&hash(102))); - assert!(queue.incomplete_headers.contains_key(&id(100))); - assert!(queue.completion_data.is_empty()); - - // and then header #100 is no longer incomplete - queue.incomplete_headers_response(vec![].into_iter().collect()); - - // => #101 and #102 are moved to the Ready queue and #100 if now forgotten - assert_eq!(queue.status(&id(100)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(101)), HeaderStatus::Ready); - assert_eq!(queue.status(&id(102)), HeaderStatus::Ready); - assert_eq!(queue.incomplete.len(), 0); - assert_eq!(queue.submitted.len(), 0); - assert!(queue.ready.entry(101).or_default().contains_key(&hash(101))); - assert!(queue.ready.entry(102).or_default().contains_key(&hash(102))); - assert!(queue.incomplete_headers.is_empty()); - assert!(queue.completion_data.is_empty()); - } - - #[test] - fn is_parent_incomplete_works() { - let mut queue = 
QueuedHeaders::::default(); - - // when we do not know header itself - assert_eq!(queue.is_parent_incomplete(&id(50)), false); - - // when we do not know parent - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Incomplete); - queue.incomplete.entry(100).or_default().insert(hash(100), header(100)); - assert_eq!(queue.is_parent_incomplete(&id(100)), false); - - // when parent is inside incomplete queue (i.e. some other ancestor is actually incomplete) - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Submitted); - queue.submitted.entry(101).or_default().insert(hash(101), header(101)); - assert_eq!(queue.is_parent_incomplete(&id(101)), true); - - // when parent is the incomplete header and we do not have completion data - queue.incomplete_headers.insert(id(199), None); - queue - .known_headers - .entry(200) - .or_default() - .insert(hash(200), HeaderStatus::Submitted); - queue.submitted.entry(200).or_default().insert(hash(200), header(200)); - assert_eq!(queue.is_parent_incomplete(&id(200)), true); - - // when parent is the incomplete header and we have completion data - queue.completion_data.insert(id(299), 299_299); - queue - .known_headers - .entry(300) - .or_default() - .insert(hash(300), HeaderStatus::Submitted); - queue.submitted.entry(300).or_default().insert(hash(300), header(300)); - assert_eq!(queue.is_parent_incomplete(&id(300)), true); - } - - #[test] - fn prune_works() { - let mut queue = QueuedHeaders::::default(); - queue - .known_headers - .entry(105) - .or_default() - .insert(hash(105), HeaderStatus::Incomplete); - queue.incomplete.entry(105).or_default().insert(hash(105), header(105)); - queue - .known_headers - .entry(104) - .or_default() - .insert(hash(104), HeaderStatus::MaybeOrphan); - queue - .maybe_orphan - .entry(104) - .or_default() - .insert(hash(104), header(104)); - queue - .known_headers - .entry(103) - .or_default() - .insert(hash(103), HeaderStatus::Orphan); 
- queue.orphan.entry(103).or_default().insert(hash(103), header(103)); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::MaybeExtra); - queue.maybe_extra.entry(102).or_default().insert(hash(102), header(102)); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Extra); - queue.extra.entry(101).or_default().insert(hash(101), header(101)); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Ready); - queue.ready.entry(100).or_default().insert(hash(100), header(100)); - queue - .synced_children - .entry(100) - .or_default() - .insert(hash(100), vec![id(101)].into_iter().collect()); - queue - .synced_children - .entry(102) - .or_default() - .insert(hash(102), vec![id(102)].into_iter().collect()); - - queue.prune(102); - - assert_eq!(queue.ready.len(), 0); - assert_eq!(queue.extra.len(), 0); - assert_eq!(queue.maybe_extra.len(), 1); - assert_eq!(queue.orphan.len(), 1); - assert_eq!(queue.maybe_orphan.len(), 1); - assert_eq!(queue.incomplete.len(), 1); - assert_eq!(queue.synced_children.len(), 1); - assert_eq!(queue.known_headers.len(), 4); - - queue.prune(110); - - assert_eq!(queue.ready.len(), 0); - assert_eq!(queue.extra.len(), 0); - assert_eq!(queue.maybe_extra.len(), 0); - assert_eq!(queue.orphan.len(), 0); - assert_eq!(queue.maybe_orphan.len(), 0); - assert_eq!(queue.incomplete.len(), 0); - assert_eq!(queue.synced_children.len(), 0); - assert_eq!(queue.known_headers.len(), 0); - - queue.header_response(header(109).header().clone()); - assert_eq!(queue.known_headers.len(), 0); - - queue.header_response(header(110).header().clone()); - assert_eq!(queue.known_headers.len(), 1); - } - - #[test] - fn incomplete_headers_are_still_incomplete_after_advance() { - let mut queue = QueuedHeaders::::default(); - - // relay#1 knows that header#100 is incomplete && it has headers 101..104 in incomplete queue - queue.incomplete_headers.insert(id(100), None); - 
queue.incomplete.entry(101).or_default().insert(hash(101), header(101)); - queue.incomplete.entry(102).or_default().insert(hash(102), header(102)); - queue.incomplete.entry(103).or_default().insert(hash(103), header(103)); - queue.incomplete.entry(104).or_default().insert(hash(104), header(104)); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Synced); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Incomplete); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::Incomplete); - queue - .known_headers - .entry(103) - .or_default() - .insert(hash(103), HeaderStatus::Incomplete); - queue - .known_headers - .entry(104) - .or_default() - .insert(hash(104), HeaderStatus::Incomplete); - - // let's say relay#2 completes header#100 and then submits header#101+header#102 and it turns - // out that header#102 is also incomplete - queue.incomplete_headers_response(vec![id(102)].into_iter().collect()); - - // then the header#103 and the header#104 must have Incomplete status - assert_eq!(queue.status(&id(101)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(102)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(103)), HeaderStatus::Incomplete); - assert_eq!(queue.status(&id(104)), HeaderStatus::Incomplete); - } - - #[test] - fn incomplete_headers_response_moves_synced_headers() { - let mut queue = QueuedHeaders::::default(); - - // we have submitted two headers - 100 and 101. 
102 is ready - queue.submitted.entry(100).or_default().insert(hash(100), header(100)); - queue.submitted.entry(101).or_default().insert(hash(101), header(101)); - queue.ready.entry(102).or_default().insert(hash(102), header(102)); - queue - .known_headers - .entry(100) - .or_default() - .insert(hash(100), HeaderStatus::Submitted); - queue - .known_headers - .entry(101) - .or_default() - .insert(hash(101), HeaderStatus::Submitted); - queue - .known_headers - .entry(102) - .or_default() - .insert(hash(102), HeaderStatus::Ready); - - // both headers are accepted - queue.target_best_header_response(&id(101)); - - // but header 100 is incomplete - queue.incomplete_headers_response(vec![id(100)].into_iter().collect()); - assert_eq!(queue.status(&id(100)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(101)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(102)), HeaderStatus::Incomplete); - assert!(queue.incomplete_headers.contains_key(&id(100))); - assert!(queue.incomplete[&102].contains_key(&hash(102))); - - // when header 100 is completed, 101 is synced and 102 is ready - queue.incomplete_headers_response(HashSet::new()); - assert_eq!(queue.status(&id(100)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(101)), HeaderStatus::Synced); - assert_eq!(queue.status(&id(102)), HeaderStatus::Ready); - assert!(queue.ready[&102].contains_key(&hash(102))); - } -} diff --git a/polkadot/bridges/relays/headers-relay/src/lib.rs b/polkadot/bridges/relays/headers-relay/src/lib.rs deleted file mode 100644 index d91fe94d9d50b0b0783a674839425c8cfe6fc1d8..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/headers-relay/src/lib.rs +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relaying source chain headers to target chain. This module provides entrypoint -//! that starts reading new headers from source chain and submit these headers as -//! module/contract transactions to the target chain. Module/contract on the target -//! chain is a light-client of the source chain. All other trustless bridge -//! applications are built using this light-client, so running headers-relay is -//! essential for running all other bridge applications. - -// required for futures::select! -#![recursion_limit = "1024"] -#![warn(missing_docs)] - -pub mod headers; -pub mod sync; -pub mod sync_loop; -pub mod sync_loop_metrics; -pub mod sync_loop_tests; -pub mod sync_types; diff --git a/polkadot/bridges/relays/headers-relay/src/sync.rs b/polkadot/bridges/relays/headers-relay/src/sync.rs deleted file mode 100644 index 8e4c671dbaa9ed648d780c7112e6182d0897dcd7..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/headers-relay/src/sync.rs +++ /dev/null @@ -1,523 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Headers synchronization context. This structure wraps headers queue and is -//! able to choose: which headers to read from the source chain? Which headers -//! to submit to the target chain? The context makes decisions basing on parameters -//! passed using `HeadersSyncParams` structure. - -use crate::headers::QueuedHeaders; -use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader}; -use num_traits::{One, Saturating, Zero}; - -/// Common sync params. -#[derive(Debug, Clone)] -pub struct HeadersSyncParams { - /// Maximal number of ethereum headers to pre-download. - pub max_future_headers_to_download: usize, - /// Maximal number of active (we believe) submit header transactions. - pub max_headers_in_submitted_status: usize, - /// Maximal number of headers in single submit request. - pub max_headers_in_single_submit: usize, - /// Maximal total headers size in single submit request. - pub max_headers_size_in_single_submit: usize, - /// We only may store and accept (from Ethereum node) headers that have - /// number >= than best_substrate_header.number - prune_depth. - pub prune_depth: u32, - /// Target transactions mode. - pub target_tx_mode: TargetTransactionMode, -} - -/// Target transaction mode. 
-#[derive(Debug, PartialEq, Clone)] -pub enum TargetTransactionMode { - /// Submit new headers using signed transactions. - Signed, - /// Submit new headers using unsigned transactions. - Unsigned, - /// Submit new headers using signed transactions, but only when we - /// believe that sync has stalled. - Backup, -} - -/// Headers synchronization context. -#[derive(Debug)] -pub struct HeadersSync { - /// Synchronization parameters. - params: HeadersSyncParams, - /// Best header number known to source node. - source_best_number: Option, - /// Best header known to target node. - target_best_header: Option>, - /// Headers queue. - headers: QueuedHeaders

, - /// Pause headers submission. - pause_submit: bool, -} - -impl HeadersSync

{ - /// Creates new headers synchronizer. - pub fn new(params: HeadersSyncParams) -> Self { - HeadersSync { - headers: QueuedHeaders::default(), - params, - source_best_number: None, - target_best_header: None, - pause_submit: false, - } - } - - /// Return best header number known to source node. - pub fn source_best_number(&self) -> Option { - self.source_best_number - } - - /// Best header known to target node. - pub fn target_best_header(&self) -> Option> { - self.target_best_header - } - - /// Returns true if we have synced almost all known headers. - pub fn is_almost_synced(&self) -> bool { - match self.source_best_number { - Some(source_best_number) => self - .target_best_header - .map(|best| source_best_number.saturating_sub(best.0) < 4.into()) - .unwrap_or(false), - None => true, - } - } - - /// Returns synchronization status. - pub fn status(&self) -> (&Option>, &Option) { - (&self.target_best_header, &self.source_best_number) - } - - /// Returns reference to the headers queue. - pub fn headers(&self) -> &QueuedHeaders

{ - &self.headers - } - - /// Returns mutable reference to the headers queue. - pub fn headers_mut(&mut self) -> &mut QueuedHeaders

{ - &mut self.headers - } - - /// Select header that needs to be downloaded from the source node. - pub fn select_new_header_to_download(&self) -> Option { - // if we haven't received best header from source node yet, there's nothing we can download - let source_best_number = self.source_best_number?; - - // if we haven't received known best header from target node yet, there's nothing we can download - let target_best_header = self.target_best_header.as_ref()?; - - // if there's too many headers in the queue, stop downloading - let in_memory_headers = self.headers.total_headers(); - if in_memory_headers >= self.params.max_future_headers_to_download { - return None; - } - - // if queue is empty and best header on target is > than best header on source, - // then we shoud reorg - let best_queued_number = self.headers.best_queued_number(); - if best_queued_number.is_zero() && source_best_number < target_best_header.0 { - return Some(source_best_number); - } - - // we assume that there were no reorgs if we have already downloaded best header - let best_downloaded_number = std::cmp::max( - std::cmp::max(best_queued_number, self.headers.best_synced_number()), - target_best_header.0, - ); - if best_downloaded_number >= source_best_number { - return None; - } - - // download new header - Some(best_downloaded_number + One::one()) - } - - /// Selech orphan header to downoload. - pub fn select_orphan_header_to_download(&self) -> Option<&QueuedHeader

> { - let orphan_header = self.headers.header(HeaderStatus::Orphan)?; - - // we consider header orphan until we'll find it ancestor that is known to the target node - // => we may get orphan header while we ask target node whether it knows its parent - // => let's avoid fetching duplicate headers - let parent_id = orphan_header.parent_id(); - if self.headers.status(&parent_id) != HeaderStatus::Unknown { - return None; - } - - Some(orphan_header) - } - - /// Select headers that need to be submitted to the target node. - pub fn select_headers_to_submit(&self, stalled: bool) -> Option>> { - // maybe we have paused new headers submit? - if self.pause_submit { - return None; - } - - // if we operate in backup mode, we only submit headers when sync has stalled - if self.params.target_tx_mode == TargetTransactionMode::Backup && !stalled { - return None; - } - - let headers_in_submit_status = self.headers.headers_in_status(HeaderStatus::Submitted); - let headers_to_submit_count = self - .params - .max_headers_in_submitted_status - .checked_sub(headers_in_submit_status)?; - - let mut total_size = 0; - let mut total_headers = 0; - self.headers.headers(HeaderStatus::Ready, |header| { - if total_headers == headers_to_submit_count { - return false; - } - if total_headers == self.params.max_headers_in_single_submit { - return false; - } - - let encoded_size = P::estimate_size(header); - if total_headers != 0 && total_size + encoded_size > self.params.max_headers_size_in_single_submit { - return false; - } - - total_size += encoded_size; - total_headers += 1; - - true - }) - } - - /// Receive new target header number from the source node. - pub fn source_best_header_number_response(&mut self, best_header_number: P::Number) { - log::debug!( - target: "bridge", - "Received best header number from {} node: {}", - P::SOURCE_NAME, - best_header_number, - ); - self.source_best_number = Some(best_header_number); - } - - /// Receive new best header from the target node. 
- /// Returns true if it is different from the previous block known to us. - pub fn target_best_header_response(&mut self, best_header: HeaderIdOf

) -> bool { - log::debug!( - target: "bridge", - "Received best known header from {}: {:?}", - P::TARGET_NAME, - best_header, - ); - - // early return if it is still the same - if self.target_best_header == Some(best_header) { - return false; - } - - // remember that this header is now known to the Substrate runtime - self.headers.target_best_header_response(&best_header); - - // prune ancient headers - self.headers - .prune(best_header.0.saturating_sub(self.params.prune_depth.into())); - - // finally remember the best header itself - self.target_best_header = Some(best_header); - - // we are ready to submit headers again - if self.pause_submit { - log::debug!( - target: "bridge", - "Ready to submit {} headers to {} node again!", - P::SOURCE_NAME, - P::TARGET_NAME, - ); - - self.pause_submit = false; - } - - true - } - - /// Pause headers submit until best header will be updated on target node. - pub fn pause_submit(&mut self) { - log::debug!( - target: "bridge", - "Stopping submitting {} headers to {} node. Waiting for {} submitted headers to be accepted", - P::SOURCE_NAME, - P::TARGET_NAME, - self.headers.headers_in_status(HeaderStatus::Submitted), - ); - - self.pause_submit = true; - } - - /// Restart synchronization. 
- pub fn restart(&mut self) { - self.source_best_number = None; - self.target_best_header = None; - self.headers.clear(); - self.pause_submit = false; - } -} - -#[cfg(test)] -pub mod tests { - use super::*; - use crate::headers::tests::{header, id}; - use crate::sync_loop_tests::{TestHash, TestHeadersSyncPipeline, TestNumber}; - use crate::sync_types::HeaderStatus; - use relay_utils::HeaderId; - - fn side_hash(number: TestNumber) -> TestHash { - 1000 + number - } - - pub fn default_sync_params() -> HeadersSyncParams { - HeadersSyncParams { - max_future_headers_to_download: 128, - max_headers_in_submitted_status: 128, - max_headers_in_single_submit: 32, - max_headers_size_in_single_submit: 131_072, - prune_depth: 4096, - target_tx_mode: TargetTransactionMode::Signed, - } - } - - #[test] - fn select_new_header_to_download_works() { - let mut eth_sync = HeadersSync::::new(default_sync_params()); - - // both best && target headers are unknown - assert_eq!(eth_sync.select_new_header_to_download(), None); - - // best header is known, target header is unknown - eth_sync.target_best_header = Some(HeaderId(0, Default::default())); - assert_eq!(eth_sync.select_new_header_to_download(), None); - - // target header is known, best header is unknown - eth_sync.target_best_header = None; - eth_sync.source_best_number = Some(100); - assert_eq!(eth_sync.select_new_header_to_download(), None); - - // when our best block has the same number as the target - eth_sync.target_best_header = Some(HeaderId(100, Default::default())); - assert_eq!(eth_sync.select_new_header_to_download(), None); - - // when we actually need a new header - eth_sync.source_best_number = Some(101); - assert_eq!(eth_sync.select_new_header_to_download(), Some(101)); - - // when we have to reorganize to longer fork - eth_sync.source_best_number = Some(100); - eth_sync.target_best_header = Some(HeaderId(200, Default::default())); - assert_eq!(eth_sync.select_new_header_to_download(), Some(100)); - - // when there 
are too many headers scheduled for submitting - for i in 1..1000 { - eth_sync.headers.header_response(header(i).header().clone()); - } - assert_eq!(eth_sync.select_new_header_to_download(), None); - } - - #[test] - fn select_new_header_to_download_works_with_empty_queue() { - let mut eth_sync = HeadersSync::::new(default_sync_params()); - eth_sync.source_best_header_number_response(100); - - // when queue is not empty => everything goes as usually - eth_sync.target_best_header_response(header(10).id()); - eth_sync.headers_mut().header_response(header(11).header().clone()); - eth_sync.headers_mut().maybe_extra_response(&header(11).id(), false); - assert_eq!(eth_sync.select_new_header_to_download(), Some(12)); - - // but then queue is drained - eth_sync.headers_mut().target_best_header_response(&header(11).id()); - - // even though it's empty, we know that header#11 is synced - assert_eq!(eth_sync.headers().best_queued_number(), 0); - assert_eq!(eth_sync.headers().best_synced_number(), 11); - assert_eq!(eth_sync.select_new_header_to_download(), Some(12)); - } - - #[test] - fn sync_without_reorgs_works() { - let mut eth_sync = HeadersSync::new(default_sync_params()); - eth_sync.params.max_headers_in_submitted_status = 1; - - // ethereum reports best header #102 - eth_sync.source_best_header_number_response(102); - - // substrate reports that it is at block #100 - eth_sync.target_best_header_response(id(100)); - - // block #101 is downloaded first - assert_eq!(eth_sync.select_new_header_to_download(), Some(101)); - eth_sync.headers.header_response(header(101).header().clone()); - - // now header #101 is ready to be submitted - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeExtra), Some(&header(101))); - eth_sync.headers.maybe_extra_response(&id(101), false); - assert_eq!(eth_sync.headers.header(HeaderStatus::Ready), Some(&header(101))); - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(101)])); - - // and header #102 is ready to be 
downloaded - assert_eq!(eth_sync.select_new_header_to_download(), Some(102)); - eth_sync.headers.header_response(header(102).header().clone()); - - // receive submission confirmation - eth_sync.headers.headers_submitted(vec![id(101)]); - - // we have nothing to submit because previous header hasn't been confirmed yet - // (and we allow max 1 submit transaction in the wild) - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeExtra), Some(&header(102))); - eth_sync.headers.maybe_extra_response(&id(102), false); - assert_eq!(eth_sync.headers.header(HeaderStatus::Ready), Some(&header(102))); - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // substrate reports that it has imported block #101 - eth_sync.target_best_header_response(id(101)); - - // and we are ready to submit #102 - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(102)])); - eth_sync.headers.headers_submitted(vec![id(102)]); - - // substrate reports that it has imported block #102 - eth_sync.target_best_header_response(id(102)); - - // and we have nothing to download - assert_eq!(eth_sync.select_new_header_to_download(), None); - } - - #[test] - fn sync_with_orphan_headers_work() { - let mut eth_sync = HeadersSync::new(default_sync_params()); - - // ethereum reports best header #102 - eth_sync.source_best_header_number_response(102); - - // substrate reports that it is at block #100, but it isn't part of best chain - eth_sync.target_best_header_response(HeaderId(100, side_hash(100))); - - // block #101 is downloaded first - assert_eq!(eth_sync.select_new_header_to_download(), Some(101)); - eth_sync.headers.header_response(header(101).header().clone()); - - // we can't submit header #101, because its parent status is unknown - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // instead we are trying to determine status of its parent (#100) - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeOrphan), Some(&header(101))); - - // and the status is 
still unknown - eth_sync.headers.maybe_orphan_response(&id(100), false); - - // so we consider #101 orphaned now && will download its parent - #100 - assert_eq!(eth_sync.headers.header(HeaderStatus::Orphan), Some(&header(101))); - eth_sync.headers.header_response(header(100).header().clone()); - - // #101 is now Orphan and #100 is MaybeOrphan => we do not want to retrieve - // header #100 again - assert_eq!(eth_sync.headers.header(HeaderStatus::Orphan), Some(&header(101))); - assert_eq!(eth_sync.select_orphan_header_to_download(), None); - - // we can't submit header #100, because its parent status is unknown - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // instead we are trying to determine status of its parent (#99) - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeOrphan), Some(&header(100))); - - // and the status is known, so we move previously orphaned #100 and #101 to ready queue - eth_sync.headers.maybe_orphan_response(&id(99), true); - - // and we are ready to submit #100 - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeExtra), Some(&header(100))); - eth_sync.headers.maybe_extra_response(&id(100), false); - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(100)])); - eth_sync.headers.headers_submitted(vec![id(100)]); - - // and we are ready to submit #101 - assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeExtra), Some(&header(101))); - eth_sync.headers.maybe_extra_response(&id(101), false); - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(101)])); - eth_sync.headers.headers_submitted(vec![id(101)]); - } - - #[test] - fn pruning_happens_on_target_best_header_response() { - let mut eth_sync = HeadersSync::::new(default_sync_params()); - eth_sync.params.prune_depth = 50; - eth_sync.target_best_header_response(id(100)); - assert_eq!(eth_sync.headers.prune_border(), 50); - } - - #[test] - fn only_submitting_headers_in_backup_mode_when_stalled() { - let mut eth_sync = 
HeadersSync::new(default_sync_params()); - eth_sync.params.target_tx_mode = TargetTransactionMode::Backup; - - // ethereum reports best header #102 - eth_sync.source_best_header_number_response(102); - - // substrate reports that it is at block #100 - eth_sync.target_best_header_response(id(100)); - - // block #101 is downloaded first - eth_sync.headers.header_response(header(101).header().clone()); - eth_sync.headers.maybe_extra_response(&id(101), false); - - // ensure that headers are not submitted when sync is not stalled - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // ensure that headers are not submitted when sync is stalled - assert_eq!(eth_sync.select_headers_to_submit(true), Some(vec![&header(101)])); - } - - #[test] - fn does_not_select_new_headers_to_submit_when_submit_is_paused() { - let mut eth_sync = HeadersSync::new(default_sync_params()); - eth_sync.params.max_headers_in_submitted_status = 1; - - // ethereum reports best header #102 and substrate is at #100 - eth_sync.source_best_header_number_response(102); - eth_sync.target_best_header_response(id(100)); - - // let's prepare #101 and #102 for submitting - eth_sync.headers.header_response(header(101).header().clone()); - eth_sync.headers.maybe_extra_response(&id(101), false); - eth_sync.headers.header_response(header(102).header().clone()); - eth_sync.headers.maybe_extra_response(&id(102), false); - - // when submit is not paused, we're ready to submit #101 - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(101)])); - - // when submit is paused, we're not ready to submit anything - eth_sync.pause_submit(); - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // if best header on substrate node isn't updated, we still not submitting anything - eth_sync.target_best_header_response(id(100)); - assert_eq!(eth_sync.select_headers_to_submit(false), None); - - // but after it is actually updated, we are ready to submit - 
eth_sync.target_best_header_response(id(101)); - assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(102)])); - } -} diff --git a/polkadot/bridges/relays/headers-relay/src/sync_loop.rs b/polkadot/bridges/relays/headers-relay/src/sync_loop.rs deleted file mode 100644 index d2584f2ccb2c816c1f83e78b2a7757e0f9fcd0be..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/headers-relay/src/sync_loop.rs +++ /dev/null @@ -1,654 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Entrypoint for running headers synchronization loop. 
- -use crate::sync::{HeadersSync, HeadersSyncParams}; -use crate::sync_loop_metrics::SyncLoopMetrics; -use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SubmittedHeaders}; - -use async_trait::async_trait; -use futures::{future::FutureExt, stream::StreamExt}; -use num_traits::{Saturating, Zero}; -use relay_utils::{ - format_ids, interval, - metrics::{start as metrics_start, GlobalMetrics, MetricsParams}, - process_future_result, - relay_loop::Client as RelayClient, - retry_backoff, FailedClient, MaybeConnectionError, StringifiedMaybeConnectionError, -}; -use std::{ - collections::HashSet, - future::Future, - time::{Duration, Instant}, -}; - -/// When we submit headers to target node, but see no updates of best -/// source block known to target node during STALL_SYNC_TIMEOUT seconds, -/// we consider that our headers are rejected because there has been reorg in target chain. -/// This reorg could invalidate our knowledge about sync process (i.e. we have asked if -/// HeaderA is known to target, but then reorg happened and the answer is different -/// now) => we need to reset sync. -/// The other option is to receive **EVERY** best target header and check if it is -/// direct child of previous best header. But: (1) subscription doesn't guarantee that -/// the subscriber will receive every best header (2) reorg won't always lead to sync -/// stall and restart is a heavy operation (we forget all in-memory headers). -const STALL_SYNC_TIMEOUT: Duration = Duration::from_secs(5 * 60); -/// Delay after we have seen update of best source header at target node, -/// for us to treat sync stalled. ONLY when relay operates in backup mode. -const BACKUP_STALL_SYNC_TIMEOUT: Duration = Duration::from_secs(10 * 60); -/// Interval between calling sync maintain procedure. -const MAINTAIN_INTERVAL: Duration = Duration::from_secs(30); - -/// Source client trait. -#[async_trait] -pub trait SourceClient: RelayClient { - /// Get best block number. 
- async fn best_block_number(&self) -> Result; - - /// Get header by hash. - async fn header_by_hash(&self, hash: P::Hash) -> Result; - - /// Get canonical header by number. - async fn header_by_number(&self, number: P::Number) -> Result; - - /// Get completion data by header hash. - async fn header_completion(&self, id: HeaderIdOf

) - -> Result<(HeaderIdOf

, Option), Self::Error>; - - /// Get extra data by header hash. - async fn header_extra( - &self, - id: HeaderIdOf

, - header: QueuedHeader

, - ) -> Result<(HeaderIdOf

, P::Extra), Self::Error>; -} - -/// Target client trait. -#[async_trait] -pub trait TargetClient: RelayClient { - /// Returns ID of best header known to the target node. - async fn best_header_id(&self) -> Result, Self::Error>; - - /// Returns true if header is known to the target node. - async fn is_known_header(&self, id: HeaderIdOf

) -> Result<(HeaderIdOf

, bool), Self::Error>; - - /// Submit headers. - async fn submit_headers(&self, headers: Vec>) -> SubmittedHeaders, Self::Error>; - - /// Returns ID of headers that require to be 'completed' before children can be submitted. - async fn incomplete_headers_ids(&self) -> Result>, Self::Error>; - - /// Submit completion data for header. - async fn complete_header(&self, id: HeaderIdOf

, completion: P::Completion) - -> Result, Self::Error>; - - /// Returns true if header requires extra data to be submitted. - async fn requires_extra(&self, header: QueuedHeader

) -> Result<(HeaderIdOf

, bool), Self::Error>; -} - -/// Synchronization maintain procedure. -#[async_trait] -pub trait SyncMaintain: Clone + Send + Sync { - /// Run custom maintain procedures. This is guaranteed to be called when both source and target - /// clients are unoccupied. - async fn maintain(&self, _sync: &mut HeadersSync

) {} -} - -impl SyncMaintain

for () {} - -/// Run headers synchronization. -#[allow(clippy::too_many_arguments)] -pub fn run>( - source_client: impl SourceClient

, - source_tick: Duration, - target_client: TC, - target_tick: Duration, - sync_maintain: impl SyncMaintain

, - sync_params: HeadersSyncParams, - metrics_params: Option, - exit_signal: impl Future, -) { - let exit_signal = exit_signal.shared(); - - let metrics_global = GlobalMetrics::default(); - let metrics_sync = SyncLoopMetrics::default(); - let metrics_enabled = metrics_params.is_some(); - metrics_start( - format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME), - metrics_params, - &metrics_global, - &metrics_sync, - ); - - relay_utils::relay_loop::run( - relay_utils::relay_loop::RECONNECT_DELAY, - source_client, - target_client, - |source_client, target_client| { - run_until_connection_lost( - source_client, - source_tick, - target_client, - target_tick, - sync_maintain.clone(), - sync_params.clone(), - if metrics_enabled { - Some(metrics_global.clone()) - } else { - None - }, - if metrics_enabled { - Some(metrics_sync.clone()) - } else { - None - }, - exit_signal.clone(), - ) - }, - ); -} - -/// Run headers synchronization. -#[allow(clippy::too_many_arguments)] -async fn run_until_connection_lost>( - source_client: impl SourceClient

, - source_tick: Duration, - target_client: TC, - target_tick: Duration, - sync_maintain: impl SyncMaintain

, - sync_params: HeadersSyncParams, - metrics_global: Option, - metrics_sync: Option, - exit_signal: impl Future, -) -> Result<(), FailedClient> { - let mut progress_context = (Instant::now(), None, None); - - let mut sync = HeadersSync::

::new(sync_params); - let mut stall_countdown = None; - let mut last_update_time = Instant::now(); - - let mut source_retry_backoff = retry_backoff(); - let mut source_client_is_online = false; - let mut source_best_block_number_required = false; - let source_best_block_number_future = source_client.best_block_number().fuse(); - let source_new_header_future = futures::future::Fuse::terminated(); - let source_orphan_header_future = futures::future::Fuse::terminated(); - let source_extra_future = futures::future::Fuse::terminated(); - let source_completion_future = futures::future::Fuse::terminated(); - let source_go_offline_future = futures::future::Fuse::terminated(); - let source_tick_stream = interval(source_tick).fuse(); - - let mut target_retry_backoff = retry_backoff(); - let mut target_client_is_online = false; - let mut target_best_block_required = false; - let mut target_incomplete_headers_required = true; - let target_best_block_future = target_client.best_header_id().fuse(); - let target_incomplete_headers_future = futures::future::Fuse::terminated(); - let target_extra_check_future = futures::future::Fuse::terminated(); - let target_existence_status_future = futures::future::Fuse::terminated(); - let target_submit_header_future = futures::future::Fuse::terminated(); - let target_complete_header_future = futures::future::Fuse::terminated(); - let target_go_offline_future = futures::future::Fuse::terminated(); - let target_tick_stream = interval(target_tick).fuse(); - - let mut maintain_required = false; - let maintain_stream = interval(MAINTAIN_INTERVAL).fuse(); - - let exit_signal = exit_signal.fuse(); - - futures::pin_mut!( - source_best_block_number_future, - source_new_header_future, - source_orphan_header_future, - source_extra_future, - source_completion_future, - source_go_offline_future, - source_tick_stream, - target_best_block_future, - target_incomplete_headers_future, - target_extra_check_future, - target_existence_status_future, - 
target_submit_header_future, - target_complete_header_future, - target_go_offline_future, - target_tick_stream, - maintain_stream, - exit_signal - ); - - loop { - futures::select! { - source_best_block_number = source_best_block_number_future => { - source_best_block_number_required = false; - - source_client_is_online = process_future_result( - source_best_block_number, - &mut source_retry_backoff, - |source_best_block_number| sync.source_best_header_number_response(source_best_block_number), - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving best header number from {}", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - source_new_header = source_new_header_future => { - source_client_is_online = process_future_result( - source_new_header, - &mut source_retry_backoff, - |source_new_header| sync.headers_mut().header_response(source_new_header), - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving header from {} node", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - source_orphan_header = source_orphan_header_future => { - source_client_is_online = process_future_result( - source_orphan_header, - &mut source_retry_backoff, - |source_orphan_header| sync.headers_mut().header_response(source_orphan_header), - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving orphan header from {} node", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - source_extra = source_extra_future => { - source_client_is_online = process_future_result( - source_extra, - &mut source_retry_backoff, - |(header, extra)| sync.headers_mut().extra_response(&header, extra), - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving extra data from {} node", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - source_completion = source_completion_future => 
{ - source_client_is_online = process_future_result( - source_completion, - &mut source_retry_backoff, - |(header, completion)| sync.headers_mut().completion_response(&header, completion), - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving completion data from {} node", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - _ = source_go_offline_future => { - source_client_is_online = true; - }, - _ = source_tick_stream.next() => { - if sync.is_almost_synced() { - source_best_block_number_required = true; - } - }, - target_best_block = target_best_block_future => { - target_best_block_required = false; - - target_client_is_online = process_future_result( - target_best_block, - &mut target_retry_backoff, - |target_best_block| { - let head_updated = sync.target_best_header_response(target_best_block); - if head_updated { - last_update_time = Instant::now(); - } - match head_updated { - // IF head is updated AND there are still our transactions: - // => restart stall countdown timer - true if sync.headers().headers_in_status(HeaderStatus::Submitted) != 0 => - stall_countdown = Some(Instant::now()), - // IF head is updated AND there are no our transactions: - // => stop stall countdown timer - true => stall_countdown = None, - // IF head is not updated AND stall countdown is not yet completed - // => do nothing - false if stall_countdown - .map(|stall_countdown| stall_countdown.elapsed() < STALL_SYNC_TIMEOUT) - .unwrap_or(true) - => (), - // IF head is not updated AND stall countdown has completed - // => restart sync - false => { - log::info!( - target: "bridge", - "Sync has stalled. 
Restarting {} headers synchronization.", - P::SOURCE_NAME, - ); - stall_countdown = None; - sync.restart(); - }, - } - }, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving best known {} header from {} node", P::SOURCE_NAME, P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - incomplete_headers_ids = target_incomplete_headers_future => { - target_incomplete_headers_required = false; - - target_client_is_online = process_future_result( - incomplete_headers_ids, - &mut target_retry_backoff, - |incomplete_headers_ids| sync.headers_mut().incomplete_headers_response(incomplete_headers_ids), - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving incomplete headers from {} node", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - target_existence_status = target_existence_status_future => { - target_client_is_online = process_future_result( - target_existence_status, - &mut target_retry_backoff, - |(target_header, target_existence_status)| sync - .headers_mut() - .maybe_orphan_response(&target_header, target_existence_status), - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving existence status from {} node", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - submitted_headers = target_submit_header_future => { - // following line helps Rust understand the type of `submitted_headers` :/ - let submitted_headers: SubmittedHeaders, TC::Error> = submitted_headers; - let submitted_headers_str = format!("{}", submitted_headers); - let all_headers_rejected = submitted_headers.submitted.is_empty() - && submitted_headers.incomplete.is_empty(); - let has_submitted_headers = sync.headers().headers_in_status(HeaderStatus::Submitted) != 0; - - let maybe_fatal_error = match submitted_headers.fatal_error { - Some(fatal_error) => Err(StringifiedMaybeConnectionError::new( - 
fatal_error.is_connection_error(), - format!("{:?}", fatal_error), - )), - None if all_headers_rejected && !has_submitted_headers => - Err(StringifiedMaybeConnectionError::new(false, "All headers were rejected".into())), - None => Ok(()), - }; - - let no_fatal_error = maybe_fatal_error.is_ok(); - target_client_is_online = process_future_result( - maybe_fatal_error, - &mut target_retry_backoff, - |_| {}, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error submitting headers to {} node", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - - log::debug!(target: "bridge", "Header submit result: {}", submitted_headers_str); - - sync.headers_mut().headers_submitted(submitted_headers.submitted); - sync.headers_mut().add_incomplete_headers(false, submitted_headers.incomplete); - - // when there's no fatal error, but node has rejected all our headers we may - // want to pause until our submitted headers will be accepted - if no_fatal_error && all_headers_rejected && has_submitted_headers { - sync.pause_submit(); - } - }, - target_complete_header_result = target_complete_header_future => { - target_client_is_online = process_future_result( - target_complete_header_result, - &mut target_retry_backoff, - |completed_header| sync.headers_mut().header_completed(&completed_header), - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error completing headers at {}", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - target_extra_check_result = target_extra_check_future => { - target_client_is_online = process_future_result( - target_extra_check_result, - &mut target_retry_backoff, - |(header, extra_check_result)| sync - .headers_mut() - .maybe_extra_response(&header, extra_check_result), - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving receipts requirement from {} node", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - _ 
= target_go_offline_future => { - target_client_is_online = true; - }, - _ = target_tick_stream.next() => { - target_best_block_required = true; - target_incomplete_headers_required = true; - }, - - _ = maintain_stream.next() => { - maintain_required = true; - }, - _ = exit_signal => { - return Ok(()); - } - } - - // update metrics - if let Some(ref metrics_global) = metrics_global { - metrics_global.update().await; - } - if let Some(ref metrics_sync) = metrics_sync { - metrics_sync.update(&sync); - } - - // print progress - progress_context = print_sync_progress(progress_context, &sync); - - // run maintain procedures - if maintain_required && source_client_is_online && target_client_is_online { - log::debug!(target: "bridge", "Maintaining headers sync loop"); - maintain_required = false; - sync_maintain.maintain(&mut sync).await; - } - - // If the target client is accepting requests we update the requests that - // we want it to run - if !maintain_required && target_client_is_online { - // NOTE: Is is important to reset this so that we only have one - // request being processed by the client at a time. This prevents - // race conditions like receiving two transactions with the same - // nonce from the client. - target_client_is_online = false; - - // The following is how we prioritize requests: - // - // 1. Get best block - // - Stops us from downloading or submitting new blocks - // - Only called rarely - // - // 2. Get incomplete headers - // - Stops us from submitting new blocks - // - Only called rarely - // - // 3. Get complete headers - // - Stops us from submitting new blocks - // - // 4. Check if we need extra data from source - // - Stops us from downloading or submitting new blocks - // - // 5. Check existence of header - // - Stops us from submitting new blocks - // - // 6. 
Submit header - - if target_best_block_required { - log::debug!(target: "bridge", "Asking {} about best block", P::TARGET_NAME); - target_best_block_future.set(target_client.best_header_id().fuse()); - } else if target_incomplete_headers_required { - log::debug!(target: "bridge", "Asking {} about incomplete headers", P::TARGET_NAME); - target_incomplete_headers_future.set(target_client.incomplete_headers_ids().fuse()); - } else if let Some((id, completion)) = sync.headers_mut().header_to_complete() { - log::debug!( - target: "bridge", - "Going to complete header: {:?}", - id, - ); - - target_complete_header_future.set(target_client.complete_header(id, completion.clone()).fuse()); - } else if let Some(header) = sync.headers().header(HeaderStatus::MaybeExtra) { - log::debug!( - target: "bridge", - "Checking if header submission requires extra: {:?}", - header.id(), - ); - - target_extra_check_future.set(target_client.requires_extra(header.clone()).fuse()); - } else if let Some(header) = sync.headers().header(HeaderStatus::MaybeOrphan) { - // for MaybeOrphan we actually ask for parent' header existence - let parent_id = header.parent_id(); - - log::debug!( - target: "bridge", - "Asking {} node for existence of: {:?}", - P::TARGET_NAME, - parent_id, - ); - - target_existence_status_future.set(target_client.is_known_header(parent_id).fuse()); - } else if let Some(headers) = - sync.select_headers_to_submit(last_update_time.elapsed() > BACKUP_STALL_SYNC_TIMEOUT) - { - log::debug!( - target: "bridge", - "Submitting {} header(s) to {} node: {:?}", - headers.len(), - P::TARGET_NAME, - format_ids(headers.iter().map(|header| header.id())), - ); - - let headers = headers.into_iter().cloned().collect(); - target_submit_header_future.set(target_client.submit_headers(headers).fuse()); - - // remember that we have submitted some headers - if stall_countdown.is_none() { - stall_countdown = Some(Instant::now()); - } - } else { - target_client_is_online = true; - } - } - - // If the 
source client is accepting requests we update the requests that - // we want it to run - if !maintain_required && source_client_is_online { - // NOTE: Is is important to reset this so that we only have one - // request being processed by the client at a time. This prevents - // race conditions like receiving two transactions with the same - // nonce from the client. - source_client_is_online = false; - - // The following is how we prioritize requests: - // - // 1. Get best block - // - Stops us from downloading or submitting new blocks - // - Only called rarely - // - // 2. Download completion data - // - Stops us from submitting new blocks - // - // 3. Download extra data - // - Stops us from submitting new blocks - // - // 4. Download missing headers - // - Stops us from downloading or submitting new blocks - // - // 5. Downloading new headers - - if source_best_block_number_required { - log::debug!(target: "bridge", "Asking {} node about best block number", P::SOURCE_NAME); - source_best_block_number_future.set(source_client.best_block_number().fuse()); - } else if let Some(id) = sync.headers_mut().incomplete_header() { - log::debug!( - target: "bridge", - "Retrieving completion data for header: {:?}", - id, - ); - source_completion_future.set(source_client.header_completion(id).fuse()); - } else if let Some(header) = sync.headers().header(HeaderStatus::Extra) { - let id = header.id(); - log::debug!( - target: "bridge", - "Retrieving extra data for header: {:?}", - id, - ); - source_extra_future.set(source_client.header_extra(id, header.clone()).fuse()); - } else if let Some(header) = sync.select_orphan_header_to_download() { - // for Orphan we actually ask for parent' header - let parent_id = header.parent_id(); - - // if we have end up with orphan header#0, then we are misconfigured - if parent_id.0.is_zero() { - log::error!( - target: "bridge", - "Misconfiguration. 
Genesis {} header is considered orphan by {} node", - P::SOURCE_NAME, - P::TARGET_NAME, - ); - return Ok(()); - } - - log::debug!( - target: "bridge", - "Going to download orphan header from {} node: {:?}", - P::SOURCE_NAME, - parent_id, - ); - - source_orphan_header_future.set(source_client.header_by_hash(parent_id.1).fuse()); - } else if let Some(id) = sync.select_new_header_to_download() { - log::debug!( - target: "bridge", - "Going to download new header from {} node: {:?}", - P::SOURCE_NAME, - id, - ); - - source_new_header_future.set(source_client.header_by_number(id).fuse()); - } else { - source_client_is_online = true; - } - } - } -} - -/// Print synchronization progress. -fn print_sync_progress( - progress_context: (Instant, Option, Option), - eth_sync: &HeadersSync

, -) -> (Instant, Option, Option) { - let (prev_time, prev_best_header, prev_target_header) = progress_context; - let now_time = Instant::now(); - let (now_best_header, now_target_header) = eth_sync.status(); - - let need_update = now_time - prev_time > Duration::from_secs(10) - || match (prev_best_header, now_best_header) { - (Some(prev_best_header), Some(now_best_header)) => { - now_best_header.0.saturating_sub(prev_best_header) > 10.into() - } - _ => false, - }; - if !need_update { - return (prev_time, prev_best_header, prev_target_header); - } - - log::info!( - target: "bridge", - "Synced {:?} of {:?} headers", - now_best_header.map(|id| id.0), - now_target_header, - ); - (now_time, now_best_header.clone().map(|id| id.0), *now_target_header) -} diff --git a/polkadot/bridges/relays/headers-relay/src/sync_loop_metrics.rs b/polkadot/bridges/relays/headers-relay/src/sync_loop_metrics.rs deleted file mode 100644 index 456aa0a6b052245ad36b000c99b8a265c44cb9f2..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/headers-relay/src/sync_loop_metrics.rs +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Metrics for headers synchronization relay loop. 
- -use crate::sync::HeadersSync; -use crate::sync_types::{HeaderStatus, HeadersSyncPipeline}; - -use num_traits::Zero; -use relay_utils::metrics::{register, GaugeVec, Metrics, Opts, Registry, U64}; - -/// Headers sync metrics. -#[derive(Clone)] -pub struct SyncLoopMetrics { - /// Best syncing headers at "source" and "target" nodes. - best_block_numbers: GaugeVec, - /// Number of headers in given states (see `HeaderStatus`). - blocks_in_state: GaugeVec, -} - -impl Metrics for SyncLoopMetrics { - fn register(&self, registry: &Registry) -> Result<(), String> { - register(self.best_block_numbers.clone(), registry).map_err(|e| e.to_string())?; - register(self.blocks_in_state.clone(), registry).map_err(|e| e.to_string())?; - Ok(()) - } -} - -impl Default for SyncLoopMetrics { - fn default() -> Self { - SyncLoopMetrics { - best_block_numbers: GaugeVec::new( - Opts::new("best_block_numbers", "Best block numbers on source and target nodes"), - &["node"], - ) - .expect("metric is static and thus valid; qed"), - blocks_in_state: GaugeVec::new( - Opts::new("blocks_in_state", "Number of blocks in given state"), - &["state"], - ) - .expect("metric is static and thus valid; qed"), - } - } -} - -impl SyncLoopMetrics { - /// Update metrics. - pub fn update(&self, sync: &HeadersSync

) { - let headers = sync.headers(); - let source_best_number = sync.source_best_number().unwrap_or_else(Zero::zero); - let target_best_number = sync.target_best_header().map(|id| id.0).unwrap_or_else(Zero::zero); - - self.best_block_numbers - .with_label_values(&["source"]) - .set(source_best_number.into()); - self.best_block_numbers - .with_label_values(&["target"]) - .set(target_best_number.into()); - - self.blocks_in_state - .with_label_values(&["maybe_orphan"]) - .set(headers.headers_in_status(HeaderStatus::MaybeOrphan) as _); - self.blocks_in_state - .with_label_values(&["orphan"]) - .set(headers.headers_in_status(HeaderStatus::Orphan) as _); - self.blocks_in_state - .with_label_values(&["maybe_extra"]) - .set(headers.headers_in_status(HeaderStatus::MaybeExtra) as _); - self.blocks_in_state - .with_label_values(&["extra"]) - .set(headers.headers_in_status(HeaderStatus::Extra) as _); - self.blocks_in_state - .with_label_values(&["ready"]) - .set(headers.headers_in_status(HeaderStatus::Ready) as _); - self.blocks_in_state - .with_label_values(&["incomplete"]) - .set(headers.headers_in_status(HeaderStatus::Incomplete) as _); - self.blocks_in_state - .with_label_values(&["submitted"]) - .set(headers.headers_in_status(HeaderStatus::Submitted) as _); - } -} diff --git a/polkadot/bridges/relays/headers-relay/src/sync_loop_tests.rs b/polkadot/bridges/relays/headers-relay/src/sync_loop_tests.rs deleted file mode 100644 index 5cfd5e4f57b664da373bd1015614f01aed5a3d28..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/headers-relay/src/sync_loop_tests.rs +++ /dev/null @@ -1,593 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg(test)] - -use crate::sync_loop::{run, SourceClient, TargetClient}; -use crate::sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders}; - -use async_trait::async_trait; -use backoff::backoff::Backoff; -use futures::{future::FutureExt, stream::StreamExt}; -use parking_lot::Mutex; -use relay_utils::{ - process_future_result, relay_loop::Client as RelayClient, retry_backoff, HeaderId, MaybeConnectionError, -}; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, - time::Duration, -}; - -pub type TestNumber = u64; -pub type TestHash = u64; -pub type TestHeaderId = HeaderId; -pub type TestExtra = u64; -pub type TestCompletion = u64; -pub type TestQueuedHeader = QueuedHeader; - -#[derive(Default, Debug, Clone, PartialEq)] -pub struct TestHeader { - pub hash: TestHash, - pub number: TestNumber, - pub parent_hash: TestHash, -} - -impl SourceHeader for TestHeader { - fn id(&self) -> TestHeaderId { - HeaderId(self.number, self.hash) - } - - fn parent_id(&self) -> TestHeaderId { - HeaderId(self.number - 1, self.parent_hash) - } -} - -#[derive(Debug, Clone)] -struct TestError(bool); - -impl MaybeConnectionError for TestError { - fn is_connection_error(&self) -> bool { - self.0 - } -} - -#[derive(Debug, Clone, Copy, PartialEq)] -pub struct TestHeadersSyncPipeline; - -impl HeadersSyncPipeline for TestHeadersSyncPipeline { - const SOURCE_NAME: &'static str = "Source"; - const TARGET_NAME: &'static str = "Target"; - - type Hash = TestHash; - type Number = TestNumber; - type Header = TestHeader; - type Extra = TestExtra; - type Completion = TestCompletion; - - fn 
estimate_size(_: &TestQueuedHeader) -> usize { - 0 - } -} - -enum SourceMethod { - BestBlockNumber, - HeaderByHash(TestHash), - HeaderByNumber(TestNumber), - HeaderCompletion(TestHeaderId), - HeaderExtra(TestHeaderId, TestQueuedHeader), -} - -#[derive(Clone)] -struct Source { - data: Arc>, - on_method_call: Arc, -} - -struct SourceData { - best_block_number: Result, - header_by_hash: HashMap, - header_by_number: HashMap, - provides_completion: bool, - provides_extra: bool, -} - -impl Source { - pub fn new( - best_block_id: TestHeaderId, - headers: Vec<(bool, TestHeader)>, - on_method_call: impl Fn(SourceMethod, &mut SourceData) + Send + Sync + 'static, - ) -> Self { - Source { - data: Arc::new(Mutex::new(SourceData { - best_block_number: Ok(best_block_id.0), - header_by_hash: headers - .iter() - .map(|(_, header)| (header.hash, header.clone())) - .collect(), - header_by_number: headers - .iter() - .filter_map(|(is_canonical, header)| { - if *is_canonical { - Some((header.hash, header.clone())) - } else { - None - } - }) - .collect(), - provides_completion: true, - provides_extra: true, - })), - on_method_call: Arc::new(on_method_call), - } - } -} - -#[async_trait] -impl RelayClient for Source { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - unimplemented!() - } -} - -#[async_trait] -impl SourceClient for Source { - async fn best_block_number(&self) -> Result { - let mut data = self.data.lock(); - (self.on_method_call)(SourceMethod::BestBlockNumber, &mut *data); - data.best_block_number.clone() - } - - async fn header_by_hash(&self, hash: TestHash) -> Result { - let mut data = self.data.lock(); - (self.on_method_call)(SourceMethod::HeaderByHash(hash), &mut *data); - data.header_by_hash.get(&hash).cloned().ok_or(TestError(false)) - } - - async fn header_by_number(&self, number: TestNumber) -> Result { - let mut data = self.data.lock(); - (self.on_method_call)(SourceMethod::HeaderByNumber(number), &mut *data); - 
data.header_by_number.get(&number).cloned().ok_or(TestError(false)) - } - - async fn header_completion(&self, id: TestHeaderId) -> Result<(TestHeaderId, Option), TestError> { - let mut data = self.data.lock(); - (self.on_method_call)(SourceMethod::HeaderCompletion(id), &mut *data); - if data.provides_completion { - Ok((id, Some(test_completion(id)))) - } else { - Ok((id, None)) - } - } - - async fn header_extra( - &self, - id: TestHeaderId, - header: TestQueuedHeader, - ) -> Result<(TestHeaderId, TestExtra), TestError> { - let mut data = self.data.lock(); - (self.on_method_call)(SourceMethod::HeaderExtra(id, header), &mut *data); - if data.provides_extra { - Ok((id, test_extra(id))) - } else { - Err(TestError(false)) - } - } -} - -enum TargetMethod { - BestHeaderId, - IsKnownHeader(TestHeaderId), - SubmitHeaders(Vec), - IncompleteHeadersIds, - CompleteHeader(TestHeaderId, TestCompletion), - RequiresExtra(TestQueuedHeader), -} - -#[derive(Clone)] -struct Target { - data: Arc>, - on_method_call: Arc, -} - -struct TargetData { - best_header_id: Result, - is_known_header_by_hash: HashMap, - submitted_headers: HashMap, - submit_headers_result: Option>, - completed_headers: HashMap, - requires_completion: bool, - requires_extra: bool, -} - -impl Target { - pub fn new( - best_header_id: TestHeaderId, - headers: Vec, - on_method_call: impl Fn(TargetMethod, &mut TargetData) + Send + Sync + 'static, - ) -> Self { - Target { - data: Arc::new(Mutex::new(TargetData { - best_header_id: Ok(best_header_id), - is_known_header_by_hash: headers.iter().map(|header| (header.1, true)).collect(), - submitted_headers: HashMap::new(), - submit_headers_result: None, - completed_headers: HashMap::new(), - requires_completion: false, - requires_extra: false, - })), - on_method_call: Arc::new(on_method_call), - } - } -} - -#[async_trait] -impl RelayClient for Target { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - unimplemented!() - } -} - 
-#[async_trait] -impl TargetClient for Target { - async fn best_header_id(&self) -> Result { - let mut data = self.data.lock(); - (self.on_method_call)(TargetMethod::BestHeaderId, &mut *data); - data.best_header_id.clone() - } - - async fn is_known_header(&self, id: TestHeaderId) -> Result<(TestHeaderId, bool), TestError> { - let mut data = self.data.lock(); - (self.on_method_call)(TargetMethod::IsKnownHeader(id), &mut *data); - data.is_known_header_by_hash - .get(&id.1) - .cloned() - .map(|is_known_header| Ok((id, is_known_header))) - .unwrap_or(Ok((id, false))) - } - - async fn submit_headers(&self, headers: Vec) -> SubmittedHeaders { - let mut data = self.data.lock(); - (self.on_method_call)(TargetMethod::SubmitHeaders(headers.clone()), &mut *data); - data.submitted_headers - .extend(headers.iter().map(|header| (header.id().1, header.clone()))); - data.submit_headers_result.take().expect("test must accept headers") - } - - async fn incomplete_headers_ids(&self) -> Result, TestError> { - let mut data = self.data.lock(); - (self.on_method_call)(TargetMethod::IncompleteHeadersIds, &mut *data); - if data.requires_completion { - Ok(data - .submitted_headers - .iter() - .filter(|(hash, _)| !data.completed_headers.contains_key(hash)) - .map(|(_, header)| header.id()) - .collect()) - } else { - Ok(HashSet::new()) - } - } - - async fn complete_header(&self, id: TestHeaderId, completion: TestCompletion) -> Result { - let mut data = self.data.lock(); - (self.on_method_call)(TargetMethod::CompleteHeader(id, completion), &mut *data); - data.completed_headers.insert(id.1, completion); - Ok(id) - } - - async fn requires_extra(&self, header: TestQueuedHeader) -> Result<(TestHeaderId, bool), TestError> { - let mut data = self.data.lock(); - (self.on_method_call)(TargetMethod::RequiresExtra(header.clone()), &mut *data); - if data.requires_extra { - Ok((header.id(), true)) - } else { - Ok((header.id(), false)) - } - } -} - -fn test_tick() -> Duration { - // in ideal world that 
should have been Duration::from_millis(0), because we do not want - // to sleep in tests at all, but that could lead to `select! {}` always waking on tick - // => not doing actual job - Duration::from_millis(10) -} - -fn test_id(number: TestNumber) -> TestHeaderId { - HeaderId(number, number) -} - -fn test_header(number: TestNumber) -> TestHeader { - let id = test_id(number); - TestHeader { - hash: id.1, - number: id.0, - parent_hash: if number == 0 { - TestHash::default() - } else { - test_id(number - 1).1 - }, - } -} - -fn test_forked_id(number: TestNumber, forked_from: TestNumber) -> TestHeaderId { - const FORK_OFFSET: TestNumber = 1000; - - if number == forked_from { - HeaderId(number, number) - } else { - HeaderId(number, FORK_OFFSET + number) - } -} - -fn test_forked_header(number: TestNumber, forked_from: TestNumber) -> TestHeader { - let id = test_forked_id(number, forked_from); - TestHeader { - hash: id.1, - number: id.0, - parent_hash: if number == 0 { - TestHash::default() - } else { - test_forked_id(number - 1, forked_from).1 - }, - } -} - -fn test_completion(id: TestHeaderId) -> TestCompletion { - id.0 -} - -fn test_extra(id: TestHeaderId) -> TestExtra { - id.0 -} - -fn source_reject_completion(method: &SourceMethod) { - if let SourceMethod::HeaderCompletion(_) = method { - unreachable!("HeaderCompletion request is not expected") - } -} - -fn source_reject_extra(method: &SourceMethod) { - if let SourceMethod::HeaderExtra(_, _) = method { - unreachable!("HeaderExtra request is not expected") - } -} - -fn target_accept_all_headers(method: &TargetMethod, data: &mut TargetData, requires_extra: bool) { - if let TargetMethod::SubmitHeaders(ref submitted) = method { - assert_eq!(submitted.iter().all(|header| header.extra().is_some()), requires_extra,); - - data.submit_headers_result = Some(SubmittedHeaders { - submitted: submitted.iter().map(|header| header.id()).collect(), - ..Default::default() - }); - } -} - -fn target_signal_exit_when_header_submitted( - 
method: &TargetMethod, - header_id: TestHeaderId, - exit_signal: &futures::channel::mpsc::UnboundedSender<()>, -) { - if let TargetMethod::SubmitHeaders(ref submitted) = method { - if submitted.iter().any(|header| header.id() == header_id) { - exit_signal.unbounded_send(()).unwrap(); - } - } -} - -fn target_signal_exit_when_header_completed( - method: &TargetMethod, - header_id: TestHeaderId, - exit_signal: &futures::channel::mpsc::UnboundedSender<()>, -) { - if let TargetMethod::CompleteHeader(completed_id, _) = method { - if *completed_id == header_id { - exit_signal.unbounded_send(()).unwrap(); - } - } -} - -fn run_backoff_test(result: Result<(), TestError>) -> (Duration, Duration) { - let mut backoff = retry_backoff(); - - // no randomness in tests (otherwise intervals may overlap => asserts are failing) - backoff.randomization_factor = 0f64; - - // increase backoff's current interval - let interval1 = backoff.next_backoff().unwrap(); - let interval2 = backoff.next_backoff().unwrap(); - assert!(interval2 > interval1); - - // successful future result leads to backoff's reset - let go_offline_future = futures::future::Fuse::terminated(); - futures::pin_mut!(go_offline_future); - - process_future_result( - result, - &mut backoff, - |_| {}, - &mut go_offline_future, - async_std::task::sleep, - || "Test error".into(), - ); - - (interval2, backoff.next_backoff().unwrap()) -} - -#[test] -fn process_future_result_resets_backoff_on_success() { - let (interval2, interval_after_reset) = run_backoff_test(Ok(())); - assert!(interval2 > interval_after_reset); -} - -#[test] -fn process_future_result_resets_backoff_on_connection_error() { - let (interval2, interval_after_reset) = run_backoff_test(Err(TestError(true))); - assert!(interval2 > interval_after_reset); -} - -#[test] -fn process_future_result_does_not_reset_backoff_on_non_connection_error() { - let (interval2, interval_after_reset) = run_backoff_test(Err(TestError(false))); - assert!(interval2 < 
interval_after_reset); -} - -struct SyncLoopTestParams { - best_source_header: TestHeader, - headers_on_source: Vec<(bool, TestHeader)>, - best_target_header: TestHeader, - headers_on_target: Vec, - target_requires_extra: bool, - target_requires_completion: bool, - stop_at: TestHeaderId, -} - -fn run_sync_loop_test(params: SyncLoopTestParams) { - let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded(); - let target_requires_extra = params.target_requires_extra; - let target_requires_completion = params.target_requires_completion; - let stop_at = params.stop_at; - let source = Source::new( - params.best_source_header.id(), - params.headers_on_source, - move |method, _| { - if !target_requires_extra { - source_reject_extra(&method); - } - if !target_requires_completion { - source_reject_completion(&method); - } - }, - ); - let target = Target::new( - params.best_target_header.id(), - params.headers_on_target.into_iter().map(|header| header.id()).collect(), - move |method, data| { - target_accept_all_headers(&method, data, target_requires_extra); - if target_requires_completion { - target_signal_exit_when_header_completed(&method, stop_at, &exit_sender); - } else { - target_signal_exit_when_header_submitted(&method, stop_at, &exit_sender); - } - }, - ); - target.data.lock().requires_extra = target_requires_extra; - target.data.lock().requires_completion = target_requires_completion; - - run( - source, - test_tick(), - target, - test_tick(), - (), - crate::sync::tests::default_sync_params(), - None, - exit_receiver.into_future().map(|(_, _)| ()), - ); -} - -#[test] -fn sync_loop_is_able_to_synchronize_single_header() { - run_sync_loop_test(SyncLoopTestParams { - best_source_header: test_header(1), - headers_on_source: vec![(true, test_header(1))], - best_target_header: test_header(0), - headers_on_target: vec![test_header(0)], - target_requires_extra: false, - target_requires_completion: false, - stop_at: test_id(1), - }); -} - -#[test] -fn 
sync_loop_is_able_to_synchronize_single_header_with_extra() { - run_sync_loop_test(SyncLoopTestParams { - best_source_header: test_header(1), - headers_on_source: vec![(true, test_header(1))], - best_target_header: test_header(0), - headers_on_target: vec![test_header(0)], - target_requires_extra: true, - target_requires_completion: false, - stop_at: test_id(1), - }); -} - -#[test] -fn sync_loop_is_able_to_synchronize_single_header_with_completion() { - run_sync_loop_test(SyncLoopTestParams { - best_source_header: test_header(1), - headers_on_source: vec![(true, test_header(1))], - best_target_header: test_header(0), - headers_on_target: vec![test_header(0)], - target_requires_extra: false, - target_requires_completion: true, - stop_at: test_id(1), - }); -} - -#[test] -fn sync_loop_is_able_to_reorganize_from_shorter_fork() { - run_sync_loop_test(SyncLoopTestParams { - best_source_header: test_header(3), - headers_on_source: vec![ - (true, test_header(1)), - (true, test_header(2)), - (true, test_header(3)), - (false, test_forked_header(1, 0)), - (false, test_forked_header(2, 0)), - ], - best_target_header: test_forked_header(2, 0), - headers_on_target: vec![test_header(0), test_forked_header(1, 0), test_forked_header(2, 0)], - target_requires_extra: false, - target_requires_completion: false, - stop_at: test_id(3), - }); -} - -#[test] -fn sync_loop_is_able_to_reorganize_from_longer_fork() { - run_sync_loop_test(SyncLoopTestParams { - best_source_header: test_header(3), - headers_on_source: vec![ - (true, test_header(1)), - (true, test_header(2)), - (true, test_header(3)), - (false, test_forked_header(1, 0)), - (false, test_forked_header(2, 0)), - (false, test_forked_header(3, 0)), - (false, test_forked_header(4, 0)), - (false, test_forked_header(5, 0)), - ], - best_target_header: test_forked_header(5, 0), - headers_on_target: vec![ - test_header(0), - test_forked_header(1, 0), - test_forked_header(2, 0), - test_forked_header(3, 0), - test_forked_header(4, 0), - 
test_forked_header(5, 0), - ], - target_requires_extra: false, - target_requires_completion: false, - stop_at: test_id(3), - }); -} diff --git a/polkadot/bridges/relays/headers-relay/src/sync_types.rs b/polkadot/bridges/relays/headers-relay/src/sync_types.rs deleted file mode 100644 index e1da6232258c9f0ebd8babe4016791b1adabbadb..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/headers-relay/src/sync_types.rs +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types that are used by headers synchronization components. - -use relay_utils::{format_ids, HeaderId}; -use std::{ops::Deref, sync::Arc}; - -/// Ethereum header synchronization status. -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum HeaderStatus { - /// Header is unknown. - Unknown, - /// Header is in MaybeOrphan queue. - MaybeOrphan, - /// Header is in Orphan queue. - Orphan, - /// Header is in MaybeExtra queue. - MaybeExtra, - /// Header is in Extra queue. - Extra, - /// Header is in Ready queue. - Ready, - /// Header is in Incomplete queue. - Incomplete, - /// Header has been recently submitted to the target node. - Submitted, - /// Header is known to the target node. - Synced, -} - -/// Headers synchronization pipeline. 
-pub trait HeadersSyncPipeline: Clone + Send + Sync { - /// Name of the headers source. - const SOURCE_NAME: &'static str; - /// Name of the headers target. - const TARGET_NAME: &'static str; - - /// Headers we're syncing are identified by this hash. - type Hash: Eq + Clone + Copy + Send + Sync + std::fmt::Debug + std::fmt::Display + std::hash::Hash; - /// Headers we're syncing are identified by this number. - type Number: relay_utils::BlockNumberBase; - /// Type of header that we're syncing. - type Header: SourceHeader; - /// Type of extra data for the header that we're receiving from the source node: - /// 1) extra data is required for some headers; - /// 2) target node may answer if it'll require extra data before header is submitted; - /// 3) extra data available since the header creation time; - /// 4) header and extra data are submitted in single transaction. - /// - /// Example: Ethereum transactions receipts. - type Extra: Clone + Send + Sync + PartialEq + std::fmt::Debug; - /// Type of data required to 'complete' header that we're receiving from the source node: - /// 1) completion data is required for some headers; - /// 2) target node can't answer if it'll require completion data before header is accepted; - /// 3) completion data may be generated after header generation; - /// 4) header and completion data are submitted in separate transactions. - /// - /// Example: Substrate GRANDPA justifications. - type Completion: Clone + Send + Sync + std::fmt::Debug; - - /// Function used to estimate size of target-encoded header. - fn estimate_size(source: &QueuedHeader) -> usize; -} - -/// A HeaderId for `HeaderSyncPipeline`. -pub type HeaderIdOf

= HeaderId<

::Hash,

::Number>; - -/// Header that we're receiving from source node. -pub trait SourceHeader: Clone + std::fmt::Debug + PartialEq + Send + Sync { - /// Returns ID of header. - fn id(&self) -> HeaderId; - /// Returns ID of parent header. - /// - /// Panics if called for genesis header. - fn parent_id(&self) -> HeaderId; -} - -/// Header how it's stored in the synchronization queue. -#[derive(Clone, Debug, PartialEq)] -pub struct QueuedHeader(Arc>); - -impl QueuedHeader

{ - /// Creates new queued header. - pub fn new(header: P::Header) -> Self { - QueuedHeader(Arc::new(QueuedHeaderData { header, extra: None })) - } - - /// Set associated extra data. - pub fn set_extra(self, extra: P::Extra) -> Self { - QueuedHeader(Arc::new(QueuedHeaderData { - header: Arc::try_unwrap(self.0) - .map(|data| data.header) - .unwrap_or_else(|data| data.header.clone()), - extra: Some(extra), - })) - } -} - -impl Deref for QueuedHeader

{ - type Target = QueuedHeaderData

; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -/// Header how it's stored in the synchronization queue. -#[derive(Clone, Debug, Default, PartialEq)] -pub struct QueuedHeaderData { - header: P::Header, - extra: Option, -} - -impl QueuedHeader

{ - /// Returns ID of header. - pub fn id(&self) -> HeaderId { - self.header.id() - } - - /// Returns ID of parent header. - pub fn parent_id(&self) -> HeaderId { - self.header.parent_id() - } - - /// Returns reference to header. - pub fn header(&self) -> &P::Header { - &self.header - } - - /// Returns reference to associated extra data. - pub fn extra(&self) -> &Option { - &self.extra - } -} - -/// Headers submission result. -#[derive(Debug)] -#[cfg_attr(test, derive(PartialEq))] -pub struct SubmittedHeaders { - /// IDs of headers that have been submitted to target node. - pub submitted: Vec, - /// IDs of incomplete headers. These headers were submitted (so this id is also in `submitted` vec), - /// but all descendants are not. - pub incomplete: Vec, - /// IDs of ignored headers that we have decided not to submit (they're either rejected by - /// target node immediately, or they're descendants of incomplete headers). - pub rejected: Vec, - /// Fatal target node error, if it has occured during submission. 
- pub fatal_error: Option, -} - -impl Default for SubmittedHeaders { - fn default() -> Self { - SubmittedHeaders { - submitted: Vec::new(), - incomplete: Vec::new(), - rejected: Vec::new(), - fatal_error: None, - } - } -} - -impl std::fmt::Display for SubmittedHeaders { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let submitted = format_ids(self.submitted.iter()); - let incomplete = format_ids(self.incomplete.iter()); - let rejected = format_ids(self.rejected.iter()); - - write!( - f, - "Submitted: {}, Incomplete: {}, Rejected: {}", - submitted, incomplete, rejected - ) - } -} diff --git a/polkadot/bridges/relays/kusama-client/Cargo.toml b/polkadot/bridges/relays/kusama-client/Cargo.toml deleted file mode 100644 index 04958cf2b3618c5ebfd52df9e6c0e2606fa7889e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/kusama-client/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "relay-kusama-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers-relay" } -relay-substrate-client = { path = "../substrate-client" } -relay-utils = { path = "../utils" } - -# Bridge dependencies - -bp-kusama = { path = "../../primitives/kusama" } - -# Substrate Dependencies - -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git 
a/polkadot/bridges/relays/kusama-client/src/lib.rs b/polkadot/bridges/relays/kusama-client/src/lib.rs deleted file mode 100644 index 9f9507f5ca62ef82ee06d27998e4c7322536ebaa..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/kusama-client/src/lib.rs +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types used to connect to the Kusama chain. - -use relay_substrate_client::{Chain, ChainBase}; -use std::time::Duration; - -/// Kusama header id. -pub type HeaderId = relay_utils::HeaderId; - -/// Kusama chain definition -#[derive(Debug, Clone, Copy)] -pub struct Kusama; - -impl ChainBase for Kusama { - type BlockNumber = bp_kusama::BlockNumber; - type Hash = bp_kusama::Hash; - type Hasher = bp_kusama::Hasher; - type Header = bp_kusama::Header; -} - -impl Chain for Kusama { - const NAME: &'static str = "Kusama"; - const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); - - type AccountId = bp_kusama::AccountId; - type Index = bp_kusama::Nonce; - type SignedBlock = bp_kusama::SignedBlock; - type Call = (); -} - -/// Kusama header type used in headers sync. 
-pub type SyncHeader = relay_substrate_client::SyncHeader; diff --git a/polkadot/bridges/relays/messages-relay/Cargo.toml b/polkadot/bridges/relays/messages-relay/Cargo.toml deleted file mode 100644 index 9c2daefdb4271b93b733a4b9ed67de5b2e64a79e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/messages-relay/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "messages-relay" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -async-std = "1.6.5" -async-trait = "0.1.40" -futures = "0.3.5" -hex = "0.4" -log = "0.4.11" -parking_lot = "0.11.0" - -# Bridge Dependencies - -bp-message-lane = { path = "../../primitives/message-lane" } -relay-utils = { path = "../utils" } diff --git a/polkadot/bridges/relays/messages-relay/src/lib.rs b/polkadot/bridges/relays/messages-relay/src/lib.rs deleted file mode 100644 index 99222f0e02fbc5c6eb3d980c6789d5fad1d9b7a7..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/messages-relay/src/lib.rs +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relaying [`message-lane`](../pallet_message_lane/index.html) application specific -//! data. 
Message lane allows sending arbitrary messages between bridged chains. This -//! module provides entrypoint that starts reading messages from given message lane -//! of source chain and submits proof-of-message-at-source-chain transactions to the -//! target chain. Additionaly, proofs-of-messages-delivery are sent back from the -//! target chain to the source chain. - -// required for futures::select! -#![recursion_limit = "1024"] -#![warn(missing_docs)] - -mod metrics; - -pub mod message_lane; -pub mod message_lane_loop; - -mod message_race_delivery; -mod message_race_loop; -mod message_race_receiving; -mod message_race_strategy; diff --git a/polkadot/bridges/relays/messages-relay/src/message_lane.rs b/polkadot/bridges/relays/messages-relay/src/message_lane.rs deleted file mode 100644 index 0eab02ae299f28f342823d3a72830cf14d8ce19d..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/messages-relay/src/message_lane.rs +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! One-way message lane types. Within single one-way lane we have three 'races' where we try to: -//! -//! 1) relay new messages from source to target node; -//! 2) relay proof-of-delivery from target to source node. 
- -use relay_utils::{BlockNumberBase, HeaderId}; -use std::fmt::Debug; - -/// One-way message lane. -pub trait MessageLane: Clone + Send + Sync { - /// Name of the messages source. - const SOURCE_NAME: &'static str; - /// Name of the messages target. - const TARGET_NAME: &'static str; - - /// Messages proof. - type MessagesProof: Clone + Debug + Send + Sync; - /// Messages receiving proof. - type MessagesReceivingProof: Clone + Debug + Send + Sync; - - /// Number of the source header. - type SourceHeaderNumber: BlockNumberBase; - /// Hash of the source header. - type SourceHeaderHash: Clone + Debug + Default + PartialEq + Send + Sync; - - /// Number of the target header. - type TargetHeaderNumber: BlockNumberBase; - /// Hash of the target header. - type TargetHeaderHash: Clone + Debug + Default + PartialEq + Send + Sync; -} - -/// Source header id within given one-way message lane. -pub type SourceHeaderIdOf

= HeaderId<

::SourceHeaderHash,

::SourceHeaderNumber>; - -/// Target header id within given one-way message lane. -pub type TargetHeaderIdOf

= HeaderId<

::TargetHeaderHash,

::TargetHeaderNumber>; diff --git a/polkadot/bridges/relays/messages-relay/src/message_lane_loop.rs b/polkadot/bridges/relays/messages-relay/src/message_lane_loop.rs deleted file mode 100644 index 28b55dba47cc7dc84767680f598b1b71e9bfc002..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/messages-relay/src/message_lane_loop.rs +++ /dev/null @@ -1,841 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Message delivery loop. Designed to work with message-lane pallet. -//! -//! Single relay instance delivers messages of single lane in single direction. -//! To serve two-way lane, you would need two instances of relay. -//! To serve N two-way lanes, you would need N*2 instances of relay. -//! -//! Please keep in mind that the best header in this file is actually best -//! finalized header. I.e. when talking about headers in lane context, we -//! only care about finalized headers. 
- -use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}; -use crate::message_race_delivery::run as run_message_delivery_race; -use crate::message_race_receiving::run as run_message_receiving_race; -use crate::metrics::MessageLaneLoopMetrics; - -use async_trait::async_trait; -use bp_message_lane::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; -use futures::{channel::mpsc::unbounded, future::FutureExt, stream::StreamExt}; -use relay_utils::{ - interval, - metrics::{start as metrics_start, GlobalMetrics, MetricsParams}, - process_future_result, - relay_loop::Client as RelayClient, - retry_backoff, FailedClient, -}; -use std::{collections::BTreeMap, fmt::Debug, future::Future, ops::RangeInclusive, time::Duration}; - -/// Message lane loop configuration params. -#[derive(Debug, Clone)] -pub struct Params { - /// Id of lane this loop is servicing. - pub lane: LaneId, - /// Interval at which we ask target node about its updates. - pub source_tick: Duration, - /// Interval at which we ask target node about its updates. - pub target_tick: Duration, - /// Delay between moments when connection error happens and our reconnect attempt. - pub reconnect_delay: Duration, - /// The loop will auto-restart if there has been no updates during this period. - pub stall_timeout: Duration, - /// Message delivery race parameters. - pub delivery_params: MessageDeliveryParams, -} - -/// Message delivery race parameters. -#[derive(Debug, Clone)] -pub struct MessageDeliveryParams { - /// Maximal number of unconfirmed relayer entries at the inbound lane. If there's that number of entries - /// in the `InboundLaneData::relayers` set, all new messages will be rejected until reward payment will - /// be proved (by including outbound lane state to the message delivery transaction). 
- pub max_unrewarded_relayer_entries_at_target: MessageNonce, - /// Message delivery race will stop delivering messages if there are `max_unconfirmed_nonces_at_target` - /// unconfirmed nonces on the target node. The race would continue once they're confirmed by the - /// receiving race. - pub max_unconfirmed_nonces_at_target: MessageNonce, - /// Maximal number of relayed messages in single delivery transaction. - pub max_messages_in_single_batch: MessageNonce, - /// Maximal cumulative dispatch weight of relayed messages in single delivery transaction. - pub max_messages_weight_in_single_batch: Weight, - /// Maximal cumulative size of relayed messages in single delivery transaction. - pub max_messages_size_in_single_batch: usize, -} - -/// Message weights. -#[derive(Debug, Clone, Copy, PartialEq)] -pub struct MessageWeights { - /// Message dispatch weight. - pub weight: Weight, - /// Message size (number of bytes in encoded payload). - pub size: usize, -} - -/// Messages weights map. -pub type MessageWeightsMap = BTreeMap; - -/// Message delivery race proof parameters. -#[derive(Debug, PartialEq)] -pub struct MessageProofParameters { - /// Include outbound lane state proof? - pub outbound_state_proof_required: bool, - /// Cumulative dispatch weight of messages that we're building proof for. - pub dispatch_weight: Weight, -} - -/// Source client trait. -#[async_trait] -pub trait SourceClient: RelayClient { - /// Returns state of the client. - async fn state(&self) -> Result, Self::Error>; - - /// Get nonce of instance of latest generated message. - async fn latest_generated_nonce( - &self, - id: SourceHeaderIdOf

, - ) -> Result<(SourceHeaderIdOf

, MessageNonce), Self::Error>; - /// Get nonce of the latest message, which receiving has been confirmed by the target chain. - async fn latest_confirmed_received_nonce( - &self, - id: SourceHeaderIdOf

, - ) -> Result<(SourceHeaderIdOf

, MessageNonce), Self::Error>; - - /// Returns mapping of message nonces, generated on this client, to their weights. - /// - /// Some weights may be missing from returned map, if corresponding messages were pruned at - /// the source chain. - async fn generated_messages_weights( - &self, - id: SourceHeaderIdOf

, - nonces: RangeInclusive, - ) -> Result; - - /// Prove messages in inclusive range [begin; end]. - async fn prove_messages( - &self, - id: SourceHeaderIdOf

, - nonces: RangeInclusive, - proof_parameters: MessageProofParameters, - ) -> Result<(SourceHeaderIdOf

, RangeInclusive, P::MessagesProof), Self::Error>; - - /// Submit messages receiving proof. - async fn submit_messages_receiving_proof( - &self, - generated_at_block: TargetHeaderIdOf

, - proof: P::MessagesReceivingProof, - ) -> Result<(), Self::Error>; -} - -/// Target client trait. -#[async_trait] -pub trait TargetClient: RelayClient { - /// Returns state of the client. - async fn state(&self) -> Result, Self::Error>; - - /// Get nonce of latest received message. - async fn latest_received_nonce( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, MessageNonce), Self::Error>; - - /// Get nonce of latest confirmed message. - async fn latest_confirmed_received_nonce( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, MessageNonce), Self::Error>; - /// Get state of unrewarded relayers set at the inbound lane. - async fn unrewarded_relayers_state( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, UnrewardedRelayersState), Self::Error>; - - /// Prove messages receiving at given block. - async fn prove_messages_receiving( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, P::MessagesReceivingProof), Self::Error>; - - /// Submit messages proof. - async fn submit_messages_proof( - &self, - generated_at_header: SourceHeaderIdOf

, - nonces: RangeInclusive, - proof: P::MessagesProof, - ) -> Result, Self::Error>; -} - -/// State of the client. -#[derive(Clone, Debug, Default, PartialEq)] -pub struct ClientState { - /// Best header id of this chain. - pub best_self: SelfHeaderId, - /// Best finalized header id of this chain. - pub best_finalized_self: SelfHeaderId, - /// Best finalized header id of the peer chain read at the best block of this chain (at `best_finalized_self`). - pub best_finalized_peer_at_best_self: PeerHeaderId, -} - -/// State of source client in one-way message lane. -pub type SourceClientState

= ClientState, TargetHeaderIdOf

>; - -/// State of target client in one-way message lane. -pub type TargetClientState

= ClientState, SourceHeaderIdOf

>; - -/// Both clients state. -#[derive(Debug, Default)] -pub struct ClientsState { - /// Source client state. - pub source: Option>, - /// Target client state. - pub target: Option>, -} - -/// Run message lane service loop. -pub fn run( - params: Params, - source_client: impl SourceClient

, - target_client: impl TargetClient

, - metrics_params: Option, - exit_signal: impl Future, -) { - let exit_signal = exit_signal.shared(); - let metrics_global = GlobalMetrics::default(); - let metrics_msg = MessageLaneLoopMetrics::default(); - let metrics_enabled = metrics_params.is_some(); - metrics_start( - format!( - "{}_to_{}_MessageLane_{}", - P::SOURCE_NAME, - P::TARGET_NAME, - hex::encode(params.lane) - ), - metrics_params, - &metrics_global, - &metrics_msg, - ); - - relay_utils::relay_loop::run( - params.reconnect_delay, - source_client, - target_client, - |source_client, target_client| { - run_until_connection_lost( - params.clone(), - source_client, - target_client, - if metrics_enabled { - Some(metrics_global.clone()) - } else { - None - }, - if metrics_enabled { - Some(metrics_msg.clone()) - } else { - None - }, - exit_signal.clone(), - ) - }, - ); -} - -/// Run one-way message delivery loop until connection with target or source node is lost, or exit signal is received. -async fn run_until_connection_lost, TC: TargetClient

>( - params: Params, - source_client: SC, - target_client: TC, - metrics_global: Option, - metrics_msg: Option, - exit_signal: impl Future, -) -> Result<(), FailedClient> { - let mut source_retry_backoff = retry_backoff(); - let mut source_client_is_online = false; - let mut source_state_required = true; - let source_state = source_client.state().fuse(); - let source_go_offline_future = futures::future::Fuse::terminated(); - let source_tick_stream = interval(params.source_tick).fuse(); - - let mut target_retry_backoff = retry_backoff(); - let mut target_client_is_online = false; - let mut target_state_required = true; - let target_state = target_client.state().fuse(); - let target_go_offline_future = futures::future::Fuse::terminated(); - let target_tick_stream = interval(params.target_tick).fuse(); - - let ( - (delivery_source_state_sender, delivery_source_state_receiver), - (delivery_target_state_sender, delivery_target_state_receiver), - ) = (unbounded(), unbounded()); - let delivery_race_loop = run_message_delivery_race( - source_client.clone(), - delivery_source_state_receiver, - target_client.clone(), - delivery_target_state_receiver, - params.stall_timeout, - metrics_msg.clone(), - params.delivery_params, - ) - .fuse(); - - let ( - (receiving_source_state_sender, receiving_source_state_receiver), - (receiving_target_state_sender, receiving_target_state_receiver), - ) = (unbounded(), unbounded()); - let receiving_race_loop = run_message_receiving_race( - source_client.clone(), - receiving_source_state_receiver, - target_client.clone(), - receiving_target_state_receiver, - params.stall_timeout, - metrics_msg.clone(), - ) - .fuse(); - - let exit_signal = exit_signal.fuse(); - - futures::pin_mut!( - source_state, - source_go_offline_future, - source_tick_stream, - target_state, - target_go_offline_future, - target_tick_stream, - delivery_race_loop, - receiving_race_loop, - exit_signal - ); - - loop { - futures::select! 
{ - new_source_state = source_state => { - source_state_required = false; - - source_client_is_online = process_future_result( - new_source_state, - &mut source_retry_backoff, - |new_source_state| { - log::debug!( - target: "bridge", - "Received state from {} node: {:?}", - P::SOURCE_NAME, - new_source_state, - ); - let _ = delivery_source_state_sender.unbounded_send(new_source_state.clone()); - let _ = receiving_source_state_sender.unbounded_send(new_source_state.clone()); - - if let Some(metrics_msg) = metrics_msg.as_ref() { - metrics_msg.update_source_state::

(new_source_state); - } - }, - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving state from {} node", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - _ = source_go_offline_future => { - source_client_is_online = true; - }, - _ = source_tick_stream.next() => { - source_state_required = true; - }, - new_target_state = target_state => { - target_state_required = false; - - target_client_is_online = process_future_result( - new_target_state, - &mut target_retry_backoff, - |new_target_state| { - log::debug!( - target: "bridge", - "Received state from {} node: {:?}", - P::TARGET_NAME, - new_target_state, - ); - let _ = delivery_target_state_sender.unbounded_send(new_target_state.clone()); - let _ = receiving_target_state_sender.unbounded_send(new_target_state.clone()); - - if let Some(metrics_msg) = metrics_msg.as_ref() { - metrics_msg.update_target_state::

(new_target_state); - } - }, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving state from {} node", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - _ = target_go_offline_future => { - target_client_is_online = true; - }, - _ = target_tick_stream.next() => { - target_state_required = true; - }, - - delivery_error = delivery_race_loop => { - match delivery_error { - Ok(_) => unreachable!("only ends with error; qed"), - Err(err) => return Err(err), - } - }, - receiving_error = receiving_race_loop => { - match receiving_error { - Ok(_) => unreachable!("only ends with error; qed"), - Err(err) => return Err(err), - } - }, - - () = exit_signal => { - return Ok(()); - } - } - - if let Some(ref metrics_global) = metrics_global { - metrics_global.update().await; - } - - if source_client_is_online && source_state_required { - log::debug!(target: "bridge", "Asking {} node about its state", P::SOURCE_NAME); - source_state.set(source_client.state().fuse()); - source_client_is_online = false; - } - - if target_client_is_online && target_state_required { - log::debug!(target: "bridge", "Asking {} node about its state", P::TARGET_NAME); - target_state.set(target_client.state().fuse()); - target_client_is_online = false; - } - } -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use futures::stream::StreamExt; - use parking_lot::Mutex; - use relay_utils::{HeaderId, MaybeConnectionError}; - use std::sync::Arc; - - pub fn header_id(number: TestSourceHeaderNumber) -> TestSourceHeaderId { - HeaderId(number, number) - } - - pub type TestSourceHeaderId = HeaderId; - pub type TestTargetHeaderId = HeaderId; - - pub type TestMessagesProof = (RangeInclusive, Option); - pub type TestMessagesReceivingProof = MessageNonce; - - pub type TestSourceHeaderNumber = u64; - pub type TestSourceHeaderHash = u64; - - pub type TestTargetHeaderNumber = u64; - pub type TestTargetHeaderHash = u64; - - #[derive(Debug)] - pub struct 
TestError; - - impl MaybeConnectionError for TestError { - fn is_connection_error(&self) -> bool { - true - } - } - - #[derive(Clone)] - pub struct TestMessageLane; - - impl MessageLane for TestMessageLane { - const SOURCE_NAME: &'static str = "TestSource"; - const TARGET_NAME: &'static str = "TestTarget"; - - type MessagesProof = TestMessagesProof; - type MessagesReceivingProof = TestMessagesReceivingProof; - - type SourceHeaderNumber = TestSourceHeaderNumber; - type SourceHeaderHash = TestSourceHeaderHash; - - type TargetHeaderNumber = TestTargetHeaderNumber; - type TargetHeaderHash = TestTargetHeaderHash; - } - - #[derive(Debug, Default, Clone)] - pub struct TestClientData { - is_source_fails: bool, - is_source_reconnected: bool, - source_state: SourceClientState, - source_latest_generated_nonce: MessageNonce, - source_latest_confirmed_received_nonce: MessageNonce, - submitted_messages_receiving_proofs: Vec, - is_target_fails: bool, - is_target_reconnected: bool, - target_state: SourceClientState, - target_latest_received_nonce: MessageNonce, - target_latest_confirmed_received_nonce: MessageNonce, - submitted_messages_proofs: Vec, - } - - #[derive(Clone)] - pub struct TestSourceClient { - data: Arc>, - tick: Arc, - } - - #[async_trait] - impl RelayClient for TestSourceClient { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - data.is_source_reconnected = true; - } - Ok(()) - } - } - - #[async_trait] - impl SourceClient for TestSourceClient { - async fn state(&self) -> Result, TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - if data.is_source_fails { - return Err(TestError); - } - Ok(data.source_state.clone()) - } - - async fn latest_generated_nonce( - &self, - id: SourceHeaderIdOf, - ) -> Result<(SourceHeaderIdOf, MessageNonce), TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - if data.is_source_fails { - 
return Err(TestError); - } - Ok((id, data.source_latest_generated_nonce)) - } - - async fn latest_confirmed_received_nonce( - &self, - id: SourceHeaderIdOf, - ) -> Result<(SourceHeaderIdOf, MessageNonce), TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - Ok((id, data.source_latest_confirmed_received_nonce)) - } - - async fn generated_messages_weights( - &self, - _id: SourceHeaderIdOf, - nonces: RangeInclusive, - ) -> Result { - Ok(nonces - .map(|nonce| (nonce, MessageWeights { weight: 1, size: 1 })) - .collect()) - } - - async fn prove_messages( - &self, - id: SourceHeaderIdOf, - nonces: RangeInclusive, - proof_parameters: MessageProofParameters, - ) -> Result< - ( - SourceHeaderIdOf, - RangeInclusive, - TestMessagesProof, - ), - TestError, - > { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - Ok(( - id, - nonces.clone(), - ( - nonces, - if proof_parameters.outbound_state_proof_required { - Some(data.source_latest_confirmed_received_nonce) - } else { - None - }, - ), - )) - } - - async fn submit_messages_receiving_proof( - &self, - _generated_at_block: TargetHeaderIdOf, - proof: TestMessagesReceivingProof, - ) -> Result<(), TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - data.submitted_messages_receiving_proofs.push(proof); - data.source_latest_confirmed_received_nonce = proof; - Ok(()) - } - } - - #[derive(Clone)] - pub struct TestTargetClient { - data: Arc>, - tick: Arc, - } - - #[async_trait] - impl RelayClient for TestTargetClient { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - data.is_target_reconnected = true; - } - Ok(()) - } - } - - #[async_trait] - impl TargetClient for TestTargetClient { - async fn state(&self) -> Result, TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - if data.is_target_fails { - return Err(TestError); - } - 
Ok(data.target_state.clone()) - } - - async fn latest_received_nonce( - &self, - id: TargetHeaderIdOf, - ) -> Result<(TargetHeaderIdOf, MessageNonce), TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - if data.is_target_fails { - return Err(TestError); - } - Ok((id, data.target_latest_received_nonce)) - } - - async fn unrewarded_relayers_state( - &self, - id: TargetHeaderIdOf, - ) -> Result<(TargetHeaderIdOf, UnrewardedRelayersState), TestError> { - Ok(( - id, - UnrewardedRelayersState { - unrewarded_relayer_entries: 0, - messages_in_oldest_entry: 0, - total_messages: 0, - }, - )) - } - - async fn latest_confirmed_received_nonce( - &self, - id: TargetHeaderIdOf, - ) -> Result<(TargetHeaderIdOf, MessageNonce), TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - if data.is_target_fails { - return Err(TestError); - } - Ok((id, data.target_latest_confirmed_received_nonce)) - } - - async fn prove_messages_receiving( - &self, - id: TargetHeaderIdOf, - ) -> Result<(TargetHeaderIdOf, TestMessagesReceivingProof), TestError> { - Ok((id, self.data.lock().target_latest_received_nonce)) - } - - async fn submit_messages_proof( - &self, - _generated_at_header: SourceHeaderIdOf, - nonces: RangeInclusive, - proof: TestMessagesProof, - ) -> Result, TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - if data.is_target_fails { - return Err(TestError); - } - data.target_state.best_self = - HeaderId(data.target_state.best_self.0 + 1, data.target_state.best_self.1 + 1); - data.target_latest_received_nonce = *proof.0.end(); - if let Some(target_latest_confirmed_received_nonce) = proof.1 { - data.target_latest_confirmed_received_nonce = target_latest_confirmed_received_nonce; - } - data.submitted_messages_proofs.push(proof); - Ok(nonces) - } - } - - fn run_loop_test( - data: TestClientData, - source_tick: Arc, - target_tick: Arc, - exit_signal: impl Future, - ) -> TestClientData { - async_std::task::block_on(async 
{ - let data = Arc::new(Mutex::new(data)); - - let source_client = TestSourceClient { - data: data.clone(), - tick: source_tick, - }; - let target_client = TestTargetClient { - data: data.clone(), - tick: target_tick, - }; - run( - Params { - lane: [0, 0, 0, 0], - source_tick: Duration::from_millis(100), - target_tick: Duration::from_millis(100), - reconnect_delay: Duration::from_millis(0), - stall_timeout: Duration::from_millis(60 * 1000), - delivery_params: MessageDeliveryParams { - max_unrewarded_relayer_entries_at_target: 4, - max_unconfirmed_nonces_at_target: 4, - max_messages_in_single_batch: 4, - max_messages_weight_in_single_batch: 4, - max_messages_size_in_single_batch: 4, - }, - }, - source_client, - target_client, - None, - exit_signal, - ); - let result = data.lock().clone(); - result - }) - } - - #[test] - fn message_lane_loop_is_able_to_recover_from_connection_errors() { - // with this configuration, source client will return Err, making source client - // reconnect. Then the target client will fail with Err + reconnect. Then we finally - // able to deliver messages. 
- let (exit_sender, exit_receiver) = unbounded(); - let result = run_loop_test( - TestClientData { - is_source_fails: true, - source_state: ClientState { - best_self: HeaderId(0, 0), - best_finalized_self: HeaderId(0, 0), - best_finalized_peer_at_best_self: HeaderId(0, 0), - }, - source_latest_generated_nonce: 1, - target_state: ClientState { - best_self: HeaderId(0, 0), - best_finalized_self: HeaderId(0, 0), - best_finalized_peer_at_best_self: HeaderId(0, 0), - }, - target_latest_received_nonce: 0, - ..Default::default() - }, - Arc::new(|data: &mut TestClientData| { - if data.is_source_reconnected { - data.is_source_fails = false; - data.is_target_fails = true; - } - }), - Arc::new(move |data: &mut TestClientData| { - if data.is_target_reconnected { - data.is_target_fails = false; - } - if data.target_state.best_finalized_peer_at_best_self.0 < 10 { - data.target_state.best_finalized_peer_at_best_self = HeaderId( - data.target_state.best_finalized_peer_at_best_self.0 + 1, - data.target_state.best_finalized_peer_at_best_self.0 + 1, - ); - } - if !data.submitted_messages_proofs.is_empty() { - exit_sender.unbounded_send(()).unwrap(); - } - }), - exit_receiver.into_future().map(|(_, _)| ()), - ); - - assert_eq!(result.submitted_messages_proofs, vec![(1..=1, None)],); - } - - #[test] - fn message_lane_loop_works() { - let (exit_sender, exit_receiver) = unbounded(); - let result = run_loop_test( - TestClientData { - source_state: ClientState { - best_self: HeaderId(10, 10), - best_finalized_self: HeaderId(10, 10), - best_finalized_peer_at_best_self: HeaderId(0, 0), - }, - source_latest_generated_nonce: 10, - target_state: ClientState { - best_self: HeaderId(0, 0), - best_finalized_self: HeaderId(0, 0), - best_finalized_peer_at_best_self: HeaderId(0, 0), - }, - target_latest_received_nonce: 0, - ..Default::default() - }, - Arc::new(|_: &mut TestClientData| {}), - Arc::new(move |data: &mut TestClientData| { - // syncing source headers -> target chain (all at once) - if 
data.target_state.best_finalized_peer_at_best_self.0 < data.source_state.best_finalized_self.0 { - data.target_state.best_finalized_peer_at_best_self = data.source_state.best_finalized_self; - } - // syncing source headers -> target chain (all at once) - if data.source_state.best_finalized_peer_at_best_self.0 < data.target_state.best_finalized_self.0 { - data.source_state.best_finalized_peer_at_best_self = data.target_state.best_finalized_self; - } - // if target has received messages batch => increase blocks so that confirmations may be sent - if data.target_latest_received_nonce == 4 - || data.target_latest_received_nonce == 8 - || data.target_latest_received_nonce == 10 - { - data.target_state.best_self = - HeaderId(data.target_state.best_self.0 + 1, data.target_state.best_self.0 + 1); - data.target_state.best_finalized_self = data.target_state.best_self; - data.source_state.best_self = - HeaderId(data.source_state.best_self.0 + 1, data.source_state.best_self.0 + 1); - data.source_state.best_finalized_self = data.source_state.best_self; - } - // if source has received all messages receiving confirmations => increase source block so that confirmations may be sent - if data.source_latest_confirmed_received_nonce == 10 { - exit_sender.unbounded_send(()).unwrap(); - } - }), - exit_receiver.into_future().map(|(_, _)| ()), - ); - - // there are no strict restrictions on when reward confirmation should come - // (because `max_unconfirmed_nonces_at_target` is `100` in tests and this confirmation - // depends on the state of both clients) - // => we do not check it here - assert_eq!(result.submitted_messages_proofs[0].0, 1..=4); - assert_eq!(result.submitted_messages_proofs[1].0, 5..=8); - assert_eq!(result.submitted_messages_proofs[2].0, 9..=10); - assert!(!result.submitted_messages_receiving_proofs.is_empty()); - } -} diff --git a/polkadot/bridges/relays/messages-relay/src/message_race_delivery.rs b/polkadot/bridges/relays/messages-relay/src/message_race_delivery.rs 
deleted file mode 100644 index c7e308fee1e18c16a954c72ff01bc578a6759294..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/messages-relay/src/message_race_delivery.rs +++ /dev/null @@ -1,871 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -//! Message delivery race delivers proof-of-messages from lane.source to lane.target. - -use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}; -use crate::message_lane_loop::{ - MessageDeliveryParams, MessageProofParameters, MessageWeightsMap, SourceClient as MessageLaneSourceClient, - SourceClientState, TargetClient as MessageLaneTargetClient, TargetClientState, -}; -use crate::message_race_loop::{ - MessageRace, NoncesRange, RaceState, RaceStrategy, SourceClient, SourceClientNonces, TargetClient, - TargetClientNonces, -}; -use crate::message_race_strategy::BasicStrategy; -use crate::metrics::MessageLaneLoopMetrics; - -use async_trait::async_trait; -use bp_message_lane::{MessageNonce, UnrewardedRelayersState, Weight}; -use futures::stream::FusedStream; -use relay_utils::FailedClient; -use std::{ - collections::{BTreeMap, VecDeque}, - marker::PhantomData, - ops::RangeInclusive, - time::Duration, -}; - -/// Run message delivery race. -pub async fn run( - source_client: impl MessageLaneSourceClient

, - source_state_updates: impl FusedStream>, - target_client: impl MessageLaneTargetClient

, - target_state_updates: impl FusedStream>, - stall_timeout: Duration, - metrics_msg: Option, - params: MessageDeliveryParams, -) -> Result<(), FailedClient> { - crate::message_race_loop::run( - MessageDeliveryRaceSource { - client: source_client, - metrics_msg: metrics_msg.clone(), - _phantom: Default::default(), - }, - source_state_updates, - MessageDeliveryRaceTarget { - client: target_client, - metrics_msg, - _phantom: Default::default(), - }, - target_state_updates, - stall_timeout, - MessageDeliveryStrategy::

{ - max_unrewarded_relayer_entries_at_target: params.max_unrewarded_relayer_entries_at_target, - max_unconfirmed_nonces_at_target: params.max_unconfirmed_nonces_at_target, - max_messages_in_single_batch: params.max_messages_in_single_batch, - max_messages_weight_in_single_batch: params.max_messages_weight_in_single_batch, - max_messages_size_in_single_batch: params.max_messages_size_in_single_batch, - latest_confirmed_nonces_at_source: VecDeque::new(), - target_nonces: None, - strategy: BasicStrategy::new(), - }, - ) - .await -} - -/// Message delivery race. -struct MessageDeliveryRace

(std::marker::PhantomData

); - -impl MessageRace for MessageDeliveryRace

{ - type SourceHeaderId = SourceHeaderIdOf

; - type TargetHeaderId = TargetHeaderIdOf

; - - type MessageNonce = MessageNonce; - type Proof = P::MessagesProof; - - fn source_name() -> String { - format!("{}::MessagesDelivery", P::SOURCE_NAME) - } - - fn target_name() -> String { - format!("{}::MessagesDelivery", P::TARGET_NAME) - } -} - -/// Message delivery race source, which is a source of the lane. -struct MessageDeliveryRaceSource { - client: C, - metrics_msg: Option, - _phantom: PhantomData

, -} - -#[async_trait] -impl SourceClient> for MessageDeliveryRaceSource -where - P: MessageLane, - C: MessageLaneSourceClient

, -{ - type Error = C::Error; - type NoncesRange = MessageWeightsMap; - type ProofParameters = MessageProofParameters; - - async fn nonces( - &self, - at_block: SourceHeaderIdOf

, - prev_latest_nonce: MessageNonce, - ) -> Result<(SourceHeaderIdOf

, SourceClientNonces), Self::Error> { - let (at_block, latest_generated_nonce) = self.client.latest_generated_nonce(at_block).await?; - let (at_block, latest_confirmed_nonce) = self.client.latest_confirmed_received_nonce(at_block).await?; - - if let Some(metrics_msg) = self.metrics_msg.as_ref() { - metrics_msg.update_source_latest_generated_nonce::

(latest_generated_nonce); - metrics_msg.update_source_latest_confirmed_nonce::

(latest_confirmed_nonce); - } - - let new_nonces = if latest_generated_nonce > prev_latest_nonce { - self.client - .generated_messages_weights(at_block.clone(), prev_latest_nonce + 1..=latest_generated_nonce) - .await? - } else { - MessageWeightsMap::new() - }; - - Ok(( - at_block, - SourceClientNonces { - new_nonces, - confirmed_nonce: Some(latest_confirmed_nonce), - }, - )) - } - - async fn generate_proof( - &self, - at_block: SourceHeaderIdOf

, - nonces: RangeInclusive, - proof_parameters: Self::ProofParameters, - ) -> Result<(SourceHeaderIdOf

, RangeInclusive, P::MessagesProof), Self::Error> { - self.client.prove_messages(at_block, nonces, proof_parameters).await - } -} - -/// Message delivery race target, which is a target of the lane. -struct MessageDeliveryRaceTarget { - client: C, - metrics_msg: Option, - _phantom: PhantomData

, -} - -#[async_trait] -impl TargetClient> for MessageDeliveryRaceTarget -where - P: MessageLane, - C: MessageLaneTargetClient

, -{ - type Error = C::Error; - type TargetNoncesData = DeliveryRaceTargetNoncesData; - - async fn nonces( - &self, - at_block: TargetHeaderIdOf

, - update_metrics: bool, - ) -> Result<(TargetHeaderIdOf

, TargetClientNonces), Self::Error> { - let (at_block, latest_received_nonce) = self.client.latest_received_nonce(at_block).await?; - let (at_block, latest_confirmed_nonce) = self.client.latest_confirmed_received_nonce(at_block).await?; - let (at_block, unrewarded_relayers) = self.client.unrewarded_relayers_state(at_block).await?; - - if update_metrics { - if let Some(metrics_msg) = self.metrics_msg.as_ref() { - metrics_msg.update_target_latest_received_nonce::

(latest_received_nonce); - metrics_msg.update_target_latest_confirmed_nonce::

(latest_confirmed_nonce); - } - } - - Ok(( - at_block, - TargetClientNonces { - latest_nonce: latest_received_nonce, - nonces_data: DeliveryRaceTargetNoncesData { - confirmed_nonce: latest_confirmed_nonce, - unrewarded_relayers, - }, - }, - )) - } - - async fn submit_proof( - &self, - generated_at_block: SourceHeaderIdOf

, - nonces: RangeInclusive, - proof: P::MessagesProof, - ) -> Result, Self::Error> { - self.client - .submit_messages_proof(generated_at_block, nonces, proof) - .await - } -} - -/// Additional nonces data from the target client used by message delivery race. -#[derive(Debug, Clone)] -struct DeliveryRaceTargetNoncesData { - /// Latest nonce that we know: (1) has been delivered to us (2) has been confirmed - /// back to the source node (by confirmations race) and (3) relayer has received - /// reward for (and this has been confirmed by the message delivery race). - confirmed_nonce: MessageNonce, - /// State of the unrewarded relayers set at the target node. - unrewarded_relayers: UnrewardedRelayersState, -} - -/// Messages delivery strategy. -struct MessageDeliveryStrategy { - /// Maximal unrewarded relayer entries at target client. - max_unrewarded_relayer_entries_at_target: MessageNonce, - /// Maximal unconfirmed nonces at target client. - max_unconfirmed_nonces_at_target: MessageNonce, - /// Maximal number of messages in the single delivery transaction. - max_messages_in_single_batch: MessageNonce, - /// Maximal cumulative messages weight in the single delivery transaction. - max_messages_weight_in_single_batch: Weight, - /// Maximal messages size in the single delivery transaction. - max_messages_size_in_single_batch: usize, - /// Latest confirmed nonces at the source client + the header id where we have first met this nonce. - latest_confirmed_nonces_at_source: VecDeque<(SourceHeaderIdOf

, MessageNonce)>, - /// Target nonces from the source client. - target_nonces: Option>, - /// Basic delivery strategy. - strategy: MessageDeliveryStrategyBase

, -} - -type MessageDeliveryStrategyBase

= BasicStrategy< -

::SourceHeaderNumber, -

::SourceHeaderHash, -

::TargetHeaderNumber, -

::TargetHeaderHash, - MessageWeightsMap, -

::MessagesProof, ->; - -impl std::fmt::Debug for MessageDeliveryStrategy

{ - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("MessageDeliveryStrategy") - .field( - "max_unrewarded_relayer_entries_at_target", - &self.max_unrewarded_relayer_entries_at_target, - ) - .field( - "max_unconfirmed_nonces_at_target", - &self.max_unconfirmed_nonces_at_target, - ) - .field("max_messages_in_single_batch", &self.max_messages_in_single_batch) - .field( - "max_messages_weight_in_single_batch", - &self.max_messages_weight_in_single_batch, - ) - .field( - "max_messages_size_in_single_batch", - &self.max_messages_size_in_single_batch, - ) - .field( - "latest_confirmed_nonces_at_source", - &self.latest_confirmed_nonces_at_source, - ) - .field("target_nonces", &self.target_nonces) - .field("strategy", &self.strategy) - .finish() - } -} - -impl RaceStrategy, TargetHeaderIdOf

, P::MessagesProof> - for MessageDeliveryStrategy

-{ - type SourceNoncesRange = MessageWeightsMap; - type ProofParameters = MessageProofParameters; - type TargetNoncesData = DeliveryRaceTargetNoncesData; - - fn is_empty(&self) -> bool { - self.strategy.is_empty() - } - - fn best_at_source(&self) -> Option { - self.strategy.best_at_source() - } - - fn best_at_target(&self) -> Option { - self.strategy.best_at_target() - } - - fn source_nonces_updated( - &mut self, - at_block: SourceHeaderIdOf

, - nonces: SourceClientNonces, - ) { - if let Some(confirmed_nonce) = nonces.confirmed_nonce { - let is_confirmed_nonce_updated = self - .latest_confirmed_nonces_at_source - .back() - .map(|(_, prev_nonce)| *prev_nonce != confirmed_nonce) - .unwrap_or(true); - if is_confirmed_nonce_updated { - self.latest_confirmed_nonces_at_source - .push_back((at_block.clone(), confirmed_nonce)); - } - } - self.strategy.source_nonces_updated(at_block, nonces) - } - - fn best_target_nonces_updated( - &mut self, - nonces: TargetClientNonces, - race_state: &mut RaceState, TargetHeaderIdOf

, P::MessagesProof>, - ) { - // best target nonces must always be ge than finalized target nonces - let mut target_nonces = self.target_nonces.take().unwrap_or_else(|| nonces.clone()); - target_nonces.nonces_data = nonces.nonces_data.clone(); - target_nonces.latest_nonce = std::cmp::max(target_nonces.latest_nonce, nonces.latest_nonce); - self.target_nonces = Some(target_nonces); - - self.strategy.best_target_nonces_updated( - TargetClientNonces { - latest_nonce: nonces.latest_nonce, - nonces_data: (), - }, - race_state, - ) - } - - fn finalized_target_nonces_updated( - &mut self, - nonces: TargetClientNonces, - race_state: &mut RaceState, TargetHeaderIdOf

, P::MessagesProof>, - ) { - if let Some(ref best_finalized_source_header_id_at_best_target) = - race_state.best_finalized_source_header_id_at_best_target - { - let oldest_header_number_to_keep = best_finalized_source_header_id_at_best_target.0; - while self - .latest_confirmed_nonces_at_source - .front() - .map(|(id, _)| id.0 < oldest_header_number_to_keep) - .unwrap_or(false) - { - self.latest_confirmed_nonces_at_source.pop_front(); - } - } - - if let Some(ref mut target_nonces) = self.target_nonces { - target_nonces.latest_nonce = std::cmp::max(target_nonces.latest_nonce, nonces.latest_nonce); - } - - self.strategy.finalized_target_nonces_updated( - TargetClientNonces { - latest_nonce: nonces.latest_nonce, - nonces_data: (), - }, - race_state, - ) - } - - fn select_nonces_to_deliver( - &mut self, - race_state: &RaceState, TargetHeaderIdOf

, P::MessagesProof>, - ) -> Option<(RangeInclusive, Self::ProofParameters)> { - let best_finalized_source_header_id_at_best_target = - race_state.best_finalized_source_header_id_at_best_target.clone()?; - let latest_confirmed_nonce_at_source = self - .latest_confirmed_nonces_at_source - .iter() - .take_while(|(id, _)| id.0 <= best_finalized_source_header_id_at_best_target.0) - .last() - .map(|(_, nonce)| *nonce)?; - let target_nonces = self.target_nonces.as_ref()?; - - // There's additional condition in the message delivery race: target would reject messages - // if there are too much unconfirmed messages at the inbound lane. - - // The receiving race is responsible to deliver confirmations back to the source chain. So if - // there's a lot of unconfirmed messages, let's wait until it'll be able to do its job. - let latest_received_nonce_at_target = target_nonces.latest_nonce; - let confirmations_missing = latest_received_nonce_at_target.checked_sub(latest_confirmed_nonce_at_source); - match confirmations_missing { - Some(confirmations_missing) if confirmations_missing >= self.max_unconfirmed_nonces_at_target => { - log::debug!( - target: "bridge", - "Cannot deliver any more messages from {} to {}. Too many unconfirmed nonces \ - at target: target.latest_received={:?}, source.latest_confirmed={:?}, max={:?}", - MessageDeliveryRace::

::source_name(), - MessageDeliveryRace::

::target_name(), - latest_received_nonce_at_target, - latest_confirmed_nonce_at_source, - self.max_unconfirmed_nonces_at_target, - ); - - return None; - } - _ => (), - } - - // Ok - we may have new nonces to deliver. But target may still reject new messages, because we haven't - // notified it that (some) messages have been confirmed. So we may want to include updated - // `source.latest_confirmed` in the proof. - // - // Important note: we're including outbound state lane proof whenever there are unconfirmed nonces - // on the target chain. Other strategy is to include it only if it's absolutely necessary. - let latest_confirmed_nonce_at_target = target_nonces.nonces_data.confirmed_nonce; - let outbound_state_proof_required = latest_confirmed_nonce_at_target < latest_confirmed_nonce_at_source; - - // The target node would also reject messages if there are too many entries in the - // "unrewarded relayers" set. If we are unable to prove new rewards to the target node, then - // we should wait for confirmations race. - let unrewarded_relayer_entries_limit_reached = - target_nonces.nonces_data.unrewarded_relayers.unrewarded_relayer_entries - >= self.max_unrewarded_relayer_entries_at_target; - if unrewarded_relayer_entries_limit_reached { - // so there are already too many unrewarded relayer entries in the set - // - // => check if we can prove enough rewards. If not, we should wait for more rewards to be paid - let number_of_rewards_being_proved = - latest_confirmed_nonce_at_source.saturating_sub(latest_confirmed_nonce_at_target); - let enough_rewards_being_proved = number_of_rewards_being_proved - >= target_nonces.nonces_data.unrewarded_relayers.messages_in_oldest_entry; - if !enough_rewards_being_proved { - return None; - } - } - - // If we're here, then the confirmations race did its job && sending side now knows that messages - // have been delivered. Now let's select nonces that we want to deliver. 
- // - // We may deliver at most: - // - // max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target - latest_confirmed_nonce_at_target) - // - // messages in the batch. But since we're including outbound state proof in the batch, then it - // may be increased to: - // - // max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target - latest_confirmed_nonce_at_source) - let future_confirmed_nonce_at_target = if outbound_state_proof_required { - latest_confirmed_nonce_at_source - } else { - latest_confirmed_nonce_at_target - }; - let max_nonces = latest_received_nonce_at_target - .checked_sub(future_confirmed_nonce_at_target) - .and_then(|diff| self.max_unconfirmed_nonces_at_target.checked_sub(diff)) - .unwrap_or_default(); - let max_nonces = std::cmp::min(max_nonces, self.max_messages_in_single_batch); - let max_messages_weight_in_single_batch = self.max_messages_weight_in_single_batch; - let max_messages_size_in_single_batch = self.max_messages_size_in_single_batch; - let mut selected_weight: Weight = 0; - let mut selected_size: usize = 0; - let mut selected_count: MessageNonce = 0; - - let selected_nonces = self - .strategy - .select_nonces_to_deliver_with_selector(race_state, |range| { - let to_requeue = range - .into_iter() - .skip_while(|(_, weight)| { - // Since we (hopefully) have some reserves in `max_messages_weight_in_single_batch` - // and `max_messages_size_in_single_batch`, we may still try to submit transaction - // with single message if message overflows these limits. The worst case would be if - // transaction will be rejected by the target runtime, but at least we have tried. 
- - // limit messages in the batch by weight - let new_selected_weight = match selected_weight.checked_add(weight.weight) { - Some(new_selected_weight) if new_selected_weight <= max_messages_weight_in_single_batch => { - new_selected_weight - } - new_selected_weight if selected_count == 0 => { - log::warn!( - target: "bridge", - "Going to submit message delivery transaction with declared dispatch \ - weight {:?} that overflows maximal configured weight {}", - new_selected_weight, - max_messages_weight_in_single_batch, - ); - new_selected_weight.unwrap_or(Weight::MAX) - } - _ => return false, - }; - - // limit messages in the batch by size - let new_selected_size = match selected_size.checked_add(weight.size) { - Some(new_selected_size) if new_selected_size <= max_messages_size_in_single_batch => { - new_selected_size - } - new_selected_size if selected_count == 0 => { - log::warn!( - target: "bridge", - "Going to submit message delivery transaction with message \ - size {:?} that overflows maximal configured size {}", - new_selected_size, - max_messages_size_in_single_batch, - ); - new_selected_size.unwrap_or(usize::MAX) - } - _ => return false, - }; - - // limit number of messages in the batch - let new_selected_count = selected_count + 1; - if new_selected_count > max_nonces { - return false; - } - - selected_weight = new_selected_weight; - selected_size = new_selected_size; - selected_count = new_selected_count; - true - }) - .collect::>(); - if to_requeue.is_empty() { - None - } else { - Some(to_requeue) - } - })?; - - Some(( - selected_nonces, - MessageProofParameters { - outbound_state_proof_required, - dispatch_weight: selected_weight, - }, - )) - } -} - -impl NoncesRange for MessageWeightsMap { - fn begin(&self) -> MessageNonce { - self.keys().next().cloned().unwrap_or_default() - } - - fn end(&self) -> MessageNonce { - self.keys().next_back().cloned().unwrap_or_default() - } - - fn greater_than(mut self, nonce: MessageNonce) -> Option { - let gte = 
self.split_off(&(nonce + 1)); - if gte.is_empty() { - None - } else { - Some(gte) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::message_lane_loop::{ - tests::{header_id, TestMessageLane, TestMessagesProof, TestSourceHeaderId, TestTargetHeaderId}, - MessageWeights, - }; - - type TestRaceState = RaceState; - type TestStrategy = MessageDeliveryStrategy; - - fn prepare_strategy() -> (TestRaceState, TestStrategy) { - let mut race_state = RaceState { - best_finalized_source_header_id_at_source: Some(header_id(1)), - best_finalized_source_header_id_at_best_target: Some(header_id(1)), - best_target_header_id: Some(header_id(1)), - best_finalized_target_header_id: Some(header_id(1)), - nonces_to_submit: None, - nonces_submitted: None, - }; - - let mut race_strategy = TestStrategy { - max_unrewarded_relayer_entries_at_target: 4, - max_unconfirmed_nonces_at_target: 4, - max_messages_in_single_batch: 4, - max_messages_weight_in_single_batch: 4, - max_messages_size_in_single_batch: 4, - latest_confirmed_nonces_at_source: vec![(header_id(1), 19)].into_iter().collect(), - target_nonces: Some(TargetClientNonces { - latest_nonce: 19, - nonces_data: DeliveryRaceTargetNoncesData { - confirmed_nonce: 19, - unrewarded_relayers: UnrewardedRelayersState { - unrewarded_relayer_entries: 0, - messages_in_oldest_entry: 0, - total_messages: 0, - }, - }, - }), - strategy: BasicStrategy::new(), - }; - - race_strategy.strategy.source_nonces_updated( - header_id(1), - SourceClientNonces { - new_nonces: vec![ - (20, MessageWeights { weight: 1, size: 1 }), - (21, MessageWeights { weight: 1, size: 1 }), - (22, MessageWeights { weight: 1, size: 1 }), - (23, MessageWeights { weight: 1, size: 1 }), - ] - .into_iter() - .collect(), - confirmed_nonce: Some(19), - }, - ); - - let target_nonces = TargetClientNonces { - latest_nonce: 19, - nonces_data: (), - }; - race_strategy - .strategy - .best_target_nonces_updated(target_nonces.clone(), &mut race_state); - race_strategy - 
.strategy - .finalized_target_nonces_updated(target_nonces, &mut race_state); - - (race_state, race_strategy) - } - - fn proof_parameters(state_required: bool, weight: Weight) -> MessageProofParameters { - MessageProofParameters { - outbound_state_proof_required: state_required, - dispatch_weight: weight, - } - } - - #[test] - fn weights_map_works_as_nonces_range() { - fn build_map(range: RangeInclusive) -> MessageWeightsMap { - range - .map(|idx| { - ( - idx, - MessageWeights { - weight: idx, - size: idx as _, - }, - ) - }) - .collect() - } - - let map = build_map(20..=30); - - assert_eq!(map.begin(), 20); - assert_eq!(map.end(), 30); - assert_eq!(map.clone().greater_than(10), Some(build_map(20..=30))); - assert_eq!(map.clone().greater_than(19), Some(build_map(20..=30))); - assert_eq!(map.clone().greater_than(20), Some(build_map(21..=30))); - assert_eq!(map.clone().greater_than(25), Some(build_map(26..=30))); - assert_eq!(map.clone().greater_than(29), Some(build_map(30..=30))); - assert_eq!(map.greater_than(30), None); - } - - #[test] - fn message_delivery_strategy_selects_messages_to_deliver() { - let (state, mut strategy) = prepare_strategy(); - - // both sides are ready to relay new messages - assert_eq!( - strategy.select_nonces_to_deliver(&state), - Some(((20..=23), proof_parameters(false, 4))) - ); - } - - #[test] - fn message_delivery_strategy_selects_nothing_if_too_many_confirmations_missing() { - let (state, mut strategy) = prepare_strategy(); - - // if there are already `max_unconfirmed_nonces_at_target` messages on target, - // we need to wait until confirmations will be delivered by receiving race - strategy.latest_confirmed_nonces_at_source = vec![( - header_id(1), - strategy.target_nonces.as_ref().unwrap().latest_nonce - strategy.max_unconfirmed_nonces_at_target, - )] - .into_iter() - .collect(); - assert_eq!(strategy.select_nonces_to_deliver(&state), None); - } - - #[test] - fn 
message_delivery_strategy_includes_outbound_state_proof_when_new_nonces_are_available() { - let (state, mut strategy) = prepare_strategy(); - - // if there are new confirmed nonces on source, we want to relay this information - // to target to prune rewards queue - let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; - assert_eq!( - strategy.select_nonces_to_deliver(&state), - Some(((20..=23), proof_parameters(true, 4))) - ); - } - - #[test] - fn message_delivery_strategy_selects_nothing_if_there_are_too_many_unrewarded_relayers() { - let (state, mut strategy) = prepare_strategy(); - - // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, - // we need to wait until rewards will be paid - { - let mut unrewarded_relayers = &mut strategy.target_nonces.as_mut().unwrap().nonces_data.unrewarded_relayers; - unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target; - unrewarded_relayers.messages_in_oldest_entry = 4; - } - assert_eq!(strategy.select_nonces_to_deliver(&state), None); - } - - #[test] - fn message_delivery_strategy_selects_nothing_if_proved_rewards_is_not_enough_to_remove_oldest_unrewarded_entry() { - let (state, mut strategy) = prepare_strategy(); - - // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, - // we need to prove at least `messages_in_oldest_entry` rewards - let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - { - let mut nonces_data = &mut strategy.target_nonces.as_mut().unwrap().nonces_data; - nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; - let mut unrewarded_relayers = &mut nonces_data.unrewarded_relayers; - unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target; - 
unrewarded_relayers.messages_in_oldest_entry = 4; - } - assert_eq!(strategy.select_nonces_to_deliver(&state), None); - } - - #[test] - fn message_delivery_strategy_includes_outbound_state_proof_if_proved_rewards_is_enough() { - let (state, mut strategy) = prepare_strategy(); - - // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, - // we need to prove at least `messages_in_oldest_entry` rewards - let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - { - let mut nonces_data = &mut strategy.target_nonces.as_mut().unwrap().nonces_data; - nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 3; - let mut unrewarded_relayers = &mut nonces_data.unrewarded_relayers; - unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target; - unrewarded_relayers.messages_in_oldest_entry = 3; - } - assert_eq!( - strategy.select_nonces_to_deliver(&state), - Some(((20..=23), proof_parameters(true, 4))) - ); - } - - #[test] - fn message_delivery_strategy_limits_batch_by_messages_weight() { - let (state, mut strategy) = prepare_strategy(); - - // not all queued messages may fit in the batch, because batch has max weight - strategy.max_messages_weight_in_single_batch = 3; - assert_eq!( - strategy.select_nonces_to_deliver(&state), - Some(((20..=22), proof_parameters(false, 3))) - ); - } - - #[test] - fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_weight() { - let (state, mut strategy) = prepare_strategy(); - - // first message doesn't fit in the batch, because it has weight (10) that overflows max weight (4) - strategy.strategy.source_queue_mut()[0].1.get_mut(&20).unwrap().weight = 10; - assert_eq!( - strategy.select_nonces_to_deliver(&state), - Some(((20..=20), proof_parameters(false, 10))) - ); - } - - #[test] - fn message_delivery_strategy_limits_batch_by_messages_size() { - let (state, mut strategy) = 
prepare_strategy(); - - // not all queued messages may fit in the batch, because batch has max weight - strategy.max_messages_size_in_single_batch = 3; - assert_eq!( - strategy.select_nonces_to_deliver(&state), - Some(((20..=22), proof_parameters(false, 3))) - ); - } - - #[test] - fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_size() { - let (state, mut strategy) = prepare_strategy(); - - // first message doesn't fit in the batch, because it has weight (10) that overflows max weight (4) - strategy.strategy.source_queue_mut()[0].1.get_mut(&20).unwrap().size = 10; - assert_eq!( - strategy.select_nonces_to_deliver(&state), - Some(((20..=20), proof_parameters(false, 1))) - ); - } - - #[test] - fn message_delivery_strategy_limits_batch_by_messages_count_when_there_is_upper_limit() { - let (state, mut strategy) = prepare_strategy(); - - // not all queued messages may fit in the batch, because batch has max number of messages limit - strategy.max_messages_in_single_batch = 3; - assert_eq!( - strategy.select_nonces_to_deliver(&state), - Some(((20..=22), proof_parameters(false, 3))) - ); - } - - #[test] - fn message_delivery_strategy_limits_batch_by_messages_count_when_there_are_unconfirmed_nonces() { - let (state, mut strategy) = prepare_strategy(); - - // 1 delivery confirmation from target to source is still missing, so we may only - // relay 3 new messages - let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - strategy.latest_confirmed_nonces_at_source = vec![(header_id(1), prev_confirmed_nonce_at_source - 1)] - .into_iter() - .collect(); - strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; - assert_eq!( - strategy.select_nonces_to_deliver(&state), - Some(((20..=22), proof_parameters(false, 3))) - ); - } - - #[test] - fn message_delivery_strategy_waits_for_confirmed_nonce_header_to_appear_on_target() { - // 1 delivery 
confirmation from target to source is still missing, so we may deliver - // reward confirmation with our message delivery transaction. But the problem is that - // the reward has been paid at header 2 && this header is still unknown to target node. - // - // => so we can't deliver more than 3 messages - let (mut state, mut strategy) = prepare_strategy(); - let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - strategy.latest_confirmed_nonces_at_source = vec![ - (header_id(1), prev_confirmed_nonce_at_source - 1), - (header_id(2), prev_confirmed_nonce_at_source), - ] - .into_iter() - .collect(); - strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; - state.best_finalized_source_header_id_at_best_target = Some(header_id(1)); - assert_eq!( - strategy.select_nonces_to_deliver(&state), - Some(((20..=22), proof_parameters(false, 3))) - ); - - // the same situation, but the header 2 is known to the target node, so we may deliver reward confirmation - let (mut state, mut strategy) = prepare_strategy(); - let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - strategy.latest_confirmed_nonces_at_source = vec![ - (header_id(1), prev_confirmed_nonce_at_source - 1), - (header_id(2), prev_confirmed_nonce_at_source), - ] - .into_iter() - .collect(); - strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; - state.best_finalized_source_header_id_at_source = Some(header_id(2)); - state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); - assert_eq!( - strategy.select_nonces_to_deliver(&state), - Some(((20..=23), proof_parameters(true, 4))) - ); - } -} diff --git a/polkadot/bridges/relays/messages-relay/src/message_race_loop.rs b/polkadot/bridges/relays/messages-relay/src/message_race_loop.rs deleted file mode 100644 index 
a11a1d7ff5ddef3fece7046bc0d255a52c0e70c3..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/messages-relay/src/message_race_loop.rs +++ /dev/null @@ -1,612 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -//! Loop that is serving single race within message lane. This could be -//! message delivery race, receiving confirmations race or processing -//! confirmations race. -//! -//! The idea of the race is simple - we have `nonce`-s on source and target -//! nodes. We're trying to prove that the source node has this nonce (and -//! associated data - like messages, lane state, etc) to the target node by -//! generating and submitting proof. - -use crate::message_lane_loop::ClientState; - -use async_trait::async_trait; -use bp_message_lane::MessageNonce; -use futures::{ - future::FutureExt, - stream::{FusedStream, StreamExt}, -}; -use relay_utils::{process_future_result, retry_backoff, FailedClient, MaybeConnectionError}; -use std::{ - fmt::Debug, - ops::RangeInclusive, - time::{Duration, Instant}, -}; - -/// One of races within lane. -pub trait MessageRace { - /// Header id of the race source. - type SourceHeaderId: Debug + Clone + PartialEq; - /// Header id of the race source. - type TargetHeaderId: Debug + Clone + PartialEq; - - /// Message nonce used in the race. - type MessageNonce: Debug + Clone; - /// Proof that is generated and delivered in this race. 
- type Proof: Debug + Clone; - - /// Name of the race source. - fn source_name() -> String; - /// Name of the race target. - fn target_name() -> String; -} - -/// State of race source client. -type SourceClientState

= ClientState<

::SourceHeaderId,

::TargetHeaderId>; - -/// State of race target client. -type TargetClientState

= ClientState<

::TargetHeaderId,

::SourceHeaderId>; - -/// Inclusive nonces range. -pub trait NoncesRange: Debug + Sized { - /// Get begin of the range. - fn begin(&self) -> MessageNonce; - /// Get end of the range. - fn end(&self) -> MessageNonce; - /// Returns new range with current range nonces that are greater than the passed `nonce`. - /// If there are no such nonces, `None` is returned. - fn greater_than(self, nonce: MessageNonce) -> Option; -} - -/// Nonces on the race source client. -#[derive(Debug, Clone)] -pub struct SourceClientNonces { - /// New nonces range known to the client. `New` here means all nonces generated after - /// `prev_latest_nonce` passed to the `SourceClient::nonces` method. - pub new_nonces: NoncesRange, - /// Latest nonce that is confirmed to the bridged client. This nonce only makes - /// sense in some races. In other races it is `None`. - pub confirmed_nonce: Option, -} - -/// Nonces on the race target client. -#[derive(Debug, Clone)] -pub struct TargetClientNonces { - /// Latest nonce that is known to the target client. - pub latest_nonce: MessageNonce, - /// Additional data from target node that may be used by the race. - pub nonces_data: TargetNoncesData, -} - -/// One of message lane clients, which is source client for the race. -#[async_trait] -pub trait SourceClient { - /// Type of error this clients returns. - type Error: std::fmt::Debug + MaybeConnectionError; - /// Type of nonces range returned by the source client. - type NoncesRange: NoncesRange; - /// Additional proof parameters required to generate proof. - type ProofParameters; - - /// Return nonces that are known to the source client. - async fn nonces( - &self, - at_block: P::SourceHeaderId, - prev_latest_nonce: MessageNonce, - ) -> Result<(P::SourceHeaderId, SourceClientNonces), Self::Error>; - /// Generate proof for delivering to the target client. 
- async fn generate_proof( - &self, - at_block: P::SourceHeaderId, - nonces: RangeInclusive, - proof_parameters: Self::ProofParameters, - ) -> Result<(P::SourceHeaderId, RangeInclusive, P::Proof), Self::Error>; -} - -/// One of message lane clients, which is target client for the race. -#[async_trait] -pub trait TargetClient { - /// Type of error this clients returns. - type Error: std::fmt::Debug + MaybeConnectionError; - /// Type of the additional data from the target client, used by the race. - type TargetNoncesData: std::fmt::Debug; - - /// Return nonces that are known to the target client. - async fn nonces( - &self, - at_block: P::TargetHeaderId, - update_metrics: bool, - ) -> Result<(P::TargetHeaderId, TargetClientNonces), Self::Error>; - /// Submit proof to the target client. - async fn submit_proof( - &self, - generated_at_block: P::SourceHeaderId, - nonces: RangeInclusive, - proof: P::Proof, - ) -> Result, Self::Error>; -} - -/// Race strategy. -pub trait RaceStrategy: Debug { - /// Type of nonces range expected from the source client. - type SourceNoncesRange: NoncesRange; - /// Additional proof parameters required to generate proof. - type ProofParameters; - /// Additional data expected from the target client. - type TargetNoncesData; - - /// Should return true if nothing has to be synced. - fn is_empty(&self) -> bool; - /// Return best nonce at source node. - /// - /// `Some` is returned only if we are sure that the value is greater or equal - /// than the result of `best_at_target`. - fn best_at_source(&self) -> Option; - /// Return best nonce at target node. - /// - /// May return `None` if value is yet unknown. - fn best_at_target(&self) -> Option; - - /// Called when nonces are updated at source node of the race. - fn source_nonces_updated(&mut self, at_block: SourceHeaderId, nonces: SourceClientNonces); - /// Called when best nonces are updated at target node of the race. 
- fn best_target_nonces_updated( - &mut self, - nonces: TargetClientNonces, - race_state: &mut RaceState, - ); - /// Called when finalized nonces are updated at target node of the race. - fn finalized_target_nonces_updated( - &mut self, - nonces: TargetClientNonces, - race_state: &mut RaceState, - ); - /// Should return `Some(nonces)` if we need to deliver proof of `nonces` (and associated - /// data) from source to target node. - /// Additionally, parameters required to generate proof are returned. - fn select_nonces_to_deliver( - &mut self, - race_state: &RaceState, - ) -> Option<(RangeInclusive, Self::ProofParameters)>; -} - -/// State of the race. -#[derive(Debug)] -pub struct RaceState { - /// Best finalized source header id at the source client. - pub best_finalized_source_header_id_at_source: Option, - /// Best finalized source header id at the best block on the target - /// client (at the `best_finalized_source_header_id_at_best_target`). - pub best_finalized_source_header_id_at_best_target: Option, - /// Best header id at the target client. - pub best_target_header_id: Option, - /// Best finalized header id at the target client. - pub best_finalized_target_header_id: Option, - /// Range of nonces that we have selected to submit. - pub nonces_to_submit: Option<(SourceHeaderId, RangeInclusive, Proof)>, - /// Range of nonces that is currently submitted. - pub nonces_submitted: Option>, -} - -/// Run race loop until connection with target or source node is lost. -pub async fn run, TC: TargetClient

>( - race_source: SC, - race_source_updated: impl FusedStream>, - race_target: TC, - race_target_updated: impl FusedStream>, - stall_timeout: Duration, - mut strategy: impl RaceStrategy< - P::SourceHeaderId, - P::TargetHeaderId, - P::Proof, - SourceNoncesRange = SC::NoncesRange, - ProofParameters = SC::ProofParameters, - TargetNoncesData = TC::TargetNoncesData, - >, -) -> Result<(), FailedClient> { - let mut progress_context = Instant::now(); - let mut race_state = RaceState::default(); - let mut stall_countdown = Instant::now(); - - let mut source_retry_backoff = retry_backoff(); - let mut source_client_is_online = true; - let mut source_nonces_required = false; - let source_nonces = futures::future::Fuse::terminated(); - let source_generate_proof = futures::future::Fuse::terminated(); - let source_go_offline_future = futures::future::Fuse::terminated(); - - let mut target_retry_backoff = retry_backoff(); - let mut target_client_is_online = true; - let mut target_best_nonces_required = false; - let mut target_finalized_nonces_required = false; - let target_best_nonces = futures::future::Fuse::terminated(); - let target_finalized_nonces = futures::future::Fuse::terminated(); - let target_submit_proof = futures::future::Fuse::terminated(); - let target_go_offline_future = futures::future::Fuse::terminated(); - - futures::pin_mut!( - race_source_updated, - source_nonces, - source_generate_proof, - source_go_offline_future, - race_target_updated, - target_best_nonces, - target_finalized_nonces, - target_submit_proof, - target_go_offline_future, - ); - - loop { - futures::select! 
{ - // when headers ids are updated - source_state = race_source_updated.next() => { - if let Some(source_state) = source_state { - let is_source_state_updated = race_state.best_finalized_source_header_id_at_source.as_ref() - != Some(&source_state.best_finalized_self); - if is_source_state_updated { - source_nonces_required = true; - race_state.best_finalized_source_header_id_at_source = Some(source_state.best_finalized_self); - } - } - }, - target_state = race_target_updated.next() => { - if let Some(target_state) = target_state { - let is_target_best_state_updated = race_state.best_target_header_id.as_ref() - != Some(&target_state.best_self); - - if is_target_best_state_updated { - target_best_nonces_required = true; - race_state.best_target_header_id = Some(target_state.best_self); - race_state.best_finalized_source_header_id_at_best_target - = Some(target_state.best_finalized_peer_at_best_self); - } - - let is_target_finalized_state_updated = race_state.best_finalized_target_header_id.as_ref() - != Some(&target_state.best_finalized_self); - if is_target_finalized_state_updated { - target_finalized_nonces_required = true; - race_state.best_finalized_target_header_id = Some(target_state.best_finalized_self); - } - } - }, - - // when nonces are updated - nonces = source_nonces => { - source_nonces_required = false; - - source_client_is_online = process_future_result( - nonces, - &mut source_retry_backoff, - |(at_block, nonces)| { - log::debug!( - target: "bridge", - "Received nonces from {}: {:?}", - P::source_name(), - nonces, - ); - - strategy.source_nonces_updated(at_block, nonces); - }, - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving nonces from {}", P::source_name()), - ).fail_if_connection_error(FailedClient::Source)?; - }, - nonces = target_best_nonces => { - target_best_nonces_required = false; - - target_client_is_online = process_future_result( - nonces, - &mut target_retry_backoff, - |(_, nonces)| { - 
log::debug!( - target: "bridge", - "Received best nonces from {}: {:?}", - P::target_name(), - nonces, - ); - - let prev_best_at_target = strategy.best_at_target(); - strategy.best_target_nonces_updated(nonces, &mut race_state); - if strategy.best_at_target() != prev_best_at_target { - stall_countdown = Instant::now(); - } - }, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving best nonces from {}", P::target_name()), - ).fail_if_connection_error(FailedClient::Target)?; - }, - nonces = target_finalized_nonces => { - target_finalized_nonces_required = false; - - target_client_is_online = process_future_result( - nonces, - &mut target_retry_backoff, - |(_, nonces)| { - log::debug!( - target: "bridge", - "Received finalized nonces from {}: {:?}", - P::target_name(), - nonces, - ); - - strategy.finalized_target_nonces_updated(nonces, &mut race_state); - }, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving finalized nonces from {}", P::target_name()), - ).fail_if_connection_error(FailedClient::Target)?; - }, - - // proof generation and submission - proof = source_generate_proof => { - source_client_is_online = process_future_result( - proof, - &mut source_retry_backoff, - |(at_block, nonces_range, proof)| { - log::debug!( - target: "bridge", - "Received proof for nonces in range {:?} from {}", - nonces_range, - P::source_name(), - ); - - race_state.nonces_to_submit = Some((at_block, nonces_range, proof)); - }, - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error generating proof at {}", P::source_name()), - ).fail_if_connection_error(FailedClient::Source)?; - }, - proof_submit_result = target_submit_proof => { - target_client_is_online = process_future_result( - proof_submit_result, - &mut target_retry_backoff, - |nonces_range| { - log::debug!( - target: "bridge", - "Successfully submitted proof of nonces {:?} to {}", - nonces_range, - P::target_name(), - ); - - 
race_state.nonces_to_submit = None; - race_state.nonces_submitted = Some(nonces_range); - stall_countdown = Instant::now(); - }, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error submitting proof {}", P::target_name()), - ).fail_if_connection_error(FailedClient::Target)?; - }, - - // when we're ready to retry request - _ = source_go_offline_future => { - source_client_is_online = true; - }, - _ = target_go_offline_future => { - target_client_is_online = true; - }, - } - - progress_context = print_race_progress::(progress_context, &strategy); - - if stall_countdown.elapsed() > stall_timeout { - log::warn!( - target: "bridge", - "{} -> {} race has stalled. State: {:?}. Strategy: {:?}", - P::source_name(), - P::target_name(), - race_state, - strategy, - ); - - return Err(FailedClient::Both); - } else if race_state.nonces_to_submit.is_none() && race_state.nonces_submitted.is_none() && strategy.is_empty() - { - stall_countdown = Instant::now(); - } - - if source_client_is_online { - source_client_is_online = false; - - let nonces_to_deliver = select_nonces_to_deliver(&race_state, &mut strategy); - let best_at_source = strategy.best_at_source(); - - if let Some((at_block, nonces_range, proof_parameters)) = nonces_to_deliver { - log::debug!( - target: "bridge", - "Asking {} to prove nonces in range {:?} at block {:?}", - P::source_name(), - nonces_range, - at_block, - ); - source_generate_proof.set( - race_source - .generate_proof(at_block, nonces_range, proof_parameters) - .fuse(), - ); - } else if source_nonces_required && best_at_source.is_some() { - log::debug!(target: "bridge", "Asking {} about message nonces", P::source_name()); - let at_block = race_state - .best_finalized_source_header_id_at_source - .as_ref() - .expect( - "source_nonces_required is only true when\ - best_finalized_source_header_id_at_source is Some; qed", - ) - .clone(); - source_nonces.set( - race_source - .nonces(at_block, best_at_source.expect("guaranteed by if 
condition; qed")) - .fuse(), - ); - } else { - source_client_is_online = true; - } - } - - if target_client_is_online { - target_client_is_online = false; - - if let Some((at_block, nonces_range, proof)) = race_state.nonces_to_submit.as_ref() { - log::debug!( - target: "bridge", - "Going to submit proof of messages in range {:?} to {} node", - nonces_range, - P::target_name(), - ); - target_submit_proof.set( - race_target - .submit_proof(at_block.clone(), nonces_range.clone(), proof.clone()) - .fuse(), - ); - } else if target_best_nonces_required { - log::debug!(target: "bridge", "Asking {} about best message nonces", P::target_name()); - let at_block = race_state - .best_target_header_id - .as_ref() - .expect("target_best_nonces_required is only true when best_target_header_id is Some; qed") - .clone(); - target_best_nonces.set(race_target.nonces(at_block, false).fuse()); - } else if target_finalized_nonces_required { - log::debug!(target: "bridge", "Asking {} about finalized message nonces", P::target_name()); - let at_block = race_state - .best_finalized_target_header_id - .as_ref() - .expect( - "target_finalized_nonces_required is only true when\ - best_finalized_target_header_id is Some; qed", - ) - .clone(); - target_finalized_nonces.set(race_target.nonces(at_block, true).fuse()); - } else { - target_client_is_online = true; - } - } - } -} - -impl Default for RaceState { - fn default() -> Self { - RaceState { - best_finalized_source_header_id_at_source: None, - best_finalized_source_header_id_at_best_target: None, - best_target_header_id: None, - best_finalized_target_header_id: None, - nonces_to_submit: None, - nonces_submitted: None, - } - } -} - -/// Print race progress. 
-fn print_race_progress(prev_time: Instant, strategy: &S) -> Instant -where - P: MessageRace, - S: RaceStrategy, -{ - let now_time = Instant::now(); - - let need_update = now_time.saturating_duration_since(prev_time) > Duration::from_secs(10); - if !need_update { - return prev_time; - } - - let now_best_nonce_at_source = strategy.best_at_source(); - let now_best_nonce_at_target = strategy.best_at_target(); - log::info!( - target: "bridge", - "Synced {:?} of {:?} nonces in {} -> {} race", - now_best_nonce_at_target, - now_best_nonce_at_source, - P::source_name(), - P::target_name(), - ); - now_time -} - -fn select_nonces_to_deliver( - race_state: &RaceState, - strategy: &mut Strategy, -) -> Option<(SourceHeaderId, RangeInclusive, Strategy::ProofParameters)> -where - SourceHeaderId: Clone, - Strategy: RaceStrategy, -{ - race_state - .best_finalized_source_header_id_at_best_target - .as_ref() - .and_then(|best_finalized_source_header_id_at_best_target| { - strategy - .select_nonces_to_deliver(&race_state) - .map(|(nonces_range, proof_parameters)| { - ( - best_finalized_source_header_id_at_best_target.clone(), - nonces_range, - proof_parameters, - ) - }) - }) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::message_race_strategy::BasicStrategy; - use relay_utils::HeaderId; - - #[test] - fn proof_is_generated_at_best_block_known_to_target_node() { - const GENERATED_AT: u64 = 6; - const BEST_AT_SOURCE: u64 = 10; - const BEST_AT_TARGET: u64 = 8; - - // target node only knows about source' BEST_AT_TARGET block - // source node has BEST_AT_SOURCE > BEST_AT_TARGET block - let mut race_state = RaceState::<_, _, ()> { - best_finalized_source_header_id_at_source: Some(HeaderId(BEST_AT_SOURCE, BEST_AT_SOURCE)), - best_finalized_source_header_id_at_best_target: Some(HeaderId(BEST_AT_TARGET, BEST_AT_TARGET)), - best_target_header_id: Some(HeaderId(0, 0)), - best_finalized_target_header_id: Some(HeaderId(0, 0)), - nonces_to_submit: None, - nonces_submitted: None, - }; - 
- // we have some nonces to deliver and they're generated at GENERATED_AT < BEST_AT_SOURCE - let mut strategy = BasicStrategy::new(); - strategy.source_nonces_updated( - HeaderId(GENERATED_AT, GENERATED_AT), - SourceClientNonces { - new_nonces: 0..=10, - confirmed_nonce: None, - }, - ); - strategy.best_target_nonces_updated( - TargetClientNonces { - latest_nonce: 5u64, - nonces_data: (), - }, - &mut race_state, - ); - - // the proof will be generated on source, but using BEST_AT_TARGET block - assert_eq!( - select_nonces_to_deliver(&race_state, &mut strategy), - Some((HeaderId(BEST_AT_TARGET, BEST_AT_TARGET), 6..=10, (),)) - ); - } -} diff --git a/polkadot/bridges/relays/messages-relay/src/message_race_receiving.rs b/polkadot/bridges/relays/messages-relay/src/message_race_receiving.rs deleted file mode 100644 index cba6ee385893ca17c5fea0a95b751f07941e2bd4..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/messages-relay/src/message_race_receiving.rs +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -//! Message receiving race delivers proof-of-messages-delivery from lane.target to lane.source. 
- -use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}; -use crate::message_lane_loop::{ - SourceClient as MessageLaneSourceClient, SourceClientState, TargetClient as MessageLaneTargetClient, - TargetClientState, -}; -use crate::message_race_loop::{ - MessageRace, NoncesRange, SourceClient, SourceClientNonces, TargetClient, TargetClientNonces, -}; -use crate::message_race_strategy::BasicStrategy; -use crate::metrics::MessageLaneLoopMetrics; - -use async_trait::async_trait; -use bp_message_lane::MessageNonce; -use futures::stream::FusedStream; -use relay_utils::FailedClient; -use std::{marker::PhantomData, ops::RangeInclusive, time::Duration}; - -/// Message receiving confirmations delivery strategy. -type ReceivingConfirmationsBasicStrategy

= BasicStrategy< -

::TargetHeaderNumber, -

::TargetHeaderHash, -

::SourceHeaderNumber, -

::SourceHeaderHash, - RangeInclusive, -

::MessagesReceivingProof, ->; - -/// Run receiving confirmations race. -pub async fn run( - source_client: impl MessageLaneSourceClient

, - source_state_updates: impl FusedStream>, - target_client: impl MessageLaneTargetClient

, - target_state_updates: impl FusedStream>, - stall_timeout: Duration, - metrics_msg: Option, -) -> Result<(), FailedClient> { - crate::message_race_loop::run( - ReceivingConfirmationsRaceSource { - client: target_client, - metrics_msg: metrics_msg.clone(), - _phantom: Default::default(), - }, - target_state_updates, - ReceivingConfirmationsRaceTarget { - client: source_client, - metrics_msg, - _phantom: Default::default(), - }, - source_state_updates, - stall_timeout, - ReceivingConfirmationsBasicStrategy::

::new(), - ) - .await -} - -/// Messages receiving confirmations race. -struct ReceivingConfirmationsRace

(std::marker::PhantomData

); - -impl MessageRace for ReceivingConfirmationsRace

{ - type SourceHeaderId = TargetHeaderIdOf

; - type TargetHeaderId = SourceHeaderIdOf

; - - type MessageNonce = MessageNonce; - type Proof = P::MessagesReceivingProof; - - fn source_name() -> String { - format!("{}::ReceivingConfirmationsDelivery", P::TARGET_NAME) - } - - fn target_name() -> String { - format!("{}::ReceivingConfirmationsDelivery", P::SOURCE_NAME) - } -} - -/// Message receiving confirmations race source, which is a target of the lane. -struct ReceivingConfirmationsRaceSource { - client: C, - metrics_msg: Option, - _phantom: PhantomData

, -} - -#[async_trait] -impl SourceClient> for ReceivingConfirmationsRaceSource -where - P: MessageLane, - C: MessageLaneTargetClient

, -{ - type Error = C::Error; - type NoncesRange = RangeInclusive; - type ProofParameters = (); - - async fn nonces( - &self, - at_block: TargetHeaderIdOf

, - prev_latest_nonce: MessageNonce, - ) -> Result<(TargetHeaderIdOf

, SourceClientNonces), Self::Error> { - let (at_block, latest_received_nonce) = self.client.latest_received_nonce(at_block).await?; - if let Some(metrics_msg) = self.metrics_msg.as_ref() { - metrics_msg.update_target_latest_received_nonce::

(latest_received_nonce); - } - Ok(( - at_block, - SourceClientNonces { - new_nonces: prev_latest_nonce + 1..=latest_received_nonce, - confirmed_nonce: None, - }, - )) - } - - #[allow(clippy::unit_arg)] - async fn generate_proof( - &self, - at_block: TargetHeaderIdOf

, - nonces: RangeInclusive, - _proof_parameters: Self::ProofParameters, - ) -> Result< - ( - TargetHeaderIdOf

, - RangeInclusive, - P::MessagesReceivingProof, - ), - Self::Error, - > { - self.client - .prove_messages_receiving(at_block) - .await - .map(|(at_block, proof)| (at_block, nonces, proof)) - } -} - -/// Message receiving confirmations race target, which is a source of the lane. -struct ReceivingConfirmationsRaceTarget { - client: C, - metrics_msg: Option, - _phantom: PhantomData

, -} - -#[async_trait] -impl TargetClient> for ReceivingConfirmationsRaceTarget -where - P: MessageLane, - C: MessageLaneSourceClient

, -{ - type Error = C::Error; - type TargetNoncesData = (); - - async fn nonces( - &self, - at_block: SourceHeaderIdOf

, - update_metrics: bool, - ) -> Result<(SourceHeaderIdOf

, TargetClientNonces<()>), Self::Error> { - let (at_block, latest_confirmed_nonce) = self.client.latest_confirmed_received_nonce(at_block).await?; - if update_metrics { - if let Some(metrics_msg) = self.metrics_msg.as_ref() { - metrics_msg.update_source_latest_confirmed_nonce::

(latest_confirmed_nonce); - } - } - Ok(( - at_block, - TargetClientNonces { - latest_nonce: latest_confirmed_nonce, - nonces_data: (), - }, - )) - } - - async fn submit_proof( - &self, - generated_at_block: TargetHeaderIdOf

, - nonces: RangeInclusive, - proof: P::MessagesReceivingProof, - ) -> Result, Self::Error> { - self.client - .submit_messages_receiving_proof(generated_at_block, proof) - .await?; - Ok(nonces) - } -} - -impl NoncesRange for RangeInclusive { - fn begin(&self) -> MessageNonce { - *RangeInclusive::::start(self) - } - - fn end(&self) -> MessageNonce { - *RangeInclusive::::end(self) - } - - fn greater_than(self, nonce: MessageNonce) -> Option { - let next_nonce = nonce + 1; - let end = *self.end(); - if next_nonce > end { - None - } else { - Some(std::cmp::max(self.begin(), next_nonce)..=end) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn range_inclusive_works_as_nonces_range() { - let range = 20..=30; - - assert_eq!(NoncesRange::begin(&range), 20); - assert_eq!(NoncesRange::end(&range), 30); - assert_eq!(range.clone().greater_than(10), Some(20..=30)); - assert_eq!(range.clone().greater_than(19), Some(20..=30)); - assert_eq!(range.clone().greater_than(20), Some(21..=30)); - assert_eq!(range.clone().greater_than(25), Some(26..=30)); - assert_eq!(range.clone().greater_than(29), Some(30..=30)); - assert_eq!(range.greater_than(30), None); - } -} diff --git a/polkadot/bridges/relays/messages-relay/src/message_race_strategy.rs b/polkadot/bridges/relays/messages-relay/src/message_race_strategy.rs deleted file mode 100644 index dcbcbc4d377e0bd54c7c7ab18a897e10e0aba242..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/messages-relay/src/message_race_strategy.rs +++ /dev/null @@ -1,479 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -//! Basic delivery strategy. The strategy selects nonces if: -//! -//! 1) there are more nonces on the source side than on the target side; -//! 2) new nonces may be proved to target node (i.e. they have appeared at the -//! block, which is known to the target node). - -use crate::message_race_loop::{NoncesRange, RaceState, RaceStrategy, SourceClientNonces, TargetClientNonces}; - -use bp_message_lane::MessageNonce; -use relay_utils::HeaderId; -use std::{collections::VecDeque, fmt::Debug, marker::PhantomData, ops::RangeInclusive}; - -/// Nonces delivery strategy. -#[derive(Debug)] -pub struct BasicStrategy< - SourceHeaderNumber, - SourceHeaderHash, - TargetHeaderNumber, - TargetHeaderHash, - SourceNoncesRange, - Proof, -> { - /// All queued nonces. - source_queue: VecDeque<(HeaderId, SourceNoncesRange)>, - /// Best nonce known to target node (at its best block). `None` if it has not been received yet. - best_target_nonce: Option, - /// Unused generic types dump. - _phantom: PhantomData<(TargetHeaderNumber, TargetHeaderHash, Proof)>, -} - -impl - BasicStrategy -where - SourceHeaderHash: Clone, - SourceHeaderNumber: Clone + Ord, - SourceNoncesRange: NoncesRange, -{ - /// Create new delivery strategy. - pub fn new() -> Self { - BasicStrategy { - source_queue: VecDeque::new(), - best_target_nonce: None, - _phantom: Default::default(), - } - } - - /// Mutable reference to source queue to use in tests. - #[cfg(test)] - pub(crate) fn source_queue_mut( - &mut self, - ) -> &mut VecDeque<(HeaderId, SourceNoncesRange)> { - &mut self.source_queue - } - - /// Should return `Some(nonces)` if we need to deliver proof of `nonces` (and associated - /// data) from source to target node. 
- /// - /// The `selector` function receives range of nonces and should return `None` if the whole - /// range needs to be delivered. If there are some nonces in the range that can't be delivered - /// right now, it should return `Some` with 'undeliverable' nonces. Please keep in mind that - /// this should be the sub-range that the passed range ends with, because nonces are always - /// delivered in-order. Otherwise the function will panic. - pub fn select_nonces_to_deliver_with_selector( - &mut self, - race_state: &RaceState< - HeaderId, - HeaderId, - Proof, - >, - mut selector: impl FnMut(SourceNoncesRange) -> Option, - ) -> Option> { - // if we do not know best nonce at target node, we can't select anything - let target_nonce = self.best_target_nonce?; - - // if we have already selected nonces that we want to submit, do nothing - if race_state.nonces_to_submit.is_some() { - return None; - } - - // if we already submitted some nonces, do nothing - if race_state.nonces_submitted.is_some() { - return None; - } - - // 1) we want to deliver all nonces, starting from `target_nonce + 1` - // 2) we can't deliver new nonce until header, that has emitted this nonce, is finalized - // by target client - // 3) selector is used for more complicated logic - let best_header_at_target = &race_state.best_finalized_source_header_id_at_best_target.as_ref()?; - let mut nonces_end = None; - while let Some((queued_at, queued_range)) = self.source_queue.pop_front() { - // select (sub) range to deliver - let queued_range_begin = queued_range.begin(); - let queued_range_end = queued_range.end(); - let range_to_requeue = if queued_at.0 > best_header_at_target.0 { - // if header that has queued the range is not yet finalized at bridged chain, - // we can't prove anything - Some(queued_range) - } else { - // selector returns `Some(range)` if this `range` needs to be requeued - selector(queued_range) - }; - - // requeue (sub) range and update range to deliver - match range_to_requeue { - 
Some(range_to_requeue) => { - assert!( - range_to_requeue.begin() <= range_to_requeue.end() - && range_to_requeue.begin() >= queued_range_begin - && range_to_requeue.end() == queued_range_end, - "Incorrect implementation of internal `selector` function. Expected original\ - range {:?} to end with returned range {:?}", - queued_range_begin..=queued_range_end, - range_to_requeue, - ); - - if range_to_requeue.begin() != queued_range_begin { - nonces_end = Some(range_to_requeue.begin() - 1); - } - self.source_queue.push_front((queued_at, range_to_requeue)); - break; - } - None => { - nonces_end = Some(queued_range_end); - } - } - } - - nonces_end.map(|nonces_end| RangeInclusive::new(target_nonce + 1, nonces_end)) - } -} - -impl - RaceStrategy, HeaderId, Proof> - for BasicStrategy -where - SourceHeaderHash: Clone + Debug, - SourceHeaderNumber: Clone + Ord + Debug, - SourceNoncesRange: NoncesRange + Debug, - TargetHeaderHash: Debug, - TargetHeaderNumber: Debug, - Proof: Debug, -{ - type SourceNoncesRange = SourceNoncesRange; - type ProofParameters = (); - type TargetNoncesData = (); - - fn is_empty(&self) -> bool { - self.source_queue.is_empty() - } - - fn best_at_source(&self) -> Option { - let best_in_queue = self.source_queue.back().map(|(_, range)| range.end()); - match (best_in_queue, self.best_target_nonce) { - (Some(best_in_queue), Some(best_target_nonce)) if best_in_queue > best_target_nonce => Some(best_in_queue), - (_, Some(best_target_nonce)) => Some(best_target_nonce), - (_, None) => None, - } - } - - fn best_at_target(&self) -> Option { - self.best_target_nonce - } - - fn source_nonces_updated( - &mut self, - at_block: HeaderId, - nonces: SourceClientNonces, - ) { - let best_in_queue = self - .source_queue - .back() - .map(|(_, range)| range.end()) - .or(self.best_target_nonce) - .unwrap_or_default(); - self.source_queue.extend( - nonces - .new_nonces - .greater_than(best_in_queue) - .into_iter() - .map(move |range| (at_block.clone(), range)), - ) - } - - fn 
best_target_nonces_updated( - &mut self, - nonces: TargetClientNonces<()>, - race_state: &mut RaceState< - HeaderId, - HeaderId, - Proof, - >, - ) { - let nonce = nonces.latest_nonce; - - if let Some(best_target_nonce) = self.best_target_nonce { - if nonce < best_target_nonce { - return; - } - } - - while let Some(true) = self.source_queue.front().map(|(_, range)| range.begin() <= nonce) { - let maybe_subrange = self - .source_queue - .pop_front() - .and_then(|(at_block, range)| range.greater_than(nonce).map(|subrange| (at_block, subrange))); - if let Some((at_block, subrange)) = maybe_subrange { - self.source_queue.push_front((at_block, subrange)); - break; - } - } - - let need_to_select_new_nonces = race_state - .nonces_to_submit - .as_ref() - .map(|(_, nonces, _)| *nonces.end() <= nonce) - .unwrap_or(false); - if need_to_select_new_nonces { - race_state.nonces_to_submit = None; - } - - let need_new_nonces_to_submit = race_state - .nonces_submitted - .as_ref() - .map(|nonces| *nonces.end() <= nonce) - .unwrap_or(false); - if need_new_nonces_to_submit { - race_state.nonces_submitted = None; - } - - self.best_target_nonce = Some(std::cmp::max( - self.best_target_nonce.unwrap_or(nonces.latest_nonce), - nonce, - )); - } - - fn finalized_target_nonces_updated( - &mut self, - nonces: TargetClientNonces<()>, - _race_state: &mut RaceState< - HeaderId, - HeaderId, - Proof, - >, - ) { - self.best_target_nonce = Some(std::cmp::max( - self.best_target_nonce.unwrap_or(nonces.latest_nonce), - nonces.latest_nonce, - )); - } - - fn select_nonces_to_deliver( - &mut self, - race_state: &RaceState< - HeaderId, - HeaderId, - Proof, - >, - ) -> Option<(RangeInclusive, Self::ProofParameters)> { - self.select_nonces_to_deliver_with_selector(race_state, |_| None) - .map(|range| (range, ())) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::message_lane::MessageLane; - use crate::message_lane_loop::tests::{header_id, TestMessageLane, TestMessagesProof}; - - type 
SourceNoncesRange = RangeInclusive; - - type BasicStrategy

= super::BasicStrategy< -

::SourceHeaderNumber, -

::SourceHeaderHash, -

::TargetHeaderNumber, -

::TargetHeaderHash, - SourceNoncesRange, -

::MessagesProof, - >; - - fn source_nonces(new_nonces: SourceNoncesRange) -> SourceClientNonces { - SourceClientNonces { - new_nonces, - confirmed_nonce: None, - } - } - - fn target_nonces(latest_nonce: MessageNonce) -> TargetClientNonces<()> { - TargetClientNonces { - latest_nonce, - nonces_data: (), - } - } - - #[test] - fn strategy_is_empty_works() { - let mut strategy = BasicStrategy::::new(); - assert_eq!(strategy.is_empty(), true); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=1)); - assert_eq!(strategy.is_empty(), false); - } - - #[test] - fn best_at_source_is_never_lower_than_target_nonce() { - let mut strategy = BasicStrategy::::new(); - assert_eq!(strategy.best_at_source(), None); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=5)); - assert_eq!(strategy.best_at_source(), None); - strategy.best_target_nonces_updated(target_nonces(10), &mut Default::default()); - assert_eq!(strategy.source_queue, vec![]); - assert_eq!(strategy.best_at_source(), Some(10)); - } - - #[test] - fn source_nonce_is_never_lower_than_known_target_nonce() { - let mut strategy = BasicStrategy::::new(); - strategy.best_target_nonces_updated(target_nonces(10), &mut Default::default()); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=5)); - assert_eq!(strategy.source_queue, vec![]); - } - - #[test] - fn source_nonce_is_never_lower_than_latest_known_source_nonce() { - let mut strategy = BasicStrategy::::new(); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=5)); - strategy.source_nonces_updated(header_id(2), source_nonces(1..=3)); - strategy.source_nonces_updated(header_id(2), source_nonces(1..=5)); - assert_eq!(strategy.source_queue, vec![(header_id(1), 1..=5)]); - } - - #[test] - fn target_nonce_is_never_lower_than_latest_known_target_nonce() { - let mut strategy = BasicStrategy::::new(); - assert_eq!(strategy.best_target_nonce, None); - strategy.best_target_nonces_updated(target_nonces(10), &mut Default::default()); 
- assert_eq!(strategy.best_target_nonce, Some(10)); - strategy.best_target_nonces_updated(target_nonces(5), &mut Default::default()); - assert_eq!(strategy.best_target_nonce, Some(10)); - } - - #[test] - fn updated_target_nonce_removes_queued_entries() { - let mut strategy = BasicStrategy::::new(); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=5)); - strategy.source_nonces_updated(header_id(2), source_nonces(6..=10)); - strategy.source_nonces_updated(header_id(3), source_nonces(11..=15)); - strategy.source_nonces_updated(header_id(4), source_nonces(16..=20)); - strategy.best_target_nonces_updated(target_nonces(15), &mut Default::default()); - assert_eq!(strategy.source_queue, vec![(header_id(4), 16..=20)]); - strategy.best_target_nonces_updated(target_nonces(17), &mut Default::default()); - assert_eq!(strategy.source_queue, vec![(header_id(4), 18..=20)]); - } - - #[test] - fn selected_nonces_are_dropped_on_target_nonce_update() { - let mut state = RaceState::default(); - let mut strategy = BasicStrategy::::new(); - state.nonces_to_submit = Some((header_id(1), 5..=10, (5..=10, None))); - strategy.best_target_nonces_updated(target_nonces(7), &mut state); - assert!(state.nonces_to_submit.is_some()); - strategy.best_target_nonces_updated(target_nonces(10), &mut state); - assert!(state.nonces_to_submit.is_none()); - } - - #[test] - fn submitted_nonces_are_dropped_on_target_nonce_update() { - let mut state = RaceState::default(); - let mut strategy = BasicStrategy::::new(); - state.nonces_submitted = Some(5..=10); - strategy.best_target_nonces_updated(target_nonces(7), &mut state); - assert!(state.nonces_submitted.is_some()); - strategy.best_target_nonces_updated(target_nonces(10), &mut state); - assert!(state.nonces_submitted.is_none()); - } - - #[test] - fn nothing_is_selected_if_something_is_already_selected() { - let mut state = RaceState::default(); - let mut strategy = BasicStrategy::::new(); - state.nonces_to_submit = Some((header_id(1), 1..=10, 
(1..=10, None))); - strategy.best_target_nonces_updated(target_nonces(0), &mut state); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=10)); - assert_eq!(strategy.select_nonces_to_deliver(&state), None); - } - - #[test] - fn nothing_is_selected_if_something_is_already_submitted() { - let mut state = RaceState::default(); - let mut strategy = BasicStrategy::::new(); - state.nonces_submitted = Some(1..=10); - strategy.best_target_nonces_updated(target_nonces(0), &mut state); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=10)); - assert_eq!(strategy.select_nonces_to_deliver(&state), None); - } - - #[test] - fn select_nonces_to_deliver_works() { - let mut state = RaceState::<_, _, TestMessagesProof>::default(); - let mut strategy = BasicStrategy::::new(); - strategy.best_target_nonces_updated(target_nonces(0), &mut state); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=1)); - strategy.source_nonces_updated(header_id(2), source_nonces(2..=2)); - strategy.source_nonces_updated(header_id(3), source_nonces(3..=6)); - strategy.source_nonces_updated(header_id(5), source_nonces(7..=8)); - - state.best_finalized_source_header_id_at_best_target = Some(header_id(4)); - assert_eq!(strategy.select_nonces_to_deliver(&state), Some((1..=6, ()))); - strategy.best_target_nonces_updated(target_nonces(6), &mut state); - assert_eq!(strategy.select_nonces_to_deliver(&state), None); - - state.best_finalized_source_header_id_at_best_target = Some(header_id(5)); - assert_eq!(strategy.select_nonces_to_deliver(&state), Some((7..=8, ()))); - strategy.best_target_nonces_updated(target_nonces(8), &mut state); - assert_eq!(strategy.select_nonces_to_deliver(&state), None); - } - - #[test] - fn select_nonces_to_deliver_able_to_split_ranges_with_selector() { - let mut state = RaceState::<_, _, TestMessagesProof>::default(); - let mut strategy = BasicStrategy::::new(); - strategy.best_target_nonces_updated(target_nonces(0), &mut state); - 
strategy.source_nonces_updated(header_id(1), source_nonces(1..=100)); - - state.best_finalized_source_header_id_at_source = Some(header_id(1)); - state.best_finalized_source_header_id_at_best_target = Some(header_id(1)); - state.best_target_header_id = Some(header_id(1)); - - assert_eq!( - strategy.select_nonces_to_deliver_with_selector(&state, |_| Some(50..=100)), - Some(1..=49), - ); - } - - fn run_panic_test_for_incorrect_selector( - invalid_selector: impl Fn(SourceNoncesRange) -> Option, - ) { - let mut state = RaceState::<_, _, TestMessagesProof>::default(); - let mut strategy = BasicStrategy::::new(); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=100)); - strategy.best_target_nonces_updated(target_nonces(50), &mut state); - state.best_finalized_source_header_id_at_source = Some(header_id(1)); - state.best_finalized_source_header_id_at_best_target = Some(header_id(1)); - state.best_target_header_id = Some(header_id(1)); - strategy.select_nonces_to_deliver_with_selector(&state, invalid_selector); - } - - #[test] - #[should_panic] - fn select_nonces_to_deliver_panics_if_selector_returns_empty_range() { - #[allow(clippy::reversed_empty_ranges)] - run_panic_test_for_incorrect_selector(|_| Some(2..=1)) - } - - #[test] - #[should_panic] - fn select_nonces_to_deliver_panics_if_selector_returns_range_that_starts_before_passed_range() { - run_panic_test_for_incorrect_selector(|range| Some(range.begin() - 1..=*range.end())) - } - - #[test] - #[should_panic] - fn select_nonces_to_deliver_panics_if_selector_returns_range_with_mismatched_end() { - run_panic_test_for_incorrect_selector(|range| Some(range.begin()..=*range.end() + 1)) - } -} diff --git a/polkadot/bridges/relays/messages-relay/src/metrics.rs b/polkadot/bridges/relays/messages-relay/src/metrics.rs deleted file mode 100644 index b001d32926ddb53034e3eeb69eb8acb4706349c4..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/messages-relay/src/metrics.rs +++ /dev/null @@ -1,107 
+0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Metrics for message lane relay loop. - -use crate::message_lane::MessageLane; -use crate::message_lane_loop::{SourceClientState, TargetClientState}; - -use bp_message_lane::MessageNonce; -use relay_utils::metrics::{register, GaugeVec, Metrics, Opts, Registry, U64}; - -/// Message lane relay metrics. -/// -/// Cloning only clones references. -#[derive(Clone)] -pub struct MessageLaneLoopMetrics { - /// Best finalized block numbers - "source", "target", "source_at_target", "target_at_source". - best_block_numbers: GaugeVec, - /// Lane state nonces: "source_latest_generated", "source_latest_confirmed", - /// "target_latest_received", "target_latest_confirmed". 
- lane_state_nonces: GaugeVec, -} - -impl Metrics for MessageLaneLoopMetrics { - fn register(&self, registry: &Registry) -> Result<(), String> { - register(self.best_block_numbers.clone(), registry).map_err(|e| e.to_string())?; - register(self.lane_state_nonces.clone(), registry).map_err(|e| e.to_string())?; - Ok(()) - } -} - -impl Default for MessageLaneLoopMetrics { - fn default() -> Self { - MessageLaneLoopMetrics { - best_block_numbers: GaugeVec::new( - Opts::new("best_block_numbers", "Best finalized block numbers"), - &["type"], - ) - .expect("metric is static and thus valid; qed"), - lane_state_nonces: GaugeVec::new(Opts::new("lane_state_nonces", "Nonces of the lane state"), &["type"]) - .expect("metric is static and thus valid; qed"), - } - } -} - -impl MessageLaneLoopMetrics { - /// Update source client state metrics. - pub fn update_source_state(&self, source_client_state: SourceClientState

) { - self.best_block_numbers - .with_label_values(&["source"]) - .set(source_client_state.best_self.0.into()); - self.best_block_numbers - .with_label_values(&["target_at_source"]) - .set(source_client_state.best_finalized_peer_at_best_self.0.into()); - } - - /// Update target client state metrics. - pub fn update_target_state(&self, target_client_state: TargetClientState

) { - self.best_block_numbers - .with_label_values(&["target"]) - .set(target_client_state.best_self.0.into()); - self.best_block_numbers - .with_label_values(&["source_at_target"]) - .set(target_client_state.best_finalized_peer_at_best_self.0.into()); - } - - /// Update latest generated nonce at source. - pub fn update_source_latest_generated_nonce(&self, source_latest_generated_nonce: MessageNonce) { - self.lane_state_nonces - .with_label_values(&["source_latest_generated"]) - .set(source_latest_generated_nonce); - } - - /// Update latest confirmed nonce at source. - pub fn update_source_latest_confirmed_nonce(&self, source_latest_confirmed_nonce: MessageNonce) { - self.lane_state_nonces - .with_label_values(&["source_latest_confirmed"]) - .set(source_latest_confirmed_nonce); - } - - /// Update latest received nonce at target. - pub fn update_target_latest_received_nonce(&self, target_latest_generated_nonce: MessageNonce) { - self.lane_state_nonces - .with_label_values(&["target_latest_received"]) - .set(target_latest_generated_nonce); - } - - /// Update latest confirmed nonce at target. 
- pub fn update_target_latest_confirmed_nonce(&self, target_latest_confirmed_nonce: MessageNonce) { - self.lane_state_nonces - .with_label_values(&["target_latest_confirmed"]) - .set(target_latest_confirmed_nonce); - } -} diff --git a/polkadot/bridges/relays/millau-client/Cargo.toml b/polkadot/bridges/relays/millau-client/Cargo.toml deleted file mode 100644 index 5f9cbd170c938909c97c7194a51366b30104b679..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/millau-client/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "relay-millau-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers-relay" } -relay-substrate-client = { path = "../substrate-client" } -relay-utils = { path = "../utils" } - -# Supported Chains - -millau-runtime = { path = "../../bin/millau/runtime" } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/millau-client/src/lib.rs b/polkadot/bridges/relays/millau-client/src/lib.rs deleted file mode 100644 index c7d04056878fa2827f097fcce730a5ba37d1b10f..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/millau-client/src/lib.rs +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types used to connect to the Millau-Substrate chain. - -use codec::Encode; -use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, Client, TransactionSignScheme}; -use sp_core::{storage::StorageKey, Pair}; -use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; -use std::time::Duration; - -pub use millau_runtime::BridgeRialtoCall; - -/// Millau header id. -pub type HeaderId = relay_utils::HeaderId; - -/// Millau chain definition. 
-#[derive(Debug, Clone, Copy)] -pub struct Millau; - -impl ChainBase for Millau { - type BlockNumber = millau_runtime::BlockNumber; - type Hash = millau_runtime::Hash; - type Hasher = millau_runtime::Hashing; - type Header = millau_runtime::Header; -} - -impl Chain for Millau { - const NAME: &'static str = "Millau"; - const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(5); - - type AccountId = millau_runtime::AccountId; - type Index = millau_runtime::Index; - type SignedBlock = millau_runtime::SignedBlock; - type Call = millau_runtime::Call; -} - -impl ChainWithBalances for Millau { - type NativeBalance = millau_runtime::Balance; - - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { - use frame_support::storage::generator::StorageMap; - StorageKey(frame_system::Account::::storage_map_final_key( - account_id, - )) - } -} - -impl TransactionSignScheme for Millau { - type Chain = Millau; - type AccountKeyPair = sp_core::sr25519::Pair; - type SignedTransaction = millau_runtime::UncheckedExtrinsic; - - fn sign_transaction( - client: &Client, - signer: &Self::AccountKeyPair, - signer_nonce: ::Index, - call: ::Call, - ) -> Self::SignedTransaction { - let raw_payload = SignedPayload::from_raw( - call, - ( - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(sp_runtime::generic::Era::Immortal), - frame_system::CheckNonce::::from(signer_nonce), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(0), - ), - ( - millau_runtime::VERSION.spec_version, - millau_runtime::VERSION.transaction_version, - *client.genesis_hash(), - *client.genesis_hash(), - (), - (), - (), - ), - ); - let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); - let signer: sp_runtime::MultiSigner = signer.public().into(); - let (call, extra, _) = raw_payload.deconstruct(); - - 
millau_runtime::UncheckedExtrinsic::new_signed(call, signer.into_account(), signature.into(), extra) - } -} - -/// Millau signing params. -#[derive(Clone)] -pub struct SigningParams { - /// Substrate transactions signer. - pub signer: sp_core::sr25519::Pair, -} - -impl SigningParams { - /// Create signing params from SURI and password. - pub fn from_suri(suri: &str, password: Option<&str>) -> Result { - Ok(SigningParams { - signer: sp_core::sr25519::Pair::from_string(suri, password)?, - }) - } -} - -impl std::fmt::Debug for SigningParams { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.signer.public()) - } -} - -/// Millau header type used in headers sync. -pub type SyncHeader = relay_substrate_client::SyncHeader; diff --git a/polkadot/bridges/relays/polkadot-client/Cargo.toml b/polkadot/bridges/relays/polkadot-client/Cargo.toml deleted file mode 100644 index 8764b6509b2424abe8dfd66eccdef2940c9d3310..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/polkadot-client/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "relay-polkadot-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers-relay" } -relay-substrate-client = { path = "../substrate-client" } -relay-utils = { path = "../utils" } - -# Bridge dependencies - -bp-polkadot = { path = "../../primitives/polkadot" } - -# Substrate Dependencies - -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = 
"https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/polkadot-client/src/lib.rs b/polkadot/bridges/relays/polkadot-client/src/lib.rs deleted file mode 100644 index 7f85de13632c3ae0df4da5514c630ac5d156634e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/polkadot-client/src/lib.rs +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types used to connect to the Polkadot chain. - -use relay_substrate_client::{Chain, ChainBase}; -use std::time::Duration; - -/// Polkadot header id. 
-pub type HeaderId = relay_utils::HeaderId; - -/// Polkadot chain definition -#[derive(Debug, Clone, Copy)] -pub struct Polkadot; - -impl ChainBase for Polkadot { - type BlockNumber = bp_polkadot::BlockNumber; - type Hash = bp_polkadot::Hash; - type Hasher = bp_polkadot::Hasher; - type Header = bp_polkadot::Header; -} - -impl Chain for Polkadot { - const NAME: &'static str = "Polkadot"; - const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); - - type AccountId = bp_polkadot::AccountId; - type Index = bp_polkadot::Nonce; - type SignedBlock = bp_polkadot::SignedBlock; - type Call = (); -} - -/// Polkadot header type used in headers sync. -pub type SyncHeader = relay_substrate_client::SyncHeader; diff --git a/polkadot/bridges/relays/rialto-client/Cargo.toml b/polkadot/bridges/relays/rialto-client/Cargo.toml deleted file mode 100644 index 6142ba05c963c199e60ea4c72867b5ee0aff0f3c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/rialto-client/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "relay-rialto-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0" } -headers-relay = { path = "../headers-relay" } -relay-substrate-client = { path = "../substrate-client" } -relay-utils = { path = "../utils" } - -# Bridge dependencies - -rialto-runtime = { path = "../../bin/rialto/runtime" } - -# Substrate Dependencies - -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git 
= "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/rialto-client/src/lib.rs b/polkadot/bridges/relays/rialto-client/src/lib.rs deleted file mode 100644 index 9e38831d56c435b100816893b4cf16096a5a313c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/rialto-client/src/lib.rs +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types used to connect to the Rialto-Substrate chain. - -use codec::Encode; -use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, Client, TransactionSignScheme}; -use sp_core::{storage::StorageKey, Pair}; -use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; -use std::time::Duration; - -pub use rialto_runtime::BridgeMillauCall; - -/// Rialto header id. 
-pub type HeaderId = relay_utils::HeaderId; - -/// Rialto chain definition -#[derive(Debug, Clone, Copy)] -pub struct Rialto; - -impl ChainBase for Rialto { - type BlockNumber = rialto_runtime::BlockNumber; - type Hash = rialto_runtime::Hash; - type Hasher = rialto_runtime::Hashing; - type Header = rialto_runtime::Header; -} - -impl Chain for Rialto { - const NAME: &'static str = "Rialto"; - const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(5); - - type AccountId = rialto_runtime::AccountId; - type Index = rialto_runtime::Index; - type SignedBlock = rialto_runtime::SignedBlock; - type Call = rialto_runtime::Call; -} - -impl ChainWithBalances for Rialto { - type NativeBalance = rialto_runtime::Balance; - - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { - use frame_support::storage::generator::StorageMap; - StorageKey(frame_system::Account::::storage_map_final_key( - account_id, - )) - } -} - -impl TransactionSignScheme for Rialto { - type Chain = Rialto; - type AccountKeyPair = sp_core::sr25519::Pair; - type SignedTransaction = rialto_runtime::UncheckedExtrinsic; - - fn sign_transaction( - client: &Client, - signer: &Self::AccountKeyPair, - signer_nonce: ::Index, - call: ::Call, - ) -> Self::SignedTransaction { - let raw_payload = SignedPayload::from_raw( - call, - ( - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(sp_runtime::generic::Era::Immortal), - frame_system::CheckNonce::::from(signer_nonce), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(0), - ), - ( - rialto_runtime::VERSION.spec_version, - rialto_runtime::VERSION.transaction_version, - *client.genesis_hash(), - *client.genesis_hash(), - (), - (), - (), - ), - ); - let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); - let signer: sp_runtime::MultiSigner = signer.public().into(); - let 
(call, extra, _) = raw_payload.deconstruct(); - - rialto_runtime::UncheckedExtrinsic::new_signed(call, signer.into_account(), signature.into(), extra) - } -} - -/// Rialto signing params. -#[derive(Clone)] -pub struct SigningParams { - /// Substrate transactions signer. - pub signer: sp_core::sr25519::Pair, -} - -impl SigningParams { - /// Create signing params from SURI and password. - pub fn from_suri(suri: &str, password: Option<&str>) -> Result { - Ok(SigningParams { - signer: sp_core::sr25519::Pair::from_string(suri, password)?, - }) - } -} - -impl std::fmt::Debug for SigningParams { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.signer.public()) - } -} - -impl Default for SigningParams { - fn default() -> Self { - SigningParams { - signer: sp_keyring::AccountKeyring::Alice.pair(), - } - } -} - -/// Rialto header type used in headers sync. -pub type SyncHeader = relay_substrate_client::SyncHeader; diff --git a/polkadot/bridges/relays/substrate-client/Cargo.toml b/polkadot/bridges/relays/substrate-client/Cargo.toml deleted file mode 100644 index 016a7d7d33712c011f5436716b614750346a0de8..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate-client/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "relay-substrate-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -async-std = "1.6.5" -async-trait = "0.1.40" -codec = { package = "parity-scale-codec", version = "2.0.0" } -jsonrpsee = { git = "https://github.com/svyatonik/jsonrpsee.git", branch = "shared-client-in-rpc-api", default-features = false, features = ["ws"] } -log = "0.4.11" -num-traits = "0.2" -rand = "0.7" - -# Bridge dependencies - -bp-message-lane = { path = "../../primitives/message-lane" } -bp-runtime = { path = "../../primitives/runtime" } -headers-relay = { path = "../headers-relay" } -relay-utils = { path = 
"../utils" } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-rpc-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" } - -#[dev-dependencies] -futures = "0.3.7" diff --git a/polkadot/bridges/relays/substrate-client/src/chain.rs b/polkadot/bridges/relays/substrate-client/src/chain.rs deleted file mode 100644 index f309c3f775e76eea6f56302e57fd322f4502b1ff..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate-client/src/chain.rs +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::client::Client; - -use bp_runtime::Chain as ChainBase; -use frame_support::Parameter; -use jsonrpsee::common::{DeserializeOwned, Serialize}; -use num_traits::{CheckedSub, Zero}; -use sp_core::{storage::StorageKey, Pair}; -use sp_runtime::{ - generic::SignedBlock, - traits::{AtLeast32Bit, Dispatchable, MaybeDisplay, MaybeSerialize, MaybeSerializeDeserialize, Member}, - Justification, -}; -use std::{fmt::Debug, time::Duration}; - -/// Substrate-based chain from minimal relay-client point of view. -pub trait Chain: ChainBase { - /// Chain name. - const NAME: &'static str; - /// Average block interval. - /// - /// How often blocks are produced on that chain. It's suggested to set this value - /// to match the block time of the chain. - const AVERAGE_BLOCK_INTERVAL: Duration; - - /// The user account identifier type for the runtime. - type AccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord + Default; - /// Index of a transaction used by the chain. - type Index: Parameter - + Member - + MaybeSerialize - + Debug - + Default - + MaybeDisplay - + DeserializeOwned - + AtLeast32Bit - + Copy; - /// Block type. - type SignedBlock: Member + Serialize + DeserializeOwned + BlockWithJustification; - /// The aggregated `Call` type. - type Call: Dispatchable + Debug; -} - -/// Substrate-based chain with `frame_system::Config::AccountData` set to -/// the `pallet_balances::AccountData`. -pub trait ChainWithBalances: Chain { - /// Balance of an account in native tokens. - type NativeBalance: Parameter + Member + DeserializeOwned + Clone + Copy + CheckedSub + PartialOrd + Zero; - - /// Return runtime storage key for getting `frame_system::AccountInfo` of given account. - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey; -} - -/// Block with justification. -pub trait BlockWithJustification { - /// Return block justification, if known. 
- fn justification(&self) -> Option<&Justification>; -} - -/// Substrate-based chain transactions signing scheme. -pub trait TransactionSignScheme { - /// Chain that this scheme is to be used. - type Chain: Chain; - /// Type of key pairs used to sign transactions. - type AccountKeyPair: Pair; - /// Signed transaction. - type SignedTransaction; - - /// Create transaction for given runtime call, signed by given account. - fn sign_transaction( - client: &Client, - signer: &Self::AccountKeyPair, - signer_nonce: ::Index, - call: ::Call, - ) -> Self::SignedTransaction; -} - -impl BlockWithJustification for () { - fn justification(&self) -> Option<&Justification> { - None - } -} - -impl BlockWithJustification for SignedBlock { - fn justification(&self) -> Option<&Justification> { - self.justification.as_ref() - } -} diff --git a/polkadot/bridges/relays/substrate-client/src/client.rs b/polkadot/bridges/relays/substrate-client/src/client.rs deleted file mode 100644 index 767002d68654368b60920e16c2c7b1a0bedacb46..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate-client/src/client.rs +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate node client. 
- -use crate::chain::{Chain, ChainWithBalances}; -use crate::rpc::{Substrate, SubstrateMessageLane}; -use crate::{ConnectionParams, Error, Result}; - -use bp_message_lane::{LaneId, MessageNonce}; -use bp_runtime::InstanceId; -use codec::Decode; -use frame_system::AccountInfo; -use jsonrpsee::common::DeserializeOwned; -use jsonrpsee::raw::RawClient; -use jsonrpsee::transport::ws::WsTransportClient; -use jsonrpsee::{client::Subscription, Client as RpcClient}; -use num_traits::Zero; -use pallet_balances::AccountData; -use sp_core::Bytes; -use sp_trie::StorageProof; -use sp_version::RuntimeVersion; -use std::ops::RangeInclusive; - -const SUB_API_GRANDPA_AUTHORITIES: &str = "GrandpaApi_grandpa_authorities"; - -/// Opaque justifications subscription type. -pub type JustificationsSubscription = Subscription; - -/// Opaque GRANDPA authorities set. -pub type OpaqueGrandpaAuthoritiesSet = Vec; - -/// Substrate client type. -/// -/// Cloning `Client` is a cheap operation. -pub struct Client { - /// Client connection params. - params: ConnectionParams, - /// Substrate RPC client. - client: RpcClient, - /// Genesis block hash. - genesis_hash: C::Hash, -} - -impl Clone for Client { - fn clone(&self) -> Self { - Client { - params: self.params.clone(), - client: self.client.clone(), - genesis_hash: self.genesis_hash, - } - } -} - -impl std::fmt::Debug for Client { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("Client") - .field("genesis_hash", &self.genesis_hash) - .finish() - } -} - -impl Client { - /// Returns client that is able to call RPCs on Substrate node over websocket connection. - pub async fn new(params: ConnectionParams) -> Result { - let client = Self::build_client(params.clone()).await?; - - let number: C::BlockNumber = Zero::zero(); - let genesis_hash = Substrate::::chain_get_block_hash(&client, number).await?; - - Ok(Self { - params, - client, - genesis_hash, - }) - } - - /// Reopen client connection. 
- pub async fn reconnect(&mut self) -> Result<()> { - self.client = Self::build_client(self.params.clone()).await?; - Ok(()) - } - - /// Build client to use in connection. - async fn build_client(params: ConnectionParams) -> Result { - let uri = format!("ws://{}:{}", params.host, params.port); - let transport = WsTransportClient::new(&uri).await?; - let raw_client = RawClient::new(transport); - Ok(raw_client.into()) - } -} - -impl Client { - /// Returns true if client is connected to at least one peer and is in synced state. - pub async fn ensure_synced(&self) -> Result<()> { - let health = Substrate::::system_health(&self.client).await?; - let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0); - if is_synced { - Ok(()) - } else { - Err(Error::ClientNotSynced(health)) - } - } - - /// Return hash of the genesis block. - pub fn genesis_hash(&self) -> &C::Hash { - &self.genesis_hash - } - - /// Return hash of the best finalized block. - pub async fn best_finalized_header_hash(&self) -> Result { - Ok(Substrate::::chain_get_finalized_head(&self.client).await?) - } - - /// Returns the best Substrate header. - pub async fn best_header(&self) -> Result - where - C::Header: DeserializeOwned, - { - Ok(Substrate::::chain_get_header(&self.client, None).await?) - } - - /// Get a Substrate block from its hash. - pub async fn get_block(&self, block_hash: Option) -> Result { - Ok(Substrate::::chain_get_block(&self.client, block_hash).await?) - } - - /// Get a Substrate header by its hash. - pub async fn header_by_hash(&self, block_hash: C::Hash) -> Result - where - C::Header: DeserializeOwned, - { - Ok(Substrate::::chain_get_header(&self.client, block_hash).await?) - } - - /// Get a Substrate block hash by its number. - pub async fn block_hash_by_number(&self, number: C::BlockNumber) -> Result { - Ok(Substrate::::chain_get_block_hash(&self.client, number).await?) - } - - /// Get a Substrate header by its number. 
- pub async fn header_by_number(&self, block_number: C::BlockNumber) -> Result - where - C::Header: DeserializeOwned, - { - let block_hash = Self::block_hash_by_number(self, block_number).await?; - Ok(Self::header_by_hash(self, block_hash).await?) - } - - /// Return runtime version. - pub async fn runtime_version(&self) -> Result { - Ok(Substrate::::runtime_version(&self.client).await?) - } - - /// Return native tokens balance of the account. - pub async fn free_native_balance(&self, account: C::AccountId) -> Result - where - C: ChainWithBalances, - { - let storage_key = C::account_info_storage_key(&account); - let encoded_account_data = Substrate::::get_storage(&self.client, storage_key) - .await? - .ok_or(Error::AccountDoesNotExist)?; - let decoded_account_data = - AccountInfo::>::decode(&mut &encoded_account_data.0[..]) - .map_err(Error::ResponseParseFailed)?; - Ok(decoded_account_data.data.free) - } - - /// Get the nonce of the given Substrate account. - /// - /// Note: It's the caller's responsibility to make sure `account` is a valid ss58 address. - pub async fn next_account_index(&self, account: C::AccountId) -> Result { - Ok(Substrate::::system_account_next_index(&self.client, account).await?) - } - - /// Submit an extrinsic for inclusion in a block. - /// - /// Note: The given transaction does not need be SCALE encoded beforehand. - pub async fn submit_extrinsic(&self, transaction: Bytes) -> Result { - let tx_hash = Substrate::::author_submit_extrinsic(&self.client, transaction).await?; - log::trace!(target: "bridge", "Sent transaction to Substrate node: {:?}", tx_hash); - Ok(tx_hash) - } - - /// Get the GRANDPA authority set at given block. 
- pub async fn grandpa_authorities_set(&self, block: C::Hash) -> Result { - let call = SUB_API_GRANDPA_AUTHORITIES.to_string(); - let data = Bytes(Vec::new()); - - let encoded_response = Substrate::::state_call(&self.client, call, data, Some(block)).await?; - let authority_list = encoded_response.0; - - Ok(authority_list) - } - - /// Execute runtime call at given block. - pub async fn state_call(&self, method: String, data: Bytes, at_block: Option) -> Result { - Substrate::::state_call(&self.client, method, data, at_block) - .await - .map_err(Into::into) - } - - /// Returns proof-of-message(s) in given inclusive range. - pub async fn prove_messages( - &self, - instance: InstanceId, - lane: LaneId, - range: RangeInclusive, - include_outbound_lane_state: bool, - at_block: C::Hash, - ) -> Result { - let encoded_trie_nodes = SubstrateMessageLane::::prove_messages( - &self.client, - instance, - lane, - *range.start(), - *range.end(), - include_outbound_lane_state, - Some(at_block), - ) - .await - .map_err(Error::Request)?; - let decoded_trie_nodes: Vec> = - Decode::decode(&mut &encoded_trie_nodes[..]).map_err(Error::ResponseParseFailed)?; - Ok(StorageProof::new(decoded_trie_nodes)) - } - - /// Returns proof-of-message(s) delivery. - pub async fn prove_messages_delivery( - &self, - instance: InstanceId, - lane: LaneId, - at_block: C::Hash, - ) -> Result>> { - let encoded_trie_nodes = - SubstrateMessageLane::::prove_messages_delivery(&self.client, instance, lane, Some(at_block)) - .await - .map_err(Error::Request)?; - let decoded_trie_nodes: Vec> = - Decode::decode(&mut &encoded_trie_nodes[..]).map_err(Error::ResponseParseFailed)?; - Ok(decoded_trie_nodes) - } - - /// Return new justifications stream. - pub async fn subscribe_justifications(self) -> Result { - Ok(self - .client - .subscribe( - "grandpa_subscribeJustifications", - jsonrpsee::common::Params::None, - "grandpa_unsubscribeJustifications", - ) - .await?) 
- } -} diff --git a/polkadot/bridges/relays/substrate-client/src/error.rs b/polkadot/bridges/relays/substrate-client/src/error.rs deleted file mode 100644 index 67aefe9885534794bf83f0f93e07367679030fb5..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate-client/src/error.rs +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate node RPC errors. - -use jsonrpsee::client::RequestError; -use jsonrpsee::transport::ws::WsNewDnsError; -use relay_utils::MaybeConnectionError; -use sc_rpc_api::system::Health; - -/// Result type used by Substrate client. -pub type Result = std::result::Result; - -/// Errors that can occur only when interacting with -/// a Substrate node through RPC. -#[derive(Debug)] -pub enum Error { - /// Web socket connection error. - WsConnectionError(WsNewDnsError), - /// An error that can occur when making a request to - /// an JSON-RPC server. - Request(RequestError), - /// The response from the server could not be SCALE decoded. - ResponseParseFailed(codec::Error), - /// The Substrate bridge pallet has not yet been initialized. - UninitializedBridgePallet, - /// Account does not exist on the chain. 
- AccountDoesNotExist, - /// The client we're connected to is not synced, so we can't rely on its state. - ClientNotSynced(Health), - /// Custom logic error. - Custom(String), -} - -impl From for Error { - fn from(error: WsNewDnsError) -> Self { - Error::WsConnectionError(error) - } -} - -impl From for Error { - fn from(error: RequestError) -> Self { - Error::Request(error) - } -} - -impl MaybeConnectionError for Error { - fn is_connection_error(&self) -> bool { - matches!( - *self, - Error::Request(RequestError::TransportError(_)) | Error::ClientNotSynced(_) - ) - } -} - -impl From for String { - fn from(error: Error) -> String { - error.to_string() - } -} - -impl ToString for Error { - fn to_string(&self) -> String { - match self { - Self::WsConnectionError(e) => e.to_string(), - Self::Request(e) => e.to_string(), - Self::ResponseParseFailed(e) => e.to_string(), - Self::UninitializedBridgePallet => "The Substrate bridge pallet has not been initialized yet.".into(), - Self::AccountDoesNotExist => "Account does not exist on the chain".into(), - Self::ClientNotSynced(health) => format!("Substrate client is not synced: {}", health), - Self::Custom(e) => e.clone(), - } - } -} diff --git a/polkadot/bridges/relays/substrate-client/src/guard.rs b/polkadot/bridges/relays/substrate-client/src/guard.rs deleted file mode 100644 index d439ec890763db63e0c17fbfcab9aee991f1e8f6..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate-client/src/guard.rs +++ /dev/null @@ -1,371 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module provides a set of guard functions that are running in background threads -//! and are aborting process if some condition fails. - -use crate::{Chain, ChainWithBalances, Client}; - -use async_trait::async_trait; -use num_traits::CheckedSub; -use sp_version::RuntimeVersion; -use std::{ - collections::VecDeque, - time::{Duration, Instant}, -}; - -/// Guards environment. -#[async_trait] -pub trait Environment: Send + Sync + 'static { - /// Return current runtime version. - async fn runtime_version(&mut self) -> Result; - /// Return free native balance of the account on the chain. - async fn free_native_balance(&mut self, account: C::AccountId) -> Result; - - /// Return current time. - fn now(&self) -> Instant { - Instant::now() - } - /// Sleep given amount of time. - async fn sleep(&mut self, duration: Duration) { - async_std::task::sleep(duration).await - } - /// Abort current process. Called when guard condition check fails. - async fn abort(&mut self) { - std::process::abort(); - } -} - -/// Abort when runtime spec version is different from specified. -pub fn abort_on_spec_version_change(mut env: impl Environment, expected_spec_version: u32) { - async_std::task::spawn(async move { - loop { - let actual_spec_version = env.runtime_version().await; - match actual_spec_version { - Ok(version) if version.spec_version == expected_spec_version => (), - Ok(version) => { - log::error!( - target: "bridge-guard", - "{} runtime spec version has changed from {} to {}. 
Aborting relay", - C::NAME, - expected_spec_version, - version.spec_version, - ); - - env.abort().await; - } - Err(error) => log::warn!( - target: "bridge-guard", - "Failed to read {} runtime version: {:?}. Relay may need to be stopped manually", - C::NAME, - error, - ), - } - - env.sleep(conditions_check_delay::()).await; - } - }); -} - -/// Abort if, during a 24 hours, free balance of given account is decreased at least by given value. -/// Other components may increase (or decrease) balance of account and it WILL affect logic of the guard. -pub fn abort_when_account_balance_decreased( - mut env: impl Environment, - account_id: C::AccountId, - maximal_decrease: C::NativeBalance, -) { - const DAY: Duration = Duration::from_secs(60 * 60 * 24); - - async_std::task::spawn(async move { - let mut balances = VecDeque::new(); - - loop { - let current_time = env.now(); - - // remember balances that are beyound 24h border - let time_border = current_time - DAY; - while balances.front().map(|(time, _)| *time < time_border).unwrap_or(false) { - balances.pop_front(); - } - - // read balance of the account - let current_balance = env.free_native_balance(account_id.clone()).await; - - // remember balance and check difference - match current_balance { - Ok(current_balance) => { - // remember balance - balances.push_back((current_time, current_balance)); - - // check if difference between current and oldest balance is too large - let (oldest_time, oldest_balance) = - balances.front().expect("pushed to queue couple of lines above; qed"); - let balances_difference = oldest_balance.checked_sub(¤t_balance); - if balances_difference > Some(maximal_decrease) { - log::error!( - target: "bridge-guard", - "Balance of {} account {:?} has decreased from {:?} to {:?} in {} minutes. 
Aborting relay", - C::NAME, - account_id, - oldest_balance, - current_balance, - current_time.duration_since(*oldest_time).as_secs() / 60, - ); - - env.abort().await; - } - } - Err(error) => { - log::warn!( - target: "bridge-guard", - "Failed to read {} account {:?} balance: {:?}. Relay may need to be stopped manually", - C::NAME, - account_id, - error, - ); - } - }; - - env.sleep(conditions_check_delay::()).await; - } - }); -} - -/// Delay between conditions check. -fn conditions_check_delay() -> Duration { - C::AVERAGE_BLOCK_INTERVAL * (10 + rand::random::() % 10) -} - -#[async_trait] -impl Environment for Client { - async fn runtime_version(&mut self) -> Result { - Client::::runtime_version(self).await.map_err(|e| e.to_string()) - } - - async fn free_native_balance(&mut self, account: C::AccountId) -> Result { - Client::::free_native_balance(self, account) - .await - .map_err(|e| e.to_string()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use futures::{ - channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}, - future::FutureExt, - stream::StreamExt, - SinkExt, - }; - - struct TestChain; - - impl bp_runtime::Chain for TestChain { - type BlockNumber = u32; - type Hash = sp_core::H256; - type Hasher = sp_runtime::traits::BlakeTwo256; - type Header = sp_runtime::generic::Header; - } - - impl Chain for TestChain { - const NAME: &'static str = "Test"; - const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_millis(1); - - type AccountId = u32; - type Index = u32; - type SignedBlock = (); - type Call = (); - } - - impl ChainWithBalances for TestChain { - type NativeBalance = u32; - - fn account_info_storage_key(_account_id: &u32) -> sp_core::storage::StorageKey { - unreachable!() - } - } - - struct TestEnvironment { - runtime_version_rx: UnboundedReceiver, - free_native_balance_rx: UnboundedReceiver, - slept_tx: UnboundedSender<()>, - aborted_tx: UnboundedSender<()>, - } - - #[async_trait] - impl Environment for TestEnvironment { - async fn 
runtime_version(&mut self) -> Result { - Ok(self.runtime_version_rx.next().await.unwrap_or_default()) - } - - async fn free_native_balance(&mut self, _account: u32) -> Result { - Ok(self.free_native_balance_rx.next().await.unwrap_or_default()) - } - - async fn sleep(&mut self, _duration: Duration) { - let _ = self.slept_tx.send(()).await; - } - - async fn abort(&mut self) { - let _ = self.aborted_tx.send(()).await; - // simulate process abort :) - async_std::task::sleep(Duration::from_secs(60)).await; - } - } - - #[test] - fn aborts_when_spec_version_is_changed() { - async_std::task::block_on(async { - let ( - (mut runtime_version_tx, runtime_version_rx), - (_free_native_balance_tx, free_native_balance_rx), - (slept_tx, mut slept_rx), - (aborted_tx, mut aborted_rx), - ) = (unbounded(), unbounded(), unbounded(), unbounded()); - abort_on_spec_version_change( - TestEnvironment { - runtime_version_rx, - free_native_balance_rx, - slept_tx, - aborted_tx, - }, - 0, - ); - - // client responds with wrong version - runtime_version_tx - .send(RuntimeVersion { - spec_version: 42, - ..Default::default() - }) - .await - .unwrap(); - - // then the `abort` function is called - aborted_rx.next().await; - // and we do not reach the `sleep` function call - assert!(slept_rx.next().now_or_never().is_none()); - }); - } - - #[test] - fn does_not_aborts_when_spec_version_is_unchanged() { - async_std::task::block_on(async { - let ( - (mut runtime_version_tx, runtime_version_rx), - (_free_native_balance_tx, free_native_balance_rx), - (slept_tx, mut slept_rx), - (aborted_tx, mut aborted_rx), - ) = (unbounded(), unbounded(), unbounded(), unbounded()); - abort_on_spec_version_change( - TestEnvironment { - runtime_version_rx, - free_native_balance_rx, - slept_tx, - aborted_tx, - }, - 42, - ); - - // client responds with the same version - runtime_version_tx - .send(RuntimeVersion { - spec_version: 42, - ..Default::default() - }) - .await - .unwrap(); - - // then the `sleep` function is called 
- slept_rx.next().await; - // and the `abort` function is not called - assert!(aborted_rx.next().now_or_never().is_none()); - }); - } - - #[test] - fn aborts_when_balance_is_too_low() { - async_std::task::block_on(async { - let ( - (_runtime_version_tx, runtime_version_rx), - (mut free_native_balance_tx, free_native_balance_rx), - (slept_tx, mut slept_rx), - (aborted_tx, mut aborted_rx), - ) = (unbounded(), unbounded(), unbounded(), unbounded()); - abort_when_account_balance_decreased( - TestEnvironment { - runtime_version_rx, - free_native_balance_rx, - slept_tx, - aborted_tx, - }, - 0, - 100, - ); - - // client responds with initial balance - free_native_balance_tx.send(1000).await.unwrap(); - - // then the guard sleeps - slept_rx.next().await; - - // and then client responds with updated balance, which is too low - free_native_balance_tx.send(899).await.unwrap(); - - // then the `abort` function is called - aborted_rx.next().await; - // and we do not reach next `sleep` function call - assert!(slept_rx.next().now_or_never().is_none()); - }); - } - - #[test] - fn does_not_aborts_when_balance_is_enough() { - async_std::task::block_on(async { - let ( - (_runtime_version_tx, runtime_version_rx), - (mut free_native_balance_tx, free_native_balance_rx), - (slept_tx, mut slept_rx), - (aborted_tx, mut aborted_rx), - ) = (unbounded(), unbounded(), unbounded(), unbounded()); - abort_when_account_balance_decreased( - TestEnvironment { - runtime_version_rx, - free_native_balance_rx, - slept_tx, - aborted_tx, - }, - 0, - 100, - ); - - // client responds with initial balance - free_native_balance_tx.send(1000).await.unwrap(); - - // then the guard sleeps - slept_rx.next().await; - - // and then client responds with updated balance, which is enough - free_native_balance_tx.send(950).await.unwrap(); - - // then the `sleep` function is called - slept_rx.next().await; - // and `abort` is not called - assert!(aborted_rx.next().now_or_never().is_none()); - }); - } -} diff --git 
a/polkadot/bridges/relays/substrate-client/src/headers_source.rs b/polkadot/bridges/relays/substrate-client/src/headers_source.rs deleted file mode 100644 index b347a1c9f57f2c6950ad8224a97c47d5983f7877..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate-client/src/headers_source.rs +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Default generic implementation of headers source for basic Substrate client. - -use crate::chain::{BlockWithJustification, Chain}; -use crate::client::Client; -use crate::error::Error; - -use async_trait::async_trait; -use headers_relay::{ - sync_loop::SourceClient, - sync_types::{HeaderIdOf, HeadersSyncPipeline, QueuedHeader, SourceHeader}, -}; -use relay_utils::relay_loop::Client as RelayClient; -use sp_runtime::{traits::Header as HeaderT, Justification}; -use std::marker::PhantomData; - -/// Substrate node as headers source. -pub struct HeadersSource { - client: Client, - _phantom: PhantomData

, -} - -impl HeadersSource { - /// Create new headers source using given client. - pub fn new(client: Client) -> Self { - HeadersSource { - client, - _phantom: Default::default(), - } - } -} - -impl Clone for HeadersSource { - fn clone(&self) -> Self { - HeadersSource { - client: self.client.clone(), - _phantom: Default::default(), - } - } -} - -#[async_trait] -impl RelayClient for HeadersSource { - type Error = Error; - - async fn reconnect(&mut self) -> Result<(), Error> { - self.client.reconnect().await - } -} - -#[async_trait] -impl SourceClient

for HeadersSource -where - C: Chain, - C::BlockNumber: relay_utils::BlockNumberBase, - C::Header: Into, - P: HeadersSyncPipeline, - P::Header: SourceHeader, -{ - async fn best_block_number(&self) -> Result { - // we **CAN** continue to relay headers if source node is out of sync, because - // target node may be missing headers that are already available at the source - Ok(*self.client.best_header().await?.number()) - } - - async fn header_by_hash(&self, hash: P::Hash) -> Result { - self.client - .header_by_hash(hash) - .await - .map(Into::into) - .map_err(Into::into) - } - - async fn header_by_number(&self, number: P::Number) -> Result { - self.client - .header_by_number(number) - .await - .map(Into::into) - .map_err(Into::into) - } - - async fn header_completion(&self, id: HeaderIdOf

) -> Result<(HeaderIdOf

, Option), Error> { - let hash = id.1; - let signed_block = self.client.get_block(Some(hash)).await?; - let grandpa_justification = signed_block.justification().cloned(); - - Ok((id, grandpa_justification)) - } - - async fn header_extra(&self, id: HeaderIdOf

, _header: QueuedHeader

) -> Result<(HeaderIdOf

, ()), Error> { - Ok((id, ())) - } -} diff --git a/polkadot/bridges/relays/substrate-client/src/lib.rs b/polkadot/bridges/relays/substrate-client/src/lib.rs deleted file mode 100644 index c6d077b21c562014bbdff35bb339f3b22bbdd10e..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate-client/src/lib.rs +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tools to interact with (Open) Ethereum node using RPC methods. - -#![warn(missing_docs)] - -mod chain; -mod client; -mod error; -mod rpc; -mod sync_header; - -pub mod guard; -pub mod headers_source; - -pub use crate::chain::{BlockWithJustification, Chain, ChainWithBalances, TransactionSignScheme}; -pub use crate::client::{Client, JustificationsSubscription, OpaqueGrandpaAuthoritiesSet}; -pub use crate::error::{Error, Result}; -pub use crate::sync_header::SyncHeader; -pub use bp_runtime::{BlockNumberOf, Chain as ChainBase, HashOf, HeaderOf}; - -/// Header id used by the chain. -pub type HeaderIdOf = relay_utils::HeaderId, BlockNumberOf>; - -/// Substrate-over-websocket connection params. -#[derive(Debug, Clone)] -pub struct ConnectionParams { - /// Websocket server hostname. - pub host: String, - /// Websocket server TCP port. 
- pub port: u16, -} - -impl Default for ConnectionParams { - fn default() -> Self { - ConnectionParams { - host: "localhost".into(), - port: 9944, - } - } -} diff --git a/polkadot/bridges/relays/substrate-client/src/rpc.rs b/polkadot/bridges/relays/substrate-client/src/rpc.rs deleted file mode 100644 index 2e832b4018174f0c42880c0862fa0c976c74e8e1..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate-client/src/rpc.rs +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The most generic Substrate node RPC interface. - -// The compiler doesn't think we're using the -// code from rpc_api! -#![allow(dead_code)] -#![allow(unused_variables)] - -use crate::chain::Chain; - -use bp_message_lane::{LaneId, MessageNonce}; -use bp_runtime::InstanceId; -use sc_rpc_api::system::Health; -use sp_core::{ - storage::{StorageData, StorageKey}, - Bytes, -}; -use sp_version::RuntimeVersion; - -jsonrpsee::rpc_api! 
{ - pub(crate) Substrate { - #[rpc(method = "system_health", positional_params)] - fn system_health() -> Health; - #[rpc(method = "chain_getHeader", positional_params)] - fn chain_get_header(block_hash: Option) -> C::Header; - #[rpc(method = "chain_getFinalizedHead", positional_params)] - fn chain_get_finalized_head() -> C::Hash; - #[rpc(method = "chain_getBlock", positional_params)] - fn chain_get_block(block_hash: Option) -> C::SignedBlock; - #[rpc(method = "chain_getBlockHash", positional_params)] - fn chain_get_block_hash(block_number: Option) -> C::Hash; - #[rpc(method = "system_accountNextIndex", positional_params)] - fn system_account_next_index(account_id: C::AccountId) -> C::Index; - #[rpc(method = "author_submitExtrinsic", positional_params)] - fn author_submit_extrinsic(extrinsic: Bytes) -> C::Hash; - #[rpc(method = "state_call", positional_params)] - fn state_call(method: String, data: Bytes, at_block: Option) -> Bytes; - #[rpc(method = "state_getStorage", positional_params)] - fn get_storage(key: StorageKey) -> Option; - #[rpc(method = "state_getRuntimeVersion", positional_params)] - fn runtime_version() -> RuntimeVersion; - } - - pub(crate) SubstrateMessageLane { - #[rpc(method = "messageLane_proveMessages", positional_params)] - fn prove_messages( - instance: InstanceId, - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - include_outbound_lane_state: bool, - block: Option, - ) -> Bytes; - - #[rpc(method = "messageLane_proveMessagesDelivery", positional_params)] - fn prove_messages_delivery( - instance: InstanceId, - lane: LaneId, - block: Option, - ) -> Bytes; - } -} diff --git a/polkadot/bridges/relays/substrate-client/src/sync_header.rs b/polkadot/bridges/relays/substrate-client/src/sync_header.rs deleted file mode 100644 index fd1c582b9480ee48e48f4cf8e3b66924fe791300..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate-client/src/sync_header.rs +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2019-2020 Parity 
Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use headers_relay::sync_types::SourceHeader; -use num_traits::{CheckedSub, One}; -use relay_utils::HeaderId; -use sp_runtime::traits::Header as HeaderT; - -/// Generic wrapper for `sp_runtime::traits::Header` based headers, that -/// implements `headers_relay::sync_types::SourceHeader` and may be used in headers sync directly. -#[derive(Clone, Debug, PartialEq)] -pub struct SyncHeader

(Header); - -impl
SyncHeader
{ - /// Extracts wrapped header from self. - pub fn into_inner(self) -> Header { - self.0 - } -} - -impl
std::ops::Deref for SyncHeader
{ - type Target = Header; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl
From
for SyncHeader
{ - fn from(header: Header) -> Self { - Self(header) - } -} - -impl SourceHeader for SyncHeader
{ - fn id(&self) -> HeaderId { - relay_utils::HeaderId(*self.number(), self.hash()) - } - - fn parent_id(&self) -> HeaderId { - relay_utils::HeaderId( - self.number() - .checked_sub(&One::one()) - .expect("should never be called for genesis header"), - *self.parent_hash(), - ) - } -} diff --git a/polkadot/bridges/relays/substrate/Cargo.toml b/polkadot/bridges/relays/substrate/Cargo.toml deleted file mode 100644 index 120501e511feecc79110cbee3f1769563dfd2134..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate/Cargo.toml +++ /dev/null @@ -1,49 +0,0 @@ -[package] -name = "substrate-relay" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -async-std = "1.9.0" -async-trait = "0.1.42" -codec = { package = "parity-scale-codec", version = "2.0.0" } -futures = "0.3.12" -hex = "0.4" -log = "0.4.14" -num-traits = "0.2" -paste = "1.0" -structopt = "0.3" - -# Bridge dependencies - -bp-header-chain = { path = "../../primitives/header-chain" } -bp-kusama = { path = "../../primitives/kusama" } -bp-message-lane = { path = "../../primitives/message-lane" } -bp-millau = { path = "../../primitives/millau" } -bp-polkadot = { path = "../../primitives/polkadot" } -bp-runtime = { path = "../../primitives/runtime" } -bp-rialto = { path = "../../primitives/rialto" } -bridge-runtime-common = { path = "../../bin/runtime-common" } -headers-relay = { path = "../headers-relay" } -messages-relay = { path = "../messages-relay" } -millau-runtime = { path = "../../bin/millau/runtime" } -pallet-bridge-call-dispatch = { path = "../../modules/call-dispatch" } -pallet-message-lane = { path = "../../modules/message-lane" } -pallet-substrate-bridge = { path = "../../modules/substrate" } -relay-kusama-client = { path = "../kusama-client" } -relay-millau-client = { path = "../millau-client" } -relay-polkadot-client = { path = "../polkadot-client" } -relay-rialto-client = { path = 
"../rialto-client" } -relay-substrate-client = { path = "../substrate-client" } -relay-utils = { path = "../utils" } -rialto-runtime = { path = "../../bin/rialto/runtime" } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/substrate/src/cli.rs b/polkadot/bridges/relays/substrate/src/cli.rs deleted file mode 100644 index 8dc241f069e1cb7e835a612f3fd1f462d4308ddd..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate/src/cli.rs +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Deal with CLI args of substrate-to-substrate relay. - -use bp_message_lane::LaneId; -use frame_support::weights::Weight; -use sp_core::Bytes; -use sp_finality_grandpa::SetId as GrandpaAuthoritiesSetId; -use structopt::{clap::arg_enum, StructOpt}; - -/// Parse relay CLI args. 
-pub fn parse_args() -> Command { - Command::from_args() -} - -/// Substrate-to-Substrate bridge utilities. -#[derive(StructOpt)] -#[structopt(about = "Substrate-to-Substrate relay")] -pub enum Command { - /// Start headers relay between two chains. - /// - /// The on-chain bridge component should have been already initialized with - /// `init-bridge` sub-command. - RelayHeaders(RelayHeaders), - /// Start messages relay between two chains. - /// - /// Ties up to `MessageLane` pallets on both chains and starts relaying messages. - /// Requires the header relay to be already running. - RelayMessages(RelayMessages), - /// Initialize on-chain bridge pallet with current header data. - /// - /// Sends initialization transaction to bootstrap the bridge with current finalized block data. - InitBridge(InitBridge), - /// Send custom message over the bridge. - /// - /// Allows interacting with the bridge by sending messages over `MessageLane` component. - /// The message is being sent to the source chain, delivered to the target chain and dispatched - /// there. - SendMessage(SendMessage), -} - -#[derive(StructOpt)] -pub enum RelayHeaders { - /// Relay Millau headers to Rialto. - MillauToRialto { - #[structopt(flatten)] - millau: MillauConnectionParams, - #[structopt(flatten)] - rialto: RialtoConnectionParams, - #[structopt(flatten)] - rialto_sign: RialtoSigningParams, - #[structopt(flatten)] - prometheus_params: PrometheusParams, - }, - /// Relay Rialto headers to Millau. - RialtoToMillau { - #[structopt(flatten)] - rialto: RialtoConnectionParams, - #[structopt(flatten)] - millau: MillauConnectionParams, - #[structopt(flatten)] - millau_sign: MillauSigningParams, - #[structopt(flatten)] - prometheus_params: PrometheusParams, - }, -} - -#[derive(StructOpt)] -pub enum RelayMessages { - /// Serve given lane of Millau -> Rialto messages. 
- MillauToRialto { - #[structopt(flatten)] - millau: MillauConnectionParams, - #[structopt(flatten)] - millau_sign: MillauSigningParams, - #[structopt(flatten)] - rialto: RialtoConnectionParams, - #[structopt(flatten)] - rialto_sign: RialtoSigningParams, - #[structopt(flatten)] - prometheus_params: PrometheusParams, - /// Hex-encoded id of lane that should be served by relay. - #[structopt(long)] - lane: HexLaneId, - }, - /// Serve given lane of Rialto -> Millau messages. - RialtoToMillau { - #[structopt(flatten)] - rialto: RialtoConnectionParams, - #[structopt(flatten)] - rialto_sign: RialtoSigningParams, - #[structopt(flatten)] - millau: MillauConnectionParams, - #[structopt(flatten)] - millau_sign: MillauSigningParams, - #[structopt(flatten)] - prometheus_params: PrometheusParams, - /// Hex-encoded id of lane that should be served by relay. - #[structopt(long)] - lane: HexLaneId, - }, -} - -#[derive(StructOpt)] -pub enum InitBridge { - /// Initialize Millau headers bridge in Rialto. - MillauToRialto { - #[structopt(flatten)] - millau: MillauConnectionParams, - #[structopt(flatten)] - rialto: RialtoConnectionParams, - #[structopt(flatten)] - rialto_sign: RialtoSigningParams, - #[structopt(flatten)] - millau_bridge_params: MillauBridgeInitializationParams, - }, - /// Initialize Rialto headers bridge in Millau. - RialtoToMillau { - #[structopt(flatten)] - rialto: RialtoConnectionParams, - #[structopt(flatten)] - millau: MillauConnectionParams, - #[structopt(flatten)] - millau_sign: MillauSigningParams, - #[structopt(flatten)] - rialto_bridge_params: RialtoBridgeInitializationParams, - }, -} - -#[derive(StructOpt)] -pub enum SendMessage { - /// Submit message to given Millau -> Rialto lane. - MillauToRialto { - #[structopt(flatten)] - millau: MillauConnectionParams, - #[structopt(flatten)] - millau_sign: MillauSigningParams, - #[structopt(flatten)] - rialto_sign: RialtoSigningParams, - /// Hex-encoded lane id. 
- #[structopt(long)] - lane: HexLaneId, - /// Dispatch weight of the message. If not passed, determined automatically. - #[structopt(long)] - dispatch_weight: Option>, - /// Delivery and dispatch fee. If not passed, determined automatically. - #[structopt(long)] - fee: Option, - /// Message type. - #[structopt(subcommand)] - message: ToRialtoMessage, - /// The origin to use when dispatching the message on the target chain. - #[structopt(long, possible_values = &Origins::variants())] - origin: Origins, - }, - /// Submit message to given Rialto -> Millau lane. - RialtoToMillau { - #[structopt(flatten)] - rialto: RialtoConnectionParams, - #[structopt(flatten)] - rialto_sign: RialtoSigningParams, - #[structopt(flatten)] - millau_sign: MillauSigningParams, - /// Hex-encoded lane id. - #[structopt(long)] - lane: HexLaneId, - /// Dispatch weight of the message. If not passed, determined automatically. - #[structopt(long)] - dispatch_weight: Option>, - /// Delivery and dispatch fee. If not passed, determined automatically. - #[structopt(long)] - fee: Option, - /// Message type. - #[structopt(subcommand)] - message: ToMillauMessage, - /// The origin to use when dispatching the message on the target chain. - #[structopt(long, possible_values = &Origins::variants())] - origin: Origins, - }, -} - -/// All possible messages that may be delivered to the Rialto chain. -#[derive(StructOpt, Debug)] -pub enum ToRialtoMessage { - /// Make an on-chain remark (comment). - Remark { - /// Remark size. If not passed, small UTF8-encoded string is generated by relay as remark. - #[structopt(long)] - remark_size: Option>, - }, - /// Transfer the specified `amount` of native tokens to a particular `recipient`. - Transfer { - #[structopt(long)] - recipient: bp_rialto::AccountId, - #[structopt(long)] - amount: bp_rialto::Balance, - }, -} - -/// All possible messages that may be delivered to the Millau chain. 
-#[derive(StructOpt, Debug)] -pub enum ToMillauMessage { - /// Make an on-chain remark (comment). - Remark { - /// Size of the remark. If not passed, small UTF8-encoded string is generated by relay as remark. - #[structopt(long)] - remark_size: Option>, - }, - /// Transfer the specified `amount` of native tokens to a particular `recipient`. - Transfer { - #[structopt(long)] - recipient: bp_millau::AccountId, - #[structopt(long)] - amount: bp_millau::Balance, - }, -} - -arg_enum! { - #[derive(Debug)] - /// The origin to use when dispatching the message on the target chain. - /// - /// - `Target` uses account existing on the target chain (requires target private key). - /// - `Origin` uses account derived from the source-chain account. - pub enum Origins { - Target, - Source, - } -} - -/// Lane id. -#[derive(Debug)] -pub struct HexLaneId(LaneId); - -impl From for LaneId { - fn from(lane_id: HexLaneId) -> LaneId { - lane_id.0 - } -} - -impl std::str::FromStr for HexLaneId { - type Err = hex::FromHexError; - - fn from_str(s: &str) -> Result { - let mut lane_id = LaneId::default(); - hex::decode_to_slice(s, &mut lane_id)?; - Ok(HexLaneId(lane_id)) - } -} - -/// Prometheus metrics params. -#[derive(StructOpt)] -pub struct PrometheusParams { - /// Do not expose a Prometheus metric endpoint. - #[structopt(long)] - pub no_prometheus: bool, - /// Expose Prometheus endpoint at given interface. - #[structopt(long, default_value = "127.0.0.1")] - pub prometheus_host: String, - /// Expose Prometheus endpoint at given port. - #[structopt(long, default_value = "9616")] - pub prometheus_port: u16, -} - -impl From for Option { - fn from(cli_params: PrometheusParams) -> Option { - if !cli_params.no_prometheus { - Some(relay_utils::metrics::MetricsParams { - host: cli_params.prometheus_host, - port: cli_params.prometheus_port, - }) - } else { - None - } - } -} - -/// Either explicit or maximal allowed value. 
-#[derive(Debug)] -pub enum ExplicitOrMaximal { - /// User has explicitly specified argument value. - Explicit(V), - /// Maximal allowed value for this argument. - Maximal, -} - -impl std::str::FromStr for ExplicitOrMaximal -where - V::Err: std::fmt::Debug, -{ - type Err = String; - - fn from_str(s: &str) -> Result { - if s.to_lowercase() == "max" { - return Ok(ExplicitOrMaximal::Maximal); - } - - V::from_str(s) - .map(ExplicitOrMaximal::Explicit) - .map_err(|e| format!("Failed to parse '{:?}'. Expected 'max' or explicit value", e)) - } -} - -macro_rules! declare_chain_options { - ($chain:ident, $chain_prefix:ident) => { - paste::item! { - #[doc = $chain " connection params."] - #[derive(StructOpt)] - pub struct [<$chain ConnectionParams>] { - #[doc = "Connect to " $chain " node at given host."] - #[structopt(long)] - pub [<$chain_prefix _host>]: String, - #[doc = "Connect to " $chain " node websocket server at given port."] - #[structopt(long)] - pub [<$chain_prefix _port>]: u16, - } - - #[doc = $chain " signing params."] - #[derive(StructOpt)] - pub struct [<$chain SigningParams>] { - #[doc = "The SURI of secret key to use when transactions are submitted to the " $chain " node."] - #[structopt(long)] - pub [<$chain_prefix _signer>]: String, - #[doc = "The password for the SURI of secret key to use when transactions are submitted to the " $chain " node."] - #[structopt(long)] - pub [<$chain_prefix _signer_password>]: Option, - } - - #[doc = $chain " headers bridge initialization params."] - #[derive(StructOpt)] - pub struct [<$chain BridgeInitializationParams>] { - #[doc = "Hex-encoded " $chain " header to initialize bridge with. If not specified, genesis header is used."] - #[structopt(long)] - pub [<$chain_prefix _initial_header>]: Option, - #[doc = "Hex-encoded " $chain " GRANDPA authorities set to initialize bridge with. 
If not specified, set from genesis block is used."] - #[structopt(long)] - pub [<$chain_prefix _initial_authorities>]: Option, - #[doc = "Id of the " $chain " GRANDPA authorities set to initialize bridge with. If not specified, zero is used."] - #[structopt(long)] - pub [<$chain_prefix _initial_authorities_set_id>]: Option, - } - } - }; -} - -declare_chain_options!(Rialto, rialto); -declare_chain_options!(Millau, millau); diff --git a/polkadot/bridges/relays/substrate/src/headers_initialize.rs b/polkadot/bridges/relays/substrate/src/headers_initialize.rs deleted file mode 100644 index 6b66a2e9bd7fb41dc24c00c7290ea8af746299ab..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate/src/headers_initialize.rs +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Initialize Substrate -> Substrate headers bridge. -//! -//! Initialization is a transaction that calls `initialize()` function of the -//! `pallet-substrate-bridge` pallet. This transaction brings initial header -//! and authorities set from source to target chain. The headers sync starts -//! with this header. 
- -use codec::Decode; -use pallet_substrate_bridge::InitializationData; -use relay_substrate_client::{Chain, Client}; -use sp_core::Bytes; -use sp_finality_grandpa::{AuthorityList as GrandpaAuthoritiesSet, SetId as GrandpaAuthoritiesSetId}; - -/// Submit headers-bridge initialization transaction. -pub async fn initialize( - source_client: Client, - target_client: Client, - raw_initial_header: Option, - raw_initial_authorities_set: Option, - initial_authorities_set_id: Option, - prepare_initialize_transaction: impl FnOnce(InitializationData) -> Result, -) { - let result = do_initialize( - source_client, - target_client, - raw_initial_header, - raw_initial_authorities_set, - initial_authorities_set_id, - prepare_initialize_transaction, - ) - .await; - - match result { - Ok(tx_hash) => log::info!( - target: "bridge", - "Successfully submitted {}-headers bridge initialization transaction to {}: {:?}", - SourceChain::NAME, - TargetChain::NAME, - tx_hash, - ), - Err(err) => log::error!( - target: "bridge", - "Failed to submit {}-headers bridge initialization transaction to {}: {:?}", - SourceChain::NAME, - TargetChain::NAME, - err, - ), - } -} - -/// Craft and submit initialization transaction, returning any error that may occur. 
-async fn do_initialize( - source_client: Client, - target_client: Client, - raw_initial_header: Option, - raw_initial_authorities_set: Option, - initial_authorities_set_id: Option, - prepare_initialize_transaction: impl FnOnce(InitializationData) -> Result, -) -> Result { - let initialization_data = prepare_initialization_data( - source_client, - raw_initial_header, - raw_initial_authorities_set, - initial_authorities_set_id, - ) - .await?; - let initialization_tx = prepare_initialize_transaction(initialization_data)?; - let initialization_tx_hash = target_client - .submit_extrinsic(initialization_tx) - .await - .map_err(|err| format!("Failed to submit {} transaction: {:?}", TargetChain::NAME, err))?; - Ok(initialization_tx_hash) -} - -/// Prepare initialization data for the headers-bridge pallet. -async fn prepare_initialization_data( - source_client: Client, - raw_initial_header: Option, - raw_initial_authorities_set: Option, - initial_authorities_set_id: Option, -) -> Result, String> { - let source_genesis_hash = *source_client.genesis_hash(); - - let initial_header = match raw_initial_header { - Some(raw_initial_header) => SourceChain::Header::decode(&mut &raw_initial_header.0[..]) - .map_err(|err| format!("Failed to decode {} initial header: {:?}", SourceChain::NAME, err))?, - None => source_client - .header_by_hash(source_genesis_hash) - .await - .map_err(|err| format!("Failed to retrive {} genesis header: {:?}", SourceChain::NAME, err))?, - }; - - let raw_initial_authorities_set = match raw_initial_authorities_set { - Some(raw_initial_authorities_set) => raw_initial_authorities_set.0, - None => source_client - .grandpa_authorities_set(source_genesis_hash) - .await - .map_err(|err| { - format!( - "Failed to retrive {} authorities set at genesis header: {:?}", - SourceChain::NAME, - err - ) - })?, - }; - let initial_authorities_set = - GrandpaAuthoritiesSet::decode(&mut &raw_initial_authorities_set[..]).map_err(|err| { - format!( - "Failed to decode {} 
initial authorities set: {:?}", - SourceChain::NAME, - err - ) - })?; - - Ok(InitializationData { - header: initial_header, - authority_list: initial_authorities_set, - set_id: initial_authorities_set_id.unwrap_or(0), - // There may be multiple scheduled changes, so on real chains we should select proper - // moment, when there's nothing scheduled. On ephemeral (temporary) chains, it is ok to - // start with genesis. - scheduled_change: None, - is_halted: false, - }) -} diff --git a/polkadot/bridges/relays/substrate/src/headers_maintain.rs b/polkadot/bridges/relays/substrate/src/headers_maintain.rs deleted file mode 100644 index 14432487ea30814a74b3fe969afda09c015ecd5b..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate/src/headers_maintain.rs +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate-to-Substrate headers synchronization maintain procedure. -//! -//! Regular headers synchronization only depends on persistent justifications -//! that are generated when authorities set changes. This happens rarely on -//! real-word chains. So some other way to finalize headers is required. -//! -//! Full nodes are listening to GRANDPA messages, so they may have track authorities -//! votes on their own. 
They're returning both persistent and ephemeral justifications -//! (justifications that are not stored in the database and not broadcasted over network) -//! throught `grandpa_subscribeJustifications` RPC subscription. -//! -//! The idea of this maintain procedure is that when we see justification that 'improves' -//! best finalized header on the target chain, we submit this justification to the target -//! node. - -use crate::headers_pipeline::SubstrateHeadersSyncPipeline; - -use async_std::sync::{Arc, Mutex}; -use async_trait::async_trait; -use codec::{Decode, Encode}; -use futures::future::{poll_fn, FutureExt, TryFutureExt}; -use headers_relay::{ - sync::HeadersSync, - sync_loop::SyncMaintain, - sync_types::{HeaderIdOf, HeaderStatus}, -}; -use relay_substrate_client::{Chain, Client, Error as SubstrateError, JustificationsSubscription}; -use relay_utils::HeaderId; -use sp_core::Bytes; -use sp_runtime::{traits::Header as HeaderT, Justification}; -use std::{collections::VecDeque, marker::PhantomData, task::Poll}; - -/// Substrate-to-Substrate headers synchronization maintain procedure. -pub struct SubstrateHeadersToSubstrateMaintain { - pipeline: P, - target_client: Client, - justifications: Arc>>, - _marker: PhantomData, -} - -/// Future and already received justifications from the source chain. -struct Justifications { - /// Justifications stream. - stream: JustificationsSubscription, - /// Justifications that we have read from the stream but have not sent to the - /// target node, because their targets were still not synced. - queue: VecDeque<(HeaderIdOf

, Justification)>, -} - -impl - SubstrateHeadersToSubstrateMaintain -{ - /// Create new maintain procedure. - pub fn new(pipeline: P, target_client: Client, justifications: JustificationsSubscription) -> Self { - SubstrateHeadersToSubstrateMaintain { - pipeline, - target_client, - justifications: Arc::new(Mutex::new(Justifications { - stream: justifications, - queue: VecDeque::new(), - })), - _marker: Default::default(), - } - } -} - -#[async_trait] -impl Clone - for SubstrateHeadersToSubstrateMaintain -{ - fn clone(&self) -> Self { - SubstrateHeadersToSubstrateMaintain { - pipeline: self.pipeline.clone(), - target_client: self.target_client.clone(), - justifications: self.justifications.clone(), - _marker: Default::default(), - } - } -} - -#[async_trait] -impl SyncMaintain

for SubstrateHeadersToSubstrateMaintain -where - SourceChain: Chain, - ::Number: Into, - ::Hash: Into, - TargetChain: Chain, - P::Number: Decode, - P::Hash: Decode, - P: SubstrateHeadersSyncPipeline, -{ - async fn maintain(&self, sync: &mut HeadersSync

) { - // lock justifications before doing anything else - let mut justifications = match self.justifications.try_lock() { - Some(justifications) => justifications, - None => { - // this should never happen, as we use single-thread executor - log::warn!(target: "bridge", "Failed to acquire {} justifications lock", P::SOURCE_NAME); - return; - } - }; - - // we need to read best finalized header from the target node to be able to - // choose justification to submit - let best_finalized = match best_finalized_header_id::(&self.target_client).await { - Ok(best_finalized) => best_finalized, - Err(error) => { - log::warn!( - target: "bridge", - "Failed to read best finalized {} block from maintain: {:?}", - P::SOURCE_NAME, - error, - ); - return; - } - }; - - log::debug!( - target: "bridge", - "Read best finalized {} block from {}: {:?}", - P::SOURCE_NAME, - P::TARGET_NAME, - best_finalized, - ); - - // Select justification to submit to the target node. We're submitting at most one justification - // on every maintain call. So maintain rate directly affects finalization rate. - let justification_to_submit = poll_fn(|context| { - // read justifications from the stream and push to the queue - justifications.read_from_stream::(context); - - // remove all obsolete justifications from the queue - remove_obsolete::

(&mut justifications.queue, best_finalized); - - // select justification to submit - Poll::Ready(select_justification(&mut justifications.queue, sync)) - }) - .await; - - // finally - submit selected justification - if let Some((target, justification)) = justification_to_submit { - let submit_result = self - .pipeline - .make_complete_header_transaction(target, justification) - .and_then(|tx| self.target_client.submit_extrinsic(Bytes(tx.encode()))) - .await; - - match submit_result { - Ok(_) => log::debug!( - target: "bridge", - "Submitted justification received over {} subscription. Target: {:?}", - P::SOURCE_NAME, - target, - ), - Err(error) => log::warn!( - target: "bridge", - "Failed to submit justification received over {} subscription for {:?}: {:?}", - P::SOURCE_NAME, - target, - error, - ), - } - } - } -} - -impl

Justifications

-where - P::Number: Decode, - P::Hash: Decode, - P: SubstrateHeadersSyncPipeline, -{ - /// Read justifications from the subscription stream without blocking. - fn read_from_stream<'a, SourceHeader>(&mut self, context: &mut std::task::Context<'a>) - where - SourceHeader: HeaderT, - SourceHeader::Number: Into, - SourceHeader::Hash: Into, - { - loop { - let maybe_next_justification = self.stream.next(); - futures::pin_mut!(maybe_next_justification); - - let maybe_next_justification = maybe_next_justification.poll_unpin(context); - let justification = match maybe_next_justification { - Poll::Ready(justification) => justification, - Poll::Pending => return, - }; - - // decode justification target - let target = bp_header_chain::justification::decode_justification_target::(&justification); - let target = match target { - Ok((target_hash, target_number)) => HeaderId(target_number.into(), target_hash.into()), - Err(error) => { - log::warn!( - target: "bridge", - "Failed to decode justification from {} subscription: {:?}", - P::SOURCE_NAME, - error, - ); - continue; - } - }; - - log::debug!( - target: "bridge", - "Received {} justification over subscription. Target: {:?}", - P::SOURCE_NAME, - target, - ); - - self.queue.push_back((target, justification.0)); - } - } -} - -/// Clean queue of all justifications that are justifying already finalized blocks. -fn remove_obsolete( - queue: &mut VecDeque<(HeaderIdOf

, Justification)>, - best_finalized: HeaderIdOf

, -) { - while queue - .front() - .map(|(target, _)| target.0 <= best_finalized.0) - .unwrap_or(false) - { - queue.pop_front(); - } -} - -/// Select appropriate justification that would improve best finalized block on target node. -/// -/// It is assumed that the selected justification will be submitted to the target node. The -/// justification itself and all preceeding justifications are removed from the queue. -fn select_justification

( - queue: &mut VecDeque<(HeaderIdOf

, Justification)>, - sync: &mut HeadersSync

, -) -> Option<(HeaderIdOf

, Justification)> -where - P: SubstrateHeadersSyncPipeline, -{ - let mut selected_justification = None; - while let Some((target, justification)) = queue.pop_front() { - // if we're waiting for this justification, report it - if sync.headers().requires_completion_data(&target) { - sync.headers_mut().completion_response(&target, Some(justification)); - // we won't submit previous justifications as we going to submit justification for - // next header - selected_justification = None; - // we won't submit next justifications as we need to submit previous justifications - // first - break; - } - - // if we know that the header is already synced (it is known to the target node), let's - // select it for submission. We still may select better justification on the next iteration. - if sync.headers().status(&target) == HeaderStatus::Synced { - selected_justification = Some((target, justification)); - continue; - } - - // finally - return justification back to the queue - queue.push_back((target, justification)); - break; - } - - selected_justification -} - -/// Returns best finalized source header on the target chain. 
-async fn best_finalized_header_id(client: &Client) -> Result, SubstrateError> -where - P: SubstrateHeadersSyncPipeline, - P::Number: Decode, - P::Hash: Decode, - C: Chain, -{ - let call = P::FINALIZED_BLOCK_METHOD.into(); - let data = Bytes(Vec::new()); - - let encoded_response = client.state_call(call, data, None).await?; - let decoded_response: (P::Number, P::Hash) = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - - let best_header_id = HeaderId(decoded_response.0, decoded_response.1); - Ok(best_header_id) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::headers_pipeline::sync_params; - use crate::millau_headers_to_rialto::MillauHeadersToRialto; - - fn parent_hash(index: u8) -> bp_millau::Hash { - if index == 1 { - Default::default() - } else { - header(index - 1).hash() - } - } - - fn header_hash(index: u8) -> bp_millau::Hash { - header(index).hash() - } - - fn header(index: u8) -> bp_millau::Header { - bp_millau::Header::new( - index as _, - Default::default(), - Default::default(), - parent_hash(index), - Default::default(), - ) - } - - #[test] - fn obsolete_justifications_are_removed() { - let mut queue = vec![ - (HeaderId(1, header_hash(1)), vec![1]), - (HeaderId(2, header_hash(2)), vec![2]), - (HeaderId(3, header_hash(3)), vec![3]), - ] - .into_iter() - .collect(); - - remove_obsolete::(&mut queue, HeaderId(2, header_hash(2))); - - assert_eq!( - queue, - vec![(HeaderId(3, header_hash(3)), vec![3])] - .into_iter() - .collect::>(), - ); - } - - #[test] - fn latest_justification_is_selected() { - let mut queue = vec![ - (HeaderId(1, header_hash(1)), vec![1]), - (HeaderId(2, header_hash(2)), vec![2]), - (HeaderId(3, header_hash(3)), vec![3]), - ] - .into_iter() - .collect(); - let mut sync = HeadersSync::::new(sync_params()); - sync.headers_mut().header_response(header(1).into()); - sync.headers_mut().header_response(header(2).into()); - sync.headers_mut().header_response(header(3).into()); - 
sync.target_best_header_response(HeaderId(2, header_hash(2))); - - assert_eq!( - select_justification(&mut queue, &mut sync), - Some((HeaderId(2, header_hash(2)), vec![2])), - ); - } - - #[test] - fn required_justification_is_reported() { - let mut queue = vec![ - (HeaderId(1, header_hash(1)), vec![1]), - (HeaderId(2, header_hash(2)), vec![2]), - (HeaderId(3, header_hash(3)), vec![3]), - ] - .into_iter() - .collect(); - let mut sync = HeadersSync::::new(sync_params()); - sync.headers_mut().header_response(header(1).into()); - sync.headers_mut().header_response(header(2).into()); - sync.headers_mut().header_response(header(3).into()); - sync.headers_mut() - .incomplete_headers_response(vec![HeaderId(2, header_hash(2))].into_iter().collect()); - sync.target_best_header_response(HeaderId(2, header_hash(2))); - - assert_eq!(sync.headers_mut().header_to_complete(), None,); - - assert_eq!(select_justification(&mut queue, &mut sync), None,); - - assert_eq!( - sync.headers_mut().header_to_complete(), - Some((HeaderId(2, header_hash(2)), &vec![2])), - ); - } -} diff --git a/polkadot/bridges/relays/substrate/src/headers_pipeline.rs b/polkadot/bridges/relays/substrate/src/headers_pipeline.rs deleted file mode 100644 index 8ad6fc50b95271b2025e447366096e29e9d42202..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate/src/headers_pipeline.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate-to-Substrate headers sync entrypoint. - -use crate::{headers_maintain::SubstrateHeadersToSubstrateMaintain, headers_target::SubstrateHeadersTarget}; - -use async_trait::async_trait; -use codec::Encode; -use headers_relay::{ - sync::{HeadersSyncParams, TargetTransactionMode}, - sync_types::{HeaderIdOf, HeadersSyncPipeline, QueuedHeader, SourceHeader}, -}; -use relay_substrate_client::{ - headers_source::HeadersSource, BlockNumberOf, Chain, Client, Error as SubstrateError, HashOf, -}; -use relay_utils::BlockNumberBase; -use sp_runtime::Justification; -use std::marker::PhantomData; - -/// Headers sync pipeline for Substrate <-> Substrate relays. -#[async_trait] -pub trait SubstrateHeadersSyncPipeline: HeadersSyncPipeline { - /// Name of the `best_block` runtime method. - const BEST_BLOCK_METHOD: &'static str; - /// Name of the `finalized_block` runtime method. - const FINALIZED_BLOCK_METHOD: &'static str; - /// Name of the `is_known_block` runtime method. - const IS_KNOWN_BLOCK_METHOD: &'static str; - /// Name of the `incomplete_headers` runtime method. - const INCOMPLETE_HEADERS_METHOD: &'static str; - - /// Signed transaction type. - type SignedTransaction: Send + Sync + Encode; - - /// Make submit header transaction. - async fn make_submit_header_transaction( - &self, - header: QueuedHeader, - ) -> Result; - - /// Make completion transaction for the header. - async fn make_complete_header_transaction( - &self, - id: HeaderIdOf, - completion: Justification, - ) -> Result; -} - -/// Substrate-to-Substrate headers pipeline. -#[derive(Debug, Clone)] -pub struct SubstrateHeadersToSubstrate { - /// Client for the target chain. - pub(crate) target_client: Client, - /// Data required to sign target chain transactions. 
- pub(crate) target_sign: TargetSign, - /// Unused generic arguments dump. - _marker: PhantomData<(SourceChain, SourceSyncHeader)>, -} - -impl - SubstrateHeadersToSubstrate -{ - /// Create new Substrate-to-Substrate headers pipeline. - pub fn new(target_client: Client, target_sign: TargetSign) -> Self { - SubstrateHeadersToSubstrate { - target_client, - target_sign, - _marker: Default::default(), - } - } -} - -impl HeadersSyncPipeline - for SubstrateHeadersToSubstrate -where - SourceChain: Clone + Chain, - BlockNumberOf: BlockNumberBase, - SourceSyncHeader: - SourceHeader, BlockNumberOf> + std::ops::Deref, - TargetChain: Clone + Chain, - TargetSign: Clone + Send + Sync, -{ - const SOURCE_NAME: &'static str = SourceChain::NAME; - const TARGET_NAME: &'static str = TargetChain::NAME; - - type Hash = HashOf; - type Number = BlockNumberOf; - type Header = SourceSyncHeader; - type Extra = (); - type Completion = Justification; - - fn estimate_size(source: &QueuedHeader) -> usize { - source.header().encode().len() - } -} - -/// Return sync parameters for Substrate-to-Substrate headers sync. -pub fn sync_params() -> HeadersSyncParams { - HeadersSyncParams { - max_future_headers_to_download: 32, - max_headers_in_submitted_status: 8, - max_headers_in_single_submit: 1, - max_headers_size_in_single_submit: 1024 * 1024, - prune_depth: 256, - target_tx_mode: TargetTransactionMode::Signed, - } -} - -/// Run Substrate-to-Substrate headers sync. 
-pub async fn run( - pipeline: P, - source_client: Client, - target_client: Client, - metrics_params: Option, -) where - P: SubstrateHeadersSyncPipeline< - Hash = HashOf, - Number = BlockNumberOf, - Completion = Justification, - Extra = (), - >, - P::Header: SourceHeader, BlockNumberOf>, - SourceChain: Clone + Chain, - SourceChain::Header: Into, - BlockNumberOf: BlockNumberBase, - TargetChain: Clone + Chain, -{ - let source_justifications = match source_client.clone().subscribe_justifications().await { - Ok(source_justifications) => source_justifications, - Err(error) => { - log::warn!( - target: "bridge", - "Failed to subscribe to {} justifications: {:?}", - SourceChain::NAME, - error, - ); - - return; - } - }; - - let sync_maintain = SubstrateHeadersToSubstrateMaintain::<_, SourceChain, _>::new( - pipeline.clone(), - target_client.clone(), - source_justifications, - ); - - log::info!( - target: "bridge", - "Starting {} -> {} headers relay", - SourceChain::NAME, - TargetChain::NAME, - ); - - headers_relay::sync_loop::run( - HeadersSource::new(source_client), - SourceChain::AVERAGE_BLOCK_INTERVAL, - SubstrateHeadersTarget::new(target_client, pipeline), - TargetChain::AVERAGE_BLOCK_INTERVAL, - sync_maintain, - sync_params(), - metrics_params, - futures::future::pending(), - ); -} diff --git a/polkadot/bridges/relays/substrate/src/headers_target.rs b/polkadot/bridges/relays/substrate/src/headers_target.rs deleted file mode 100644 index 2b5f63a7feae35dca74b831c060c0cc03cfca0dc..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate/src/headers_target.rs +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate client as Substrate headers target. The chain we connect to should have -//! runtime that implements `HeaderApi` to allow bridging with -//! chain. - -use crate::headers_pipeline::SubstrateHeadersSyncPipeline; - -use async_trait::async_trait; -use codec::{Decode, Encode}; -use futures::TryFutureExt; -use headers_relay::{ - sync_loop::TargetClient, - sync_types::{HeaderIdOf, QueuedHeader, SubmittedHeaders}, -}; -use relay_substrate_client::{Chain, Client, Error as SubstrateError}; -use relay_utils::{relay_loop::Client as RelayClient, HeaderId}; -use sp_core::Bytes; -use sp_runtime::Justification; -use std::collections::HashSet; - -/// Substrate client as Substrate headers target. -pub struct SubstrateHeadersTarget { - client: Client, - pipeline: P, -} - -impl SubstrateHeadersTarget { - /// Create new Substrate headers target. 
- pub fn new(client: Client, pipeline: P) -> Self { - SubstrateHeadersTarget { client, pipeline } - } -} - -impl Clone for SubstrateHeadersTarget { - fn clone(&self) -> Self { - SubstrateHeadersTarget { - client: self.client.clone(), - pipeline: self.pipeline.clone(), - } - } -} - -#[async_trait] -impl RelayClient for SubstrateHeadersTarget { - type Error = SubstrateError; - - async fn reconnect(&mut self) -> Result<(), SubstrateError> { - self.client.reconnect().await - } -} - -#[async_trait] -impl TargetClient

for SubstrateHeadersTarget -where - C: Chain, - P::Number: Decode, - P::Hash: Decode + Encode, - P: SubstrateHeadersSyncPipeline, -{ - async fn best_header_id(&self) -> Result, SubstrateError> { - // we can't continue to relay headers if target node is out of sync, because - // it may have already received (some of) headers that we're going to relay - self.client.ensure_synced().await?; - - let call = P::BEST_BLOCK_METHOD.into(); - let data = Bytes(Vec::new()); - - let encoded_response = self.client.state_call(call, data, None).await?; - let decoded_response: Vec<(P::Number, P::Hash)> = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - - // If we parse an empty list of headers it means that bridge pallet has not been initalized - // yet. Otherwise we expect to always have at least one header. - decoded_response - .last() - .ok_or(SubstrateError::UninitializedBridgePallet) - .map(|(num, hash)| HeaderId(*num, *hash)) - } - - async fn is_known_header(&self, id: HeaderIdOf

) -> Result<(HeaderIdOf

, bool), SubstrateError> { - let call = P::IS_KNOWN_BLOCK_METHOD.into(); - let data = Bytes(id.1.encode()); - - let encoded_response = self.client.state_call(call, data, None).await?; - let is_known_block: bool = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - - Ok((id, is_known_block)) - } - - async fn submit_headers( - &self, - mut headers: Vec>, - ) -> SubmittedHeaders, SubstrateError> { - debug_assert_eq!( - headers.len(), - 1, - "Substrate pallet only supports single header / transaction" - ); - - let header = headers.remove(0); - let id = header.id(); - let submit_transaction_result = self - .pipeline - .make_submit_header_transaction(header) - .and_then(|tx| self.client.submit_extrinsic(Bytes(tx.encode()))) - .await; - - match submit_transaction_result { - Ok(_) => SubmittedHeaders { - submitted: vec![id], - incomplete: Vec::new(), - rejected: Vec::new(), - fatal_error: None, - }, - Err(error) => SubmittedHeaders { - submitted: Vec::new(), - incomplete: Vec::new(), - rejected: vec![id], - fatal_error: Some(error), - }, - } - } - - async fn incomplete_headers_ids(&self) -> Result>, SubstrateError> { - let call = P::INCOMPLETE_HEADERS_METHOD.into(); - let data = Bytes(Vec::new()); - - let encoded_response = self.client.state_call(call, data, None).await?; - let decoded_response: Vec<(P::Number, P::Hash)> = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - - let incomplete_headers = decoded_response - .into_iter() - .map(|(number, hash)| HeaderId(number, hash)) - .collect(); - Ok(incomplete_headers) - } - - async fn complete_header( - &self, - id: HeaderIdOf

, - completion: Justification, - ) -> Result, SubstrateError> { - let tx = self.pipeline.make_complete_header_transaction(id, completion).await?; - self.client.submit_extrinsic(Bytes(tx.encode())).await?; - Ok(id) - } - - async fn requires_extra(&self, header: QueuedHeader

) -> Result<(HeaderIdOf

, bool), SubstrateError> { - Ok((header.id(), false)) - } -} diff --git a/polkadot/bridges/relays/substrate/src/main.rs b/polkadot/bridges/relays/substrate/src/main.rs deleted file mode 100644 index c73533cdfca8af6bab6c88e9c1667e8035bc5337..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate/src/main.rs +++ /dev/null @@ -1,709 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate-to-substrate relay entrypoint. - -#![warn(missing_docs)] - -use codec::{Decode, Encode}; -use frame_support::weights::{GetDispatchInfo, Weight}; -use pallet_bridge_call_dispatch::{CallOrigin, MessagePayload}; -use relay_kusama_client::Kusama; -use relay_millau_client::{Millau, SigningParams as MillauSigningParams}; -use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{Chain, ConnectionParams, TransactionSignScheme}; -use relay_utils::initialize::initialize_relay; -use sp_core::{Bytes, Pair}; -use sp_runtime::traits::IdentifyAccount; - -/// Kusama node client. -pub type KusamaClient = relay_substrate_client::Client; -/// Millau node client. -pub type MillauClient = relay_substrate_client::Client; -/// Rialto node client. 
-pub type RialtoClient = relay_substrate_client::Client; - -mod cli; -mod headers_initialize; -mod headers_maintain; -mod headers_pipeline; -mod headers_target; -mod messages_lane; -mod messages_source; -mod messages_target; -mod millau_headers_to_rialto; -mod millau_messages_to_rialto; -mod rialto_headers_to_millau; -mod rialto_messages_to_millau; - -fn main() { - initialize_relay(); - - let result = async_std::task::block_on(run_command(cli::parse_args())); - if let Err(error) = result { - log::error!(target: "bridge", "Failed to start relay: {}", error); - } -} - -async fn run_command(command: cli::Command) -> Result<(), String> { - match command { - cli::Command::InitBridge(arg) => run_init_bridge(arg).await, - cli::Command::RelayHeaders(arg) => run_relay_headers(arg).await, - cli::Command::RelayMessages(arg) => run_relay_messages(arg).await, - cli::Command::SendMessage(arg) => run_send_message(arg).await, - } -} - -async fn run_init_bridge(command: cli::InitBridge) -> Result<(), String> { - match command { - cli::InitBridge::MillauToRialto { - millau, - rialto, - rialto_sign, - millau_bridge_params, - } => { - let millau_client = millau.into_client().await?; - let rialto_client = rialto.into_client().await?; - let rialto_sign = rialto_sign.parse()?; - - let rialto_signer_next_index = rialto_client - .next_account_index(rialto_sign.signer.public().into()) - .await?; - - headers_initialize::initialize( - millau_client, - rialto_client.clone(), - millau_bridge_params.millau_initial_header, - millau_bridge_params.millau_initial_authorities, - millau_bridge_params.millau_initial_authorities_set_id, - move |initialization_data| { - Ok(Bytes( - Rialto::sign_transaction( - &rialto_client, - &rialto_sign.signer, - rialto_signer_next_index, - rialto_runtime::SudoCall::sudo(Box::new( - rialto_runtime::BridgeMillauCall::initialize(initialization_data).into(), - )) - .into(), - ) - .encode(), - )) - }, - ) - .await; - } - cli::InitBridge::RialtoToMillau { - rialto, - 
millau, - millau_sign, - rialto_bridge_params, - } => { - let rialto_client = rialto.into_client().await?; - let millau_client = millau.into_client().await?; - let millau_sign = millau_sign.parse()?; - let millau_signer_next_index = millau_client - .next_account_index(millau_sign.signer.public().into()) - .await?; - - headers_initialize::initialize( - rialto_client, - millau_client.clone(), - rialto_bridge_params.rialto_initial_header, - rialto_bridge_params.rialto_initial_authorities, - rialto_bridge_params.rialto_initial_authorities_set_id, - move |initialization_data| { - Ok(Bytes( - Millau::sign_transaction( - &millau_client, - &millau_sign.signer, - millau_signer_next_index, - millau_runtime::SudoCall::sudo(Box::new( - millau_runtime::BridgeRialtoCall::initialize(initialization_data).into(), - )) - .into(), - ) - .encode(), - )) - }, - ) - .await; - } - } - Ok(()) -} - -async fn run_relay_headers(command: cli::RelayHeaders) -> Result<(), String> { - match command { - cli::RelayHeaders::MillauToRialto { - millau, - rialto, - rialto_sign, - prometheus_params, - } => { - let millau_client = millau.into_client().await?; - let rialto_client = rialto.into_client().await?; - let rialto_sign = rialto_sign.parse()?; - millau_headers_to_rialto::run(millau_client, rialto_client, rialto_sign, prometheus_params.into()).await; - } - cli::RelayHeaders::RialtoToMillau { - rialto, - millau, - millau_sign, - prometheus_params, - } => { - let rialto_client = rialto.into_client().await?; - let millau_client = millau.into_client().await?; - let millau_sign = millau_sign.parse()?; - rialto_headers_to_millau::run(rialto_client, millau_client, millau_sign, prometheus_params.into()).await; - } - } - Ok(()) -} - -async fn run_relay_messages(command: cli::RelayMessages) -> Result<(), String> { - match command { - cli::RelayMessages::MillauToRialto { - millau, - millau_sign, - rialto, - rialto_sign, - prometheus_params, - lane, - } => { - let millau_client = millau.into_client().await?; 
- let millau_sign = millau_sign.parse()?; - let rialto_client = rialto.into_client().await?; - let rialto_sign = rialto_sign.parse()?; - - millau_messages_to_rialto::run( - millau_client, - millau_sign, - rialto_client, - rialto_sign, - lane.into(), - prometheus_params.into(), - ); - } - cli::RelayMessages::RialtoToMillau { - rialto, - rialto_sign, - millau, - millau_sign, - prometheus_params, - lane, - } => { - let rialto_client = rialto.into_client().await?; - let rialto_sign = rialto_sign.parse()?; - let millau_client = millau.into_client().await?; - let millau_sign = millau_sign.parse()?; - - rialto_messages_to_millau::run( - rialto_client, - rialto_sign, - millau_client, - millau_sign, - lane.into(), - prometheus_params.into(), - ); - } - } - Ok(()) -} - -async fn run_send_message(command: cli::SendMessage) -> Result<(), String> { - match command { - cli::SendMessage::MillauToRialto { - millau, - millau_sign, - rialto_sign, - lane, - message, - dispatch_weight, - fee, - origin, - .. - } => { - let millau_client = millau.into_client().await?; - let millau_sign = millau_sign.parse()?; - let rialto_sign = rialto_sign.parse()?; - let rialto_call = message.into_call(); - - let payload = - millau_to_rialto_message_payload(&millau_sign, &rialto_sign, &rialto_call, origin, dispatch_weight); - let dispatch_weight = payload.weight; - - let lane = lane.into(); - let fee = get_fee(fee, || { - estimate_message_delivery_and_dispatch_fee( - &millau_client, - bp_rialto::TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD, - lane, - payload.clone(), - ) - }) - .await?; - - let millau_call = millau_runtime::Call::BridgeRialtoMessageLane( - millau_runtime::MessageLaneCall::send_message(lane, payload, fee), - ); - - let signed_millau_call = Millau::sign_transaction( - &millau_client, - &millau_sign.signer, - millau_client - .next_account_index(millau_sign.signer.public().clone().into()) - .await?, - millau_call, - ) - .encode(); - - log::info!( - target: "bridge", - "Sending message to Rialto. 
Size: {}. Dispatch weight: {}. Fee: {}", - signed_millau_call.len(), - dispatch_weight, - fee, - ); - - millau_client.submit_extrinsic(Bytes(signed_millau_call)).await?; - } - cli::SendMessage::RialtoToMillau { - rialto, - rialto_sign, - millau_sign, - lane, - message, - dispatch_weight, - fee, - origin, - .. - } => { - let rialto_client = rialto.into_client().await?; - let rialto_sign = rialto_sign.parse()?; - let millau_sign = millau_sign.parse()?; - let millau_call = message.into_call(); - - let payload = - rialto_to_millau_message_payload(&rialto_sign, &millau_sign, &millau_call, origin, dispatch_weight); - let dispatch_weight = payload.weight; - - let lane = lane.into(); - let fee = get_fee(fee, || { - estimate_message_delivery_and_dispatch_fee( - &rialto_client, - bp_millau::TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD, - lane, - payload.clone(), - ) - }) - .await?; - - let rialto_call = rialto_runtime::Call::BridgeMillauMessageLane( - rialto_runtime::MessageLaneCall::send_message(lane, payload, fee), - ); - - let signed_rialto_call = Rialto::sign_transaction( - &rialto_client, - &rialto_sign.signer, - rialto_client - .next_account_index(rialto_sign.signer.public().clone().into()) - .await?, - rialto_call, - ) - .encode(); - - log::info!( - target: "bridge", - "Sending message to Millau. Size: {}. Dispatch weight: {}. 
Fee: {}", - signed_rialto_call.len(), - dispatch_weight, - fee, - ); - - rialto_client.submit_extrinsic(Bytes(signed_rialto_call)).await?; - } - } - Ok(()) -} - -async fn estimate_message_delivery_and_dispatch_fee( - client: &relay_substrate_client::Client, - estimate_fee_method: &str, - lane: bp_message_lane::LaneId, - payload: P, -) -> Result, relay_substrate_client::Error> { - let encoded_response = client - .state_call(estimate_fee_method.into(), (lane, payload).encode().into(), None) - .await?; - let decoded_response: Option = - Decode::decode(&mut &encoded_response.0[..]).map_err(relay_substrate_client::Error::ResponseParseFailed)?; - Ok(decoded_response) -} - -fn remark_payload(remark_size: Option>, maximal_allowed_size: u32) -> Vec { - match remark_size { - Some(cli::ExplicitOrMaximal::Explicit(remark_size)) => vec![0; remark_size], - Some(cli::ExplicitOrMaximal::Maximal) => vec![0; maximal_allowed_size as _], - None => format!( - "Unix time: {}", - std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH) - .unwrap_or_default() - .as_secs(), - ) - .as_bytes() - .to_vec(), - } -} - -fn rialto_to_millau_message_payload( - rialto_sign: &RialtoSigningParams, - millau_sign: &MillauSigningParams, - millau_call: &millau_runtime::Call, - origin: cli::Origins, - user_specified_dispatch_weight: Option>, -) -> rialto_runtime::millau_messages::ToMillauMessagePayload { - let millau_call_weight = prepare_call_dispatch_weight( - user_specified_dispatch_weight, - cli::ExplicitOrMaximal::Explicit(millau_call.get_dispatch_info().weight), - compute_maximal_message_dispatch_weight(bp_millau::max_extrinsic_weight()), - ); - let rialto_sender_public: bp_rialto::AccountSigner = rialto_sign.signer.public().clone().into(); - let rialto_account_id: bp_rialto::AccountId = rialto_sender_public.into_account(); - let millau_origin_public = millau_sign.signer.public(); - - MessagePayload { - spec_version: millau_runtime::VERSION.spec_version, - weight: 
millau_call_weight, - origin: match origin { - cli::Origins::Source => CallOrigin::SourceAccount(rialto_account_id), - cli::Origins::Target => { - let digest = rialto_runtime::millau_account_ownership_digest( - &millau_call, - rialto_account_id.clone(), - millau_runtime::VERSION.spec_version, - ); - - let digest_signature = millau_sign.signer.sign(&digest); - - CallOrigin::TargetAccount(rialto_account_id, millau_origin_public.into(), digest_signature.into()) - } - }, - call: millau_call.encode(), - } -} - -fn millau_to_rialto_message_payload( - millau_sign: &MillauSigningParams, - rialto_sign: &RialtoSigningParams, - rialto_call: &rialto_runtime::Call, - origin: cli::Origins, - user_specified_dispatch_weight: Option>, -) -> millau_runtime::rialto_messages::ToRialtoMessagePayload { - let rialto_call_weight = prepare_call_dispatch_weight( - user_specified_dispatch_weight, - cli::ExplicitOrMaximal::Explicit(rialto_call.get_dispatch_info().weight), - compute_maximal_message_dispatch_weight(bp_rialto::max_extrinsic_weight()), - ); - let millau_sender_public: bp_millau::AccountSigner = millau_sign.signer.public().clone().into(); - let millau_account_id: bp_millau::AccountId = millau_sender_public.into_account(); - let rialto_origin_public = rialto_sign.signer.public(); - - MessagePayload { - spec_version: rialto_runtime::VERSION.spec_version, - weight: rialto_call_weight, - origin: match origin { - cli::Origins::Source => CallOrigin::SourceAccount(millau_account_id), - cli::Origins::Target => { - let digest = millau_runtime::rialto_account_ownership_digest( - &rialto_call, - millau_account_id.clone(), - rialto_runtime::VERSION.spec_version, - ); - - let digest_signature = rialto_sign.signer.sign(&digest); - - CallOrigin::TargetAccount(millau_account_id, rialto_origin_public.into(), digest_signature.into()) - } - }, - call: rialto_call.encode(), - } -} - -fn prepare_call_dispatch_weight( - user_specified_dispatch_weight: Option>, - weight_from_pre_dispatch_call: 
cli::ExplicitOrMaximal, - maximal_allowed_weight: Weight, -) -> Weight { - match user_specified_dispatch_weight.unwrap_or(weight_from_pre_dispatch_call) { - cli::ExplicitOrMaximal::Explicit(weight) => weight, - cli::ExplicitOrMaximal::Maximal => maximal_allowed_weight, - } -} - -async fn get_fee(fee: Option, f: F) -> Result -where - Fee: Decode, - F: FnOnce() -> R, - R: std::future::Future, E>>, - E: std::fmt::Debug, -{ - match fee { - Some(fee) => Ok(fee), - None => match f().await { - Ok(Some(fee)) => Ok(fee), - Ok(None) => Err("Failed to estimate message fee. Message is too heavy?".into()), - Err(error) => Err(format!("Failed to estimate message fee: {:?}", error)), - }, - } -} - -fn compute_maximal_message_dispatch_weight(maximal_extrinsic_weight: Weight) -> Weight { - bridge_runtime_common::messages::target::maximal_incoming_message_dispatch_weight(maximal_extrinsic_weight) -} - -fn compute_maximal_message_arguments_size( - maximal_source_extrinsic_size: u32, - maximal_target_extrinsic_size: u32, -) -> u32 { - // assume that both signed extensions and other arguments fit 1KB - let service_tx_bytes_on_source_chain = 1024; - let maximal_source_extrinsic_size = maximal_source_extrinsic_size - service_tx_bytes_on_source_chain; - let maximal_call_size = - bridge_runtime_common::messages::target::maximal_incoming_message_size(maximal_target_extrinsic_size); - let maximal_call_size = if maximal_call_size > maximal_source_extrinsic_size { - maximal_source_extrinsic_size - } else { - maximal_call_size - }; - - // bytes in Call encoding that are used to encode everything except arguments - let service_bytes = 1 + 1 + 4; - maximal_call_size - service_bytes -} - -impl crate::cli::RialtoSigningParams { - /// Parse CLI parameters into typed signing params. 
- pub fn parse(self) -> Result { - RialtoSigningParams::from_suri(&self.rialto_signer, self.rialto_signer_password.as_deref()) - .map_err(|e| format!("Failed to parse rialto-signer: {:?}", e)) - } -} - -impl crate::cli::MillauSigningParams { - /// Parse CLI parameters into typed signing params. - pub fn parse(self) -> Result { - MillauSigningParams::from_suri(&self.millau_signer, self.millau_signer_password.as_deref()) - .map_err(|e| format!("Failed to parse millau-signer: {:?}", e)) - } -} - -impl crate::cli::MillauConnectionParams { - /// Convert CLI connection parameters into Millau RPC Client. - pub async fn into_client(self) -> relay_substrate_client::Result { - MillauClient::new(ConnectionParams { - host: self.millau_host, - port: self.millau_port, - }) - .await - } -} -impl crate::cli::RialtoConnectionParams { - /// Convert CLI connection parameters into Rialto RPC Client. - pub async fn into_client(self) -> relay_substrate_client::Result { - RialtoClient::new(ConnectionParams { - host: self.rialto_host, - port: self.rialto_port, - }) - .await - } -} - -impl crate::cli::ToRialtoMessage { - /// Convert CLI call request into runtime `Call` instance. - pub fn into_call(self) -> rialto_runtime::Call { - match self { - cli::ToRialtoMessage::Remark { remark_size } => { - rialto_runtime::Call::System(rialto_runtime::SystemCall::remark(remark_payload( - remark_size, - compute_maximal_message_arguments_size( - bp_millau::max_extrinsic_size(), - bp_rialto::max_extrinsic_size(), - ), - ))) - } - cli::ToRialtoMessage::Transfer { recipient, amount } => { - rialto_runtime::Call::Balances(rialto_runtime::BalancesCall::transfer(recipient, amount)) - } - } - } -} - -impl crate::cli::ToMillauMessage { - /// Convert CLI call request into runtime `Call` instance. 
- pub fn into_call(self) -> millau_runtime::Call { - match self { - cli::ToMillauMessage::Remark { remark_size } => { - millau_runtime::Call::System(millau_runtime::SystemCall::remark(remark_payload( - remark_size, - compute_maximal_message_arguments_size( - bp_rialto::max_extrinsic_size(), - bp_millau::max_extrinsic_size(), - ), - ))) - } - cli::ToMillauMessage::Transfer { recipient, amount } => { - millau_runtime::Call::Balances(millau_runtime::BalancesCall::transfer(recipient, amount)) - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use bp_message_lane::source_chain::TargetHeaderChain; - use sp_core::Pair; - use sp_runtime::traits::{IdentifyAccount, Verify}; - - #[test] - fn millau_signature_is_valid_on_rialto() { - let millau_sign = relay_millau_client::SigningParams::from_suri("//Dave", None).unwrap(); - - let call = rialto_runtime::Call::System(rialto_runtime::SystemCall::remark(vec![])); - - let millau_public: bp_millau::AccountSigner = millau_sign.signer.public().clone().into(); - let millau_account_id: bp_millau::AccountId = millau_public.into_account(); - - let digest = millau_runtime::rialto_account_ownership_digest( - &call, - millau_account_id, - rialto_runtime::VERSION.spec_version, - ); - - let rialto_signer = relay_rialto_client::SigningParams::from_suri("//Dave", None).unwrap(); - let signature = rialto_signer.signer.sign(&digest); - - assert!(signature.verify(&digest[..], &rialto_signer.signer.public())); - } - - #[test] - fn rialto_signature_is_valid_on_millau() { - let rialto_sign = relay_rialto_client::SigningParams::from_suri("//Dave", None).unwrap(); - - let call = millau_runtime::Call::System(millau_runtime::SystemCall::remark(vec![])); - - let rialto_public: bp_rialto::AccountSigner = rialto_sign.signer.public().clone().into(); - let rialto_account_id: bp_rialto::AccountId = rialto_public.into_account(); - - let digest = rialto_runtime::millau_account_ownership_digest( - &call, - rialto_account_id, - 
millau_runtime::VERSION.spec_version, - ); - - let millau_signer = relay_millau_client::SigningParams::from_suri("//Dave", None).unwrap(); - let signature = millau_signer.signer.sign(&digest); - - assert!(signature.verify(&digest[..], &millau_signer.signer.public())); - } - - #[test] - fn maximal_rialto_to_millau_message_arguments_size_is_computed_correctly() { - use rialto_runtime::millau_messages::Millau; - - let maximal_remark_size = - compute_maximal_message_arguments_size(bp_rialto::max_extrinsic_size(), bp_millau::max_extrinsic_size()); - - let call: millau_runtime::Call = millau_runtime::SystemCall::remark(vec![42; maximal_remark_size as _]).into(); - let payload = pallet_bridge_call_dispatch::MessagePayload { - spec_version: Default::default(), - weight: call.get_dispatch_info().weight, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: call.encode(), - }; - assert_eq!(Millau::verify_message(&payload), Ok(())); - - let call: millau_runtime::Call = - millau_runtime::SystemCall::remark(vec![42; (maximal_remark_size + 1) as _]).into(); - let payload = pallet_bridge_call_dispatch::MessagePayload { - spec_version: Default::default(), - weight: call.get_dispatch_info().weight, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: call.encode(), - }; - assert!(Millau::verify_message(&payload).is_err()); - } - - #[test] - fn maximal_size_remark_to_rialto_is_generated_correctly() { - assert!( - bridge_runtime_common::messages::target::maximal_incoming_message_size( - bp_rialto::max_extrinsic_size() - ) > bp_millau::max_extrinsic_size(), - "We can't actually send maximal messages to Rialto from Millau, because Millau extrinsics can't be that large", - ) - } - - #[test] - fn maximal_rialto_to_millau_message_dispatch_weight_is_computed_correctly() { - use rialto_runtime::millau_messages::Millau; - - let maximal_dispatch_weight = compute_maximal_message_dispatch_weight(bp_millau::max_extrinsic_weight()); - let call: 
millau_runtime::Call = rialto_runtime::SystemCall::remark(vec![]).into(); - - let payload = pallet_bridge_call_dispatch::MessagePayload { - spec_version: Default::default(), - weight: maximal_dispatch_weight, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: call.encode(), - }; - assert_eq!(Millau::verify_message(&payload), Ok(())); - - let payload = pallet_bridge_call_dispatch::MessagePayload { - spec_version: Default::default(), - weight: maximal_dispatch_weight + 1, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: call.encode(), - }; - assert!(Millau::verify_message(&payload).is_err()); - } - - #[test] - fn maximal_weight_fill_block_to_rialto_is_generated_correctly() { - use millau_runtime::rialto_messages::Rialto; - - let maximal_dispatch_weight = compute_maximal_message_dispatch_weight(bp_rialto::max_extrinsic_weight()); - let call: rialto_runtime::Call = millau_runtime::SystemCall::remark(vec![]).into(); - - let payload = pallet_bridge_call_dispatch::MessagePayload { - spec_version: Default::default(), - weight: maximal_dispatch_weight, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: call.encode(), - }; - assert_eq!(Rialto::verify_message(&payload), Ok(())); - - let payload = pallet_bridge_call_dispatch::MessagePayload { - spec_version: Default::default(), - weight: maximal_dispatch_weight + 1, - origin: pallet_bridge_call_dispatch::CallOrigin::SourceRoot, - call: call.encode(), - }; - assert!(Rialto::verify_message(&payload).is_err()); - } -} diff --git a/polkadot/bridges/relays/substrate/src/messages_lane.rs b/polkadot/bridges/relays/substrate/src/messages_lane.rs deleted file mode 100644 index 78b5f5c02488033ddb7c2be2a2166cfef669ec2c..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate/src/messages_lane.rs +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::messages_source::SubstrateMessagesProof; -use crate::messages_target::SubstrateMessagesReceivingProof; - -use async_trait::async_trait; -use bp_message_lane::MessageNonce; -use codec::Encode; -use frame_support::weights::Weight; -use messages_relay::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}; -use relay_substrate_client::{BlockNumberOf, Chain, Client, Error as SubstrateError, HashOf}; -use relay_utils::BlockNumberBase; -use std::ops::RangeInclusive; - -/// Message sync pipeline for Substrate <-> Substrate relays. -#[async_trait] -pub trait SubstrateMessageLane: MessageLane { - /// Name of the runtime method that returns dispatch weight of outbound messages at the source chain. - const OUTBOUND_LANE_MESSAGES_DISPATCH_WEIGHT_METHOD: &'static str; - /// Name of the runtime method that returns latest generated nonce at the source chain. - const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str; - /// Name of the runtime method that returns latest received (confirmed) nonce at the the source chain. - const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str; - - /// Name of the runtime method that returns latest received nonce at the target chain. 
- const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str; - /// Name of the runtime method that returns latest confirmed (reward-paid) nonce at the target chain. - const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str; - /// Numebr of the runtime method that returns state of "unrewarded relayers" set at the target chain. - const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str; - - /// Name of the runtime method that returns id of best finalized source header at target chain. - const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str; - /// Name of the runtime method that returns id of best finalized target header at source chain. - const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str; - - /// Signed transaction type of the source chain. - type SourceSignedTransaction: Send + Sync + Encode; - /// Signed transaction type of the target chain. - type TargetSignedTransaction: Send + Sync + Encode; - - /// Make messages delivery transaction. - async fn make_messages_delivery_transaction( - &self, - generated_at_header: SourceHeaderIdOf, - nonces: RangeInclusive, - proof: Self::MessagesProof, - ) -> Result; - - /// Make messages receiving proof transaction. - async fn make_messages_receiving_proof_transaction( - &self, - generated_at_header: TargetHeaderIdOf, - proof: Self::MessagesReceivingProof, - ) -> Result; -} - -/// Substrate-to-Substrate message lane. -#[derive(Debug)] -pub struct SubstrateMessageLaneToSubstrate { - /// Client for the source Substrate chain. - pub(crate) source_client: Client, - /// Parameters required to sign transactions for source chain. - pub(crate) source_sign: SourceSignParams, - /// Client for the target Substrate chain. - pub(crate) target_client: Client, - /// Parameters required to sign transactions for target chain. - pub(crate) target_sign: TargetSignParams, - /// Account id of relayer at the source chain. 
- pub(crate) relayer_id_at_source: Source::AccountId, -} - -impl Clone - for SubstrateMessageLaneToSubstrate -{ - fn clone(&self) -> Self { - Self { - source_client: self.source_client.clone(), - source_sign: self.source_sign.clone(), - target_client: self.target_client.clone(), - target_sign: self.target_sign.clone(), - relayer_id_at_source: self.relayer_id_at_source.clone(), - } - } -} - -impl MessageLane - for SubstrateMessageLaneToSubstrate -where - SourceSignParams: Clone + Send + Sync + 'static, - TargetSignParams: Clone + Send + Sync + 'static, - BlockNumberOf: BlockNumberBase, - BlockNumberOf: BlockNumberBase, -{ - const SOURCE_NAME: &'static str = Source::NAME; - const TARGET_NAME: &'static str = Target::NAME; - - type MessagesProof = SubstrateMessagesProof; - type MessagesReceivingProof = SubstrateMessagesReceivingProof; - - type SourceHeaderNumber = BlockNumberOf; - type SourceHeaderHash = HashOf; - - type TargetHeaderNumber = BlockNumberOf; - type TargetHeaderHash = HashOf; -} - -/// Returns maximal number of messages and their maximal cumulative dispatch weight, based -/// on given chain parameters. -pub fn select_delivery_transaction_limits( - max_extrinsic_weight: Weight, - max_unconfirmed_messages_at_inbound_lane: MessageNonce, -) -> (MessageNonce, Weight) { - // We may try to guess accurate value, based on maximal number of messages and per-message - // weight overhead, but the relay loop isn't using this info in a super-accurate way anyway. - // So just a rough guess: let's say 1/3 of max tx weight is for tx itself and the rest is - // for messages dispatch. - - // Another thing to keep in mind is that our runtimes (when this code was written) accept - // messages with dispatch weight <= max_extrinsic_weight/2. So we can't reserve less than - // that for dispatch. 
- - let weight_for_delivery_tx = max_extrinsic_weight / 3; - let weight_for_messages_dispatch = max_extrinsic_weight - weight_for_delivery_tx; - - let delivery_tx_base_weight = - W::receive_messages_proof_overhead() + W::receive_messages_proof_outbound_lane_state_overhead(); - let delivery_tx_weight_rest = weight_for_delivery_tx - delivery_tx_base_weight; - let max_number_of_messages = std::cmp::min( - delivery_tx_weight_rest / W::receive_messages_proof_messages_overhead(1), - max_unconfirmed_messages_at_inbound_lane, - ); - - assert!( - max_number_of_messages > 0, - "Relay should fit at least one message in every delivery transaction", - ); - assert!( - weight_for_messages_dispatch >= max_extrinsic_weight / 2, - "Relay shall be able to deliver messages with dispatch weight = max_extrinsic_weight / 2", - ); - - (max_number_of_messages, weight_for_messages_dispatch) -} - -#[cfg(test)] -mod tests { - use super::*; - - type RialtoToMillauMessageLaneWeights = pallet_message_lane::weights::RialtoWeight; - - #[test] - fn select_delivery_transaction_limits_works() { - let (max_count, max_weight) = select_delivery_transaction_limits::( - bp_millau::max_extrinsic_weight(), - bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, - ); - assert_eq!( - (max_count, max_weight), - // We don't actually care about these values, so feel free to update them whenever test - // fails. The only thing to do before that is to ensure that new values looks sane: i.e. weight - // reserved for messages dispatch allows dispatch of non-trivial messages. - // - // Any significant change in this values should attract additional attention. 
- (955, 216_583_333_334), - ); - } -} diff --git a/polkadot/bridges/relays/substrate/src/messages_source.rs b/polkadot/bridges/relays/substrate/src/messages_source.rs deleted file mode 100644 index db894df8c7ac68af175381a9088a807815ad0200..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate/src/messages_source.rs +++ /dev/null @@ -1,373 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate client as Substrate messages source. The chain we connect to should have -//! runtime that implements `HeaderApi` to allow bridging with -//! chain. 
- -use crate::messages_lane::SubstrateMessageLane; - -use async_trait::async_trait; -use bp_message_lane::{LaneId, MessageNonce}; -use bp_runtime::InstanceId; -use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; -use codec::{Decode, Encode}; -use frame_support::weights::Weight; -use messages_relay::{ - message_lane::{SourceHeaderIdOf, TargetHeaderIdOf}, - message_lane_loop::{ - ClientState, MessageProofParameters, MessageWeights, MessageWeightsMap, SourceClient, SourceClientState, - }, -}; -use relay_substrate_client::{Chain, Client, Error as SubstrateError, HashOf, HeaderIdOf}; -use relay_utils::{relay_loop::Client as RelayClient, BlockNumberBase, HeaderId}; -use sp_core::Bytes; -use sp_runtime::{traits::Header as HeaderT, DeserializeOwned}; -use std::ops::RangeInclusive; - -/// Intermediate message proof returned by the source Substrate node. Includes everything -/// required to submit to the target node: cumulative dispatch weight of bundled messages and -/// the proof itself. -pub type SubstrateMessagesProof = (Weight, FromBridgedChainMessagesProof>); - -/// Substrate client as Substrate messages source. -pub struct SubstrateMessagesSource { - client: Client, - lane: P, - lane_id: LaneId, - instance: InstanceId, -} - -impl SubstrateMessagesSource { - /// Create new Substrate headers source. - pub fn new(client: Client, lane: P, lane_id: LaneId, instance: InstanceId) -> Self { - SubstrateMessagesSource { - client, - lane, - lane_id, - instance, - } - } -} - -impl Clone for SubstrateMessagesSource { - fn clone(&self) -> Self { - Self { - client: self.client.clone(), - lane: self.lane.clone(), - lane_id: self.lane_id, - instance: self.instance, - } - } -} - -#[async_trait] -impl RelayClient for SubstrateMessagesSource { - type Error = SubstrateError; - - async fn reconnect(&mut self) -> Result<(), SubstrateError> { - self.client.reconnect().await - } -} - -#[async_trait] -impl SourceClient

for SubstrateMessagesSource -where - C: Chain, - C::Header: DeserializeOwned, - C::Index: DeserializeOwned, - C::BlockNumber: BlockNumberBase, - P: SubstrateMessageLane< - MessagesProof = SubstrateMessagesProof, - SourceHeaderNumber = ::Number, - SourceHeaderHash = ::Hash, - >, - P::TargetHeaderNumber: Decode, - P::TargetHeaderHash: Decode, -{ - async fn state(&self) -> Result, SubstrateError> { - // we can't continue to deliver confirmations if source node is out of sync, because - // it may have already received confirmations that we're going to deliver - self.client.ensure_synced().await?; - - read_client_state::<_, P::TargetHeaderHash, P::TargetHeaderNumber>( - &self.client, - P::BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE, - ) - .await - } - - async fn latest_generated_nonce( - &self, - id: SourceHeaderIdOf

, - ) -> Result<(SourceHeaderIdOf

, MessageNonce), SubstrateError> { - let encoded_response = self - .client - .state_call( - P::OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD.into(), - Bytes(self.lane_id.encode()), - Some(id.1), - ) - .await?; - let latest_generated_nonce: MessageNonce = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - Ok((id, latest_generated_nonce)) - } - - async fn latest_confirmed_received_nonce( - &self, - id: SourceHeaderIdOf

, - ) -> Result<(SourceHeaderIdOf

, MessageNonce), SubstrateError> { - let encoded_response = self - .client - .state_call( - P::OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD.into(), - Bytes(self.lane_id.encode()), - Some(id.1), - ) - .await?; - let latest_received_nonce: MessageNonce = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - Ok((id, latest_received_nonce)) - } - - async fn generated_messages_weights( - &self, - id: SourceHeaderIdOf

, - nonces: RangeInclusive, - ) -> Result { - let encoded_response = self - .client - .state_call( - P::OUTBOUND_LANE_MESSAGES_DISPATCH_WEIGHT_METHOD.into(), - Bytes((self.lane_id, nonces.start(), nonces.end()).encode()), - Some(id.1), - ) - .await?; - - make_message_weights_map::( - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?, - nonces, - ) - } - - async fn prove_messages( - &self, - id: SourceHeaderIdOf

, - nonces: RangeInclusive, - proof_parameters: MessageProofParameters, - ) -> Result<(SourceHeaderIdOf

, RangeInclusive, P::MessagesProof), SubstrateError> { - let proof = self - .client - .prove_messages( - self.instance, - self.lane_id, - nonces.clone(), - proof_parameters.outbound_state_proof_required, - id.1, - ) - .await? - .iter_nodes() - .collect(); - let proof = FromBridgedChainMessagesProof { - bridged_header_hash: id.1, - storage_proof: proof, - lane: self.lane_id, - nonces_start: *nonces.start(), - nonces_end: *nonces.end(), - }; - Ok((id, nonces, (proof_parameters.dispatch_weight, proof))) - } - - async fn submit_messages_receiving_proof( - &self, - generated_at_block: TargetHeaderIdOf

, - proof: P::MessagesReceivingProof, - ) -> Result<(), SubstrateError> { - let tx = self - .lane - .make_messages_receiving_proof_transaction(generated_at_block, proof) - .await?; - self.client.submit_extrinsic(Bytes(tx.encode())).await?; - Ok(()) - } -} - -pub async fn read_client_state( - self_client: &Client, - best_finalized_header_id_method_name: &str, -) -> Result, HeaderId>, SubstrateError> -where - SelfChain: Chain, - SelfChain::Header: DeserializeOwned, - SelfChain::Index: DeserializeOwned, - BridgedHeaderHash: Decode, - BridgedHeaderNumber: Decode, -{ - // let's read our state first: we need best finalized header hash on **this** chain - let self_best_finalized_header_hash = self_client.best_finalized_header_hash().await?; - let self_best_finalized_header = self_client.header_by_hash(self_best_finalized_header_hash).await?; - let self_best_finalized_id = HeaderId(*self_best_finalized_header.number(), self_best_finalized_header_hash); - - // now let's read our best header on **this** chain - let self_best_header = self_client.best_header().await?; - let self_best_hash = self_best_header.hash(); - let self_best_id = HeaderId(*self_best_header.number(), self_best_hash); - - // now let's read id of best finalized peer header at our best finalized block - let encoded_best_finalized_peer_on_self = self_client - .state_call( - best_finalized_header_id_method_name.into(), - Bytes(Vec::new()), - Some(self_best_hash), - ) - .await?; - let decoded_best_finalized_peer_on_self: (BridgedHeaderNumber, BridgedHeaderHash) = - Decode::decode(&mut &encoded_best_finalized_peer_on_self.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - let peer_on_self_best_finalized_id = HeaderId( - decoded_best_finalized_peer_on_self.0, - decoded_best_finalized_peer_on_self.1, - ); - - Ok(ClientState { - best_self: self_best_id, - best_finalized_self: self_best_finalized_id, - best_finalized_peer_at_best_self: peer_on_self_best_finalized_id, - }) -} - -fn make_message_weights_map( - 
weights: Vec<(MessageNonce, Weight, u32)>, - nonces: RangeInclusive, -) -> Result { - let make_missing_nonce_error = |expected_nonce| { - Err(SubstrateError::Custom(format!( - "Missing nonce {} in messages_dispatch_weight call result. Expected all nonces from {:?}", - expected_nonce, nonces, - ))) - }; - - let mut weights_map = MessageWeightsMap::new(); - - // this is actually prevented by external logic - if nonces.is_empty() { - return Ok(weights_map); - } - - // check if last nonce is missing - loop below is not checking this - let last_nonce_is_missing = weights - .last() - .map(|(last_nonce, _, _)| last_nonce != nonces.end()) - .unwrap_or(true); - if last_nonce_is_missing { - return make_missing_nonce_error(*nonces.end()); - } - - let mut expected_nonce = *nonces.start(); - let mut is_at_head = true; - - for (nonce, weight, size) in weights { - match (nonce == expected_nonce, is_at_head) { - (true, _) => (), - (false, true) => { - // this may happen if some messages were already pruned from the source node - // - // this is not critical error and will be auto-resolved by messages lane (and target node) - log::info!( - target: "bridge", - "Some messages are missing from the {} node: {:?}. 
Target node may be out of sync?", - C::NAME, - expected_nonce..nonce, - ); - } - (false, false) => { - // some nonces are missing from the middle/tail of the range - // - // this is critical error, because we can't miss any nonces - return make_missing_nonce_error(expected_nonce); - } - } - - weights_map.insert( - nonce, - MessageWeights { - weight, - size: size as _, - }, - ); - expected_nonce = nonce + 1; - is_at_head = false; - } - - Ok(weights_map) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn make_message_weights_map_succeeds_if_no_messages_are_missing() { - assert_eq!( - make_message_weights_map::(vec![(1, 0, 0), (2, 0, 0), (3, 0, 0)], 1..=3,) - .unwrap(), - vec![ - (1, MessageWeights { weight: 0, size: 0 }), - (2, MessageWeights { weight: 0, size: 0 }), - (3, MessageWeights { weight: 0, size: 0 }), - ] - .into_iter() - .collect(), - ); - } - - #[test] - fn make_message_weights_map_succeeds_if_head_messages_are_missing() { - assert_eq!( - make_message_weights_map::(vec![(2, 0, 0), (3, 0, 0)], 1..=3,).unwrap(), - vec![ - (2, MessageWeights { weight: 0, size: 0 }), - (3, MessageWeights { weight: 0, size: 0 }), - ] - .into_iter() - .collect(), - ); - } - - #[test] - fn make_message_weights_map_fails_if_mid_messages_are_missing() { - assert!(matches!( - make_message_weights_map::(vec![(1, 0, 0), (3, 0, 0)], 1..=3,), - Err(SubstrateError::Custom(_)) - )); - } - - #[test] - fn make_message_weights_map_fails_if_tail_messages_are_missing() { - assert!(matches!( - make_message_weights_map::(vec![(1, 0, 0), (2, 0, 0)], 1..=3,), - Err(SubstrateError::Custom(_)) - )); - } - - #[test] - fn make_message_weights_map_fails_if_all_messages_are_missing() { - assert!(matches!( - make_message_weights_map::(vec![], 1..=3), - Err(SubstrateError::Custom(_)) - )); - } -} diff --git a/polkadot/bridges/relays/substrate/src/messages_target.rs b/polkadot/bridges/relays/substrate/src/messages_target.rs deleted file mode 100644 index 
e5ac8880c845d5acb097906751cb220d9b10b4a4..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate/src/messages_target.rs +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate client as Substrate messages target. The chain we connect to should have -//! runtime that implements `HeaderApi` to allow bridging with -//! chain. - -use crate::messages_lane::SubstrateMessageLane; -use crate::messages_source::read_client_state; - -use async_trait::async_trait; -use bp_message_lane::{LaneId, MessageNonce, UnrewardedRelayersState}; -use bp_runtime::InstanceId; -use bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof; -use codec::{Decode, Encode}; -use messages_relay::{ - message_lane::{SourceHeaderIdOf, TargetHeaderIdOf}, - message_lane_loop::{TargetClient, TargetClientState}, -}; -use relay_substrate_client::{Chain, Client, Error as SubstrateError, HashOf}; -use relay_utils::{relay_loop::Client as RelayClient, BlockNumberBase}; -use sp_core::Bytes; -use sp_runtime::{traits::Header as HeaderT, DeserializeOwned}; -use std::ops::RangeInclusive; - -/// Message receiving proof returned by the target Substrate node. 
-pub type SubstrateMessagesReceivingProof = ( - UnrewardedRelayersState, - FromBridgedChainMessagesDeliveryProof>, -); - -/// Substrate client as Substrate messages target. -pub struct SubstrateMessagesTarget { - client: Client, - lane: P, - lane_id: LaneId, - instance: InstanceId, -} - -impl SubstrateMessagesTarget { - /// Create new Substrate headers target. - pub fn new(client: Client, lane: P, lane_id: LaneId, instance: InstanceId) -> Self { - SubstrateMessagesTarget { - client, - lane, - lane_id, - instance, - } - } -} - -impl Clone for SubstrateMessagesTarget { - fn clone(&self) -> Self { - Self { - client: self.client.clone(), - lane: self.lane.clone(), - lane_id: self.lane_id, - instance: self.instance, - } - } -} - -#[async_trait] -impl RelayClient for SubstrateMessagesTarget { - type Error = SubstrateError; - - async fn reconnect(&mut self) -> Result<(), SubstrateError> { - self.client.reconnect().await - } -} - -#[async_trait] -impl TargetClient

for SubstrateMessagesTarget -where - C: Chain, - C::Header: DeserializeOwned, - C::Index: DeserializeOwned, - ::Number: BlockNumberBase, - P: SubstrateMessageLane< - MessagesReceivingProof = SubstrateMessagesReceivingProof, - TargetHeaderNumber = ::Number, - TargetHeaderHash = ::Hash, - >, - P::SourceHeaderNumber: Decode, - P::SourceHeaderHash: Decode, -{ - async fn state(&self) -> Result, SubstrateError> { - // we can't continue to deliver messages if target node is out of sync, because - // it may have already received (some of) messages that we're going to deliver - self.client.ensure_synced().await?; - - read_client_state::<_, P::SourceHeaderHash, P::SourceHeaderNumber>( - &self.client, - P::BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET, - ) - .await - } - - async fn latest_received_nonce( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, MessageNonce), SubstrateError> { - let encoded_response = self - .client - .state_call( - P::INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD.into(), - Bytes(self.lane_id.encode()), - Some(id.1), - ) - .await?; - let latest_received_nonce: MessageNonce = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - Ok((id, latest_received_nonce)) - } - - async fn latest_confirmed_received_nonce( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, MessageNonce), SubstrateError> { - let encoded_response = self - .client - .state_call( - P::INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD.into(), - Bytes(self.lane_id.encode()), - Some(id.1), - ) - .await?; - let latest_received_nonce: MessageNonce = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - Ok((id, latest_received_nonce)) - } - - async fn unrewarded_relayers_state( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, UnrewardedRelayersState), SubstrateError> { - let encoded_response = self - .client - .state_call( - P::INBOUND_LANE_UNREWARDED_RELAYERS_STATE.into(), - Bytes(self.lane_id.encode()), - Some(id.1), - ) - .await?; - let unrewarded_relayers_state: UnrewardedRelayersState = - Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; - Ok((id, unrewarded_relayers_state)) - } - - async fn prove_messages_receiving( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, P::MessagesReceivingProof), SubstrateError> { - let (id, relayers_state) = self.unrewarded_relayers_state(id).await?; - let proof = self - .client - .prove_messages_delivery(self.instance, self.lane_id, id.1) - .await?; - let proof = FromBridgedChainMessagesDeliveryProof { - bridged_header_hash: id.1, - storage_proof: proof, - lane: self.lane_id, - }; - Ok((id, (relayers_state, proof))) - } - - async fn submit_messages_proof( - &self, - generated_at_header: SourceHeaderIdOf

, - nonces: RangeInclusive, - proof: P::MessagesProof, - ) -> Result, SubstrateError> { - let tx = self - .lane - .make_messages_delivery_transaction(generated_at_header, nonces.clone(), proof) - .await?; - self.client.submit_extrinsic(Bytes(tx.encode())).await?; - Ok(nonces) - } -} diff --git a/polkadot/bridges/relays/substrate/src/millau_headers_to_rialto.rs b/polkadot/bridges/relays/substrate/src/millau_headers_to_rialto.rs deleted file mode 100644 index 8b77e71657a09995f685f3e12aae77b0405600fe..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate/src/millau_headers_to_rialto.rs +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Millau-to-Rialto headers sync entrypoint. 
- -use crate::{ - headers_pipeline::{SubstrateHeadersSyncPipeline, SubstrateHeadersToSubstrate}, - MillauClient, RialtoClient, -}; - -use async_trait::async_trait; -use bp_millau::{ - BEST_MILLAU_BLOCKS_METHOD, FINALIZED_MILLAU_BLOCK_METHOD, INCOMPLETE_MILLAU_HEADERS_METHOD, - IS_KNOWN_MILLAU_BLOCK_METHOD, -}; -use headers_relay::sync_types::QueuedHeader; -use relay_millau_client::{HeaderId as MillauHeaderId, Millau, SyncHeader as MillauSyncHeader}; -use relay_rialto_client::{BridgeMillauCall, Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{Error as SubstrateError, TransactionSignScheme}; -use sp_core::Pair; -use sp_runtime::Justification; - -/// Millau-to-Rialto headers sync pipeline. -pub(crate) type MillauHeadersToRialto = - SubstrateHeadersToSubstrate; -/// Millau header in-the-queue. -type QueuedMillauHeader = QueuedHeader; - -#[async_trait] -impl SubstrateHeadersSyncPipeline for MillauHeadersToRialto { - const BEST_BLOCK_METHOD: &'static str = BEST_MILLAU_BLOCKS_METHOD; - const FINALIZED_BLOCK_METHOD: &'static str = FINALIZED_MILLAU_BLOCK_METHOD; - const IS_KNOWN_BLOCK_METHOD: &'static str = IS_KNOWN_MILLAU_BLOCK_METHOD; - const INCOMPLETE_HEADERS_METHOD: &'static str = INCOMPLETE_MILLAU_HEADERS_METHOD; - - type SignedTransaction = ::SignedTransaction; - - async fn make_submit_header_transaction( - &self, - header: QueuedMillauHeader, - ) -> Result { - let account_id = self.target_sign.signer.public().as_array_ref().clone().into(); - let nonce = self.target_client.next_account_index(account_id).await?; - let call = BridgeMillauCall::import_signed_header(header.header().clone().into_inner()).into(); - let transaction = Rialto::sign_transaction(&self.target_client, &self.target_sign.signer, nonce, call); - Ok(transaction) - } - - async fn make_complete_header_transaction( - &self, - id: MillauHeaderId, - completion: Justification, - ) -> Result { - let account_id = self.target_sign.signer.public().as_array_ref().clone().into(); - 
let nonce = self.target_client.next_account_index(account_id).await?; - let call = BridgeMillauCall::finalize_header(id.1, completion).into(); - let transaction = Rialto::sign_transaction(&self.target_client, &self.target_sign.signer, nonce, call); - Ok(transaction) - } -} - -/// Run Millau-to-Rialto headers sync. -pub async fn run( - millau_client: MillauClient, - rialto_client: RialtoClient, - rialto_sign: RialtoSigningParams, - metrics_params: Option, -) { - crate::headers_pipeline::run( - MillauHeadersToRialto::new(rialto_client.clone(), rialto_sign), - millau_client, - rialto_client, - metrics_params, - ) - .await; -} diff --git a/polkadot/bridges/relays/substrate/src/millau_messages_to_rialto.rs b/polkadot/bridges/relays/substrate/src/millau_messages_to_rialto.rs deleted file mode 100644 index ebab5cfb381d3d6a33b833d14c459408877b9ac1..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate/src/millau_messages_to_rialto.rs +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Millau-to-Rialto messages sync entrypoint. 
- -use crate::messages_lane::{select_delivery_transaction_limits, SubstrateMessageLane, SubstrateMessageLaneToSubstrate}; -use crate::messages_source::SubstrateMessagesSource; -use crate::messages_target::SubstrateMessagesTarget; -use crate::{MillauClient, RialtoClient}; - -use async_trait::async_trait; -use bp_message_lane::{LaneId, MessageNonce}; -use bp_runtime::{MILLAU_BRIDGE_INSTANCE, RIALTO_BRIDGE_INSTANCE}; -use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; -use codec::Encode; -use frame_support::dispatch::GetDispatchInfo; -use messages_relay::message_lane::MessageLane; -use relay_millau_client::{HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams}; -use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{Chain, Error as SubstrateError, TransactionSignScheme}; -use relay_utils::metrics::MetricsParams; -use sp_core::Pair; -use std::{ops::RangeInclusive, time::Duration}; - -/// Millau-to-Rialto message lane. 
-type MillauMessagesToRialto = SubstrateMessageLaneToSubstrate; - -#[async_trait] -impl SubstrateMessageLane for MillauMessagesToRialto { - const OUTBOUND_LANE_MESSAGES_DISPATCH_WEIGHT_METHOD: &'static str = - bp_rialto::TO_RIALTO_MESSAGES_DISPATCH_WEIGHT_METHOD; - const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str = - bp_rialto::TO_RIALTO_LATEST_GENERATED_NONCE_METHOD; - const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rialto::TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD; - - const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_millau::FROM_MILLAU_LATEST_RECEIVED_NONCE_METHOD; - const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str = - bp_millau::FROM_MILLAU_LATEST_CONFIRMED_NONCE_METHOD; - const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_millau::FROM_MILLAU_UNREWARDED_RELAYERS_STATE; - - const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_millau::FINALIZED_MILLAU_BLOCK_METHOD; - const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_rialto::FINALIZED_RIALTO_BLOCK_METHOD; - - type SourceSignedTransaction = ::SignedTransaction; - type TargetSignedTransaction = ::SignedTransaction; - - async fn make_messages_receiving_proof_transaction( - &self, - _generated_at_block: RialtoHeaderId, - proof: ::MessagesReceivingProof, - ) -> Result { - let (relayers_state, proof) = proof; - let account_id = self.source_sign.signer.public().as_array_ref().clone().into(); - let nonce = self.source_client.next_account_index(account_id).await?; - let call: millau_runtime::Call = - millau_runtime::MessageLaneCall::receive_messages_delivery_proof(proof, relayers_state).into(); - let call_weight = call.get_dispatch_info().weight; - let transaction = Millau::sign_transaction(&self.source_client, &self.source_sign.signer, nonce, call); - log::trace!( - target: "bridge", - "Prepared Rialto -> Millau confirmation transaction. 
Weight: {}/{}, size: {}/{}", - call_weight, - bp_millau::max_extrinsic_weight(), - transaction.encode().len(), - bp_millau::max_extrinsic_size(), - ); - Ok(transaction) - } - - async fn make_messages_delivery_transaction( - &self, - _generated_at_header: MillauHeaderId, - _nonces: RangeInclusive, - proof: ::MessagesProof, - ) -> Result { - let (dispatch_weight, proof) = proof; - let FromBridgedChainMessagesProof { - ref nonces_start, - ref nonces_end, - .. - } = proof; - let messages_count = nonces_end - nonces_start + 1; - let account_id = self.target_sign.signer.public().as_array_ref().clone().into(); - let nonce = self.target_client.next_account_index(account_id).await?; - let call: rialto_runtime::Call = rialto_runtime::MessageLaneCall::receive_messages_proof( - self.relayer_id_at_source.clone(), - proof, - messages_count as _, - dispatch_weight, - ) - .into(); - let call_weight = call.get_dispatch_info().weight; - let transaction = Rialto::sign_transaction(&self.target_client, &self.target_sign.signer, nonce, call); - log::trace!( - target: "bridge", - "Prepared Millau -> Rialto delivery transaction. Weight: {}/{}, size: {}/{}", - call_weight, - bp_rialto::max_extrinsic_weight(), - transaction.encode().len(), - bp_rialto::max_extrinsic_size(), - ); - Ok(transaction) - } -} - -/// Millau node as messages source. -type MillauSourceClient = SubstrateMessagesSource; - -/// Rialto node as messages target. -type RialtoTargetClient = SubstrateMessagesTarget; - -/// Run Millau-to-Rialto messages sync. 
-pub fn run( - millau_client: MillauClient, - millau_sign: MillauSigningParams, - rialto_client: RialtoClient, - rialto_sign: RialtoSigningParams, - lane_id: LaneId, - metrics_params: Option, -) { - let stall_timeout = Duration::from_secs(5 * 60); - let relayer_id_at_millau = millau_sign.signer.public().as_array_ref().clone().into(); - - let lane = MillauMessagesToRialto { - source_client: millau_client.clone(), - source_sign: millau_sign, - target_client: rialto_client.clone(), - target_sign: rialto_sign, - relayer_id_at_source: relayer_id_at_millau, - }; - - // 2/3 is reserved for proofs and tx overhead - let max_messages_size_in_single_batch = bp_rialto::max_extrinsic_size() as usize / 3; - // TODO: use Millau weights after https://github.com/paritytech/parity-bridges-common/issues/390 - let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = - select_delivery_transaction_limits::>( - bp_rialto::max_extrinsic_weight(), - bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, - ); - - log::info!( - target: "bridge", - "Starting Millau -> Rialto messages relay.\n\t\ - Millau relayer account id: {:?}\n\t\ - Max messages in single transaction: {}\n\t\ - Max messages size in single transaction: {}\n\t\ - Max messages weight in single transaction: {}", - lane.relayer_id_at_source, - max_messages_in_single_batch, - max_messages_size_in_single_batch, - max_messages_weight_in_single_batch, - ); - - messages_relay::message_lane_loop::run( - messages_relay::message_lane_loop::Params { - lane: lane_id, - source_tick: Millau::AVERAGE_BLOCK_INTERVAL, - target_tick: Rialto::AVERAGE_BLOCK_INTERVAL, - reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY, - stall_timeout, - delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams { - max_unrewarded_relayer_entries_at_target: bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, - max_unconfirmed_nonces_at_target: bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, - 
max_messages_in_single_batch, - max_messages_weight_in_single_batch, - max_messages_size_in_single_batch, - }, - }, - MillauSourceClient::new(millau_client, lane.clone(), lane_id, RIALTO_BRIDGE_INSTANCE), - RialtoTargetClient::new(rialto_client, lane, lane_id, MILLAU_BRIDGE_INSTANCE), - metrics_params, - futures::future::pending(), - ); -} diff --git a/polkadot/bridges/relays/substrate/src/rialto_headers_to_millau.rs b/polkadot/bridges/relays/substrate/src/rialto_headers_to_millau.rs deleted file mode 100644 index 3a13c6e148c6383a472761bb13a810d9d3e781ea..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate/src/rialto_headers_to_millau.rs +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rialto-to-Millau headers sync entrypoint. 
- -use crate::{ - headers_pipeline::{SubstrateHeadersSyncPipeline, SubstrateHeadersToSubstrate}, - MillauClient, RialtoClient, -}; - -use async_trait::async_trait; -use bp_rialto::{ - BEST_RIALTO_BLOCKS_METHOD, FINALIZED_RIALTO_BLOCK_METHOD, INCOMPLETE_RIALTO_HEADERS_METHOD, - IS_KNOWN_RIALTO_BLOCK_METHOD, -}; -use headers_relay::sync_types::QueuedHeader; -use relay_millau_client::{BridgeRialtoCall, Millau, SigningParams as MillauSigningParams}; -use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SyncHeader as RialtoSyncHeader}; -use relay_substrate_client::{Error as SubstrateError, TransactionSignScheme}; -use sp_core::Pair; -use sp_runtime::Justification; - -/// Rialto-to-Millau headers sync pipeline. -type RialtoHeadersToMillau = SubstrateHeadersToSubstrate; -/// Rialto header in-the-queue. -type QueuedRialtoHeader = QueuedHeader; - -#[async_trait] -impl SubstrateHeadersSyncPipeline for RialtoHeadersToMillau { - const BEST_BLOCK_METHOD: &'static str = BEST_RIALTO_BLOCKS_METHOD; - const FINALIZED_BLOCK_METHOD: &'static str = FINALIZED_RIALTO_BLOCK_METHOD; - const IS_KNOWN_BLOCK_METHOD: &'static str = IS_KNOWN_RIALTO_BLOCK_METHOD; - const INCOMPLETE_HEADERS_METHOD: &'static str = INCOMPLETE_RIALTO_HEADERS_METHOD; - - type SignedTransaction = ::SignedTransaction; - - async fn make_submit_header_transaction( - &self, - header: QueuedRialtoHeader, - ) -> Result { - let account_id = self.target_sign.signer.public().as_array_ref().clone().into(); - let nonce = self.target_client.next_account_index(account_id).await?; - let call = BridgeRialtoCall::import_signed_header(header.header().clone().into_inner()).into(); - let transaction = Millau::sign_transaction(&self.target_client, &self.target_sign.signer, nonce, call); - Ok(transaction) - } - - async fn make_complete_header_transaction( - &self, - id: RialtoHeaderId, - completion: Justification, - ) -> Result { - let account_id = self.target_sign.signer.public().as_array_ref().clone().into(); - let nonce = 
self.target_client.next_account_index(account_id).await?; - let call = BridgeRialtoCall::finalize_header(id.1, completion).into(); - let transaction = Millau::sign_transaction(&self.target_client, &self.target_sign.signer, nonce, call); - Ok(transaction) - } -} - -/// Run Rialto-to-Millau headers sync. -pub async fn run( - rialto_client: RialtoClient, - millau_client: MillauClient, - millau_sign: MillauSigningParams, - metrics_params: Option, -) { - crate::headers_pipeline::run( - RialtoHeadersToMillau::new(millau_client.clone(), millau_sign), - rialto_client, - millau_client, - metrics_params, - ) - .await; -} diff --git a/polkadot/bridges/relays/substrate/src/rialto_messages_to_millau.rs b/polkadot/bridges/relays/substrate/src/rialto_messages_to_millau.rs deleted file mode 100644 index 1c11a111413c9f7702382bb79c099acc9275f10a..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/substrate/src/rialto_messages_to_millau.rs +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rialto-to-Millau messages sync entrypoint. 
- -use crate::messages_lane::{select_delivery_transaction_limits, SubstrateMessageLane, SubstrateMessageLaneToSubstrate}; -use crate::messages_source::SubstrateMessagesSource; -use crate::messages_target::SubstrateMessagesTarget; -use crate::{MillauClient, RialtoClient}; - -use async_trait::async_trait; -use bp_message_lane::{LaneId, MessageNonce}; -use bp_runtime::{MILLAU_BRIDGE_INSTANCE, RIALTO_BRIDGE_INSTANCE}; -use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; -use codec::Encode; -use frame_support::dispatch::GetDispatchInfo; -use messages_relay::message_lane::MessageLane; -use relay_millau_client::{HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams}; -use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams}; -use relay_substrate_client::{Chain, Error as SubstrateError, TransactionSignScheme}; -use relay_utils::metrics::MetricsParams; -use sp_core::Pair; -use std::{ops::RangeInclusive, time::Duration}; - -/// Rialto-to-Millau message lane. 
-type RialtoMessagesToMillau = SubstrateMessageLaneToSubstrate; - -#[async_trait] -impl SubstrateMessageLane for RialtoMessagesToMillau { - const OUTBOUND_LANE_MESSAGES_DISPATCH_WEIGHT_METHOD: &'static str = - bp_millau::TO_MILLAU_MESSAGES_DISPATCH_WEIGHT_METHOD; - const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str = - bp_millau::TO_MILLAU_LATEST_GENERATED_NONCE_METHOD; - const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_millau::TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD; - - const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rialto::FROM_RIALTO_LATEST_RECEIVED_NONCE_METHOD; - const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str = - bp_rialto::FROM_RIALTO_LATEST_CONFIRMED_NONCE_METHOD; - const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_rialto::FROM_RIALTO_UNREWARDED_RELAYERS_STATE; - - const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rialto::FINALIZED_RIALTO_BLOCK_METHOD; - const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_millau::FINALIZED_MILLAU_BLOCK_METHOD; - - type SourceSignedTransaction = ::SignedTransaction; - type TargetSignedTransaction = ::SignedTransaction; - - async fn make_messages_receiving_proof_transaction( - &self, - _generated_at_block: MillauHeaderId, - proof: ::MessagesReceivingProof, - ) -> Result { - let (relayers_state, proof) = proof; - let account_id = self.source_sign.signer.public().as_array_ref().clone().into(); - let nonce = self.source_client.next_account_index(account_id).await?; - let call: rialto_runtime::Call = - rialto_runtime::MessageLaneCall::receive_messages_delivery_proof(proof, relayers_state).into(); - let call_weight = call.get_dispatch_info().weight; - let transaction = Rialto::sign_transaction(&self.source_client, &self.source_sign.signer, nonce, call); - log::trace!( - target: "bridge", - "Prepared Millau -> Rialto confirmation transaction. 
Weight: {}/{}, size: {}/{}", - call_weight, - bp_rialto::max_extrinsic_weight(), - transaction.encode().len(), - bp_rialto::max_extrinsic_size(), - ); - Ok(transaction) - } - - async fn make_messages_delivery_transaction( - &self, - _generated_at_header: RialtoHeaderId, - _nonces: RangeInclusive, - proof: ::MessagesProof, - ) -> Result { - let (dispatch_weight, proof) = proof; - let FromBridgedChainMessagesProof { - ref nonces_start, - ref nonces_end, - .. - } = proof; - let messages_count = nonces_end - nonces_start + 1; - let account_id = self.target_sign.signer.public().as_array_ref().clone().into(); - let nonce = self.target_client.next_account_index(account_id).await?; - let call: millau_runtime::Call = millau_runtime::MessageLaneCall::receive_messages_proof( - self.relayer_id_at_source.clone(), - proof, - messages_count as _, - dispatch_weight, - ) - .into(); - let call_weight = call.get_dispatch_info().weight; - let transaction = Millau::sign_transaction(&self.target_client, &self.target_sign.signer, nonce, call); - log::trace!( - target: "bridge", - "Prepared Rialto -> Millau delivery transaction. Weight: {}/{}, size: {}/{}", - call_weight, - bp_millau::max_extrinsic_weight(), - transaction.encode().len(), - bp_millau::max_extrinsic_size(), - ); - Ok(transaction) - } -} - -/// Rialto node as messages source. -type RialtoSourceClient = SubstrateMessagesSource; - -/// Millau node as messages target. -type MillauTargetClient = SubstrateMessagesTarget; - -/// Run Rialto-to-Millau messages sync. 
-pub fn run( - rialto_client: RialtoClient, - rialto_sign: RialtoSigningParams, - millau_client: MillauClient, - millau_sign: MillauSigningParams, - lane_id: LaneId, - metrics_params: Option, -) { - let stall_timeout = Duration::from_secs(5 * 60); - let relayer_id_at_rialto = rialto_sign.signer.public().as_array_ref().clone().into(); - - let lane = RialtoMessagesToMillau { - source_client: rialto_client.clone(), - source_sign: rialto_sign, - target_client: millau_client.clone(), - target_sign: millau_sign, - relayer_id_at_source: relayer_id_at_rialto, - }; - - // 2/3 is reserved for proofs and tx overhead - let max_messages_size_in_single_batch = bp_millau::max_extrinsic_size() as usize / 3; - let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = - select_delivery_transaction_limits::>( - bp_millau::max_extrinsic_weight(), - bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, - ); - - log::info!( - target: "bridge", - "Starting Rialto -> Millau messages relay.\n\t\ - Rialto relayer account id: {:?}\n\t\ - Max messages in single transaction: {}\n\t\ - Max messages size in single transaction: {}\n\t\ - Max messages weight in single transaction: {}", - lane.relayer_id_at_source, - max_messages_in_single_batch, - max_messages_size_in_single_batch, - max_messages_weight_in_single_batch, - ); - - messages_relay::message_lane_loop::run( - messages_relay::message_lane_loop::Params { - lane: lane_id, - source_tick: Rialto::AVERAGE_BLOCK_INTERVAL, - target_tick: Millau::AVERAGE_BLOCK_INTERVAL, - reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY, - stall_timeout, - delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams { - max_unrewarded_relayer_entries_at_target: bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, - max_unconfirmed_nonces_at_target: bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, - max_messages_in_single_batch, - max_messages_weight_in_single_batch, - max_messages_size_in_single_batch, - }, - 
}, - RialtoSourceClient::new(rialto_client, lane.clone(), lane_id, MILLAU_BRIDGE_INSTANCE), - MillauTargetClient::new(millau_client, lane, lane_id, RIALTO_BRIDGE_INSTANCE), - metrics_params, - futures::future::pending(), - ); -} diff --git a/polkadot/bridges/relays/utils/Cargo.toml b/polkadot/bridges/relays/utils/Cargo.toml deleted file mode 100644 index ce6a20bbc4fee380608a4c0a57128eba738dfbfa..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/utils/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -name = "relay-utils" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -ansi_term = "0.12" -async-std = "1.6.5" -async-trait = "0.1.40" -backoff = "0.2" -env_logger = "0.8.2" -futures = "0.3.5" -log = "0.4.11" -num-traits = "0.2" -sysinfo = "0.15" -time = "0.2" - -# Substrate dependencies - -substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/utils/src/initialize.rs b/polkadot/bridges/relays/utils/src/initialize.rs deleted file mode 100644 index 6b0efb49e480bbe5db16f97ceb58103e390389f5..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/utils/src/initialize.rs +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relayer initialization functions. - -use std::io::Write; - -/// Initialize relay environment. -pub fn initialize_relay() { - let mut builder = env_logger::Builder::new(); - - let filters = match std::env::var("RUST_LOG") { - Ok(env_filters) => format!("bridge=info,{}", env_filters), - Err(_) => "bridge=info".into(), - }; - - builder.parse_filters(&filters); - builder.format(move |buf, record| { - writeln!(buf, "{}", { - let timestamp = time::OffsetDateTime::try_now_local() - .unwrap_or_else(|_| time::OffsetDateTime::now_utc()) - .format("%Y-%m-%d %H:%M:%S %z"); - if cfg!(windows) { - format!("{} {} {} {}", timestamp, record.level(), record.target(), record.args()) - } else { - use ansi_term::Colour as Color; - let log_level = match record.level() { - log::Level::Error => Color::Fixed(9).bold().paint(record.level().to_string()), - log::Level::Warn => Color::Fixed(11).bold().paint(record.level().to_string()), - log::Level::Info => Color::Fixed(10).paint(record.level().to_string()), - log::Level::Debug => Color::Fixed(14).paint(record.level().to_string()), - log::Level::Trace => Color::Fixed(12).paint(record.level().to_string()), - }; - format!( - "{} {} {} {}", - Color::Fixed(8).bold().paint(timestamp), - log_level, - Color::Fixed(8).paint(record.target()), - record.args() - ) - } - }) - }); - - builder.init(); -} diff --git a/polkadot/bridges/relays/utils/src/lib.rs b/polkadot/bridges/relays/utils/src/lib.rs deleted file mode 100644 index f787e8763a7a5479bf6e264908fa99b5256afe26..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/utils/src/lib.rs +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Utilities used by different relays. - -use backoff::{backoff::Backoff, ExponentialBackoff}; -use futures::future::FutureExt; -use std::time::Duration; - -/// Max delay after connection-unrelated error happened before we'll try the -/// same request again. -pub const MAX_BACKOFF_INTERVAL: Duration = Duration::from_secs(60); -/// Delay after connection-related error happened before we'll try -/// reconnection again. -pub const CONNECTION_ERROR_DELAY: Duration = Duration::from_secs(10); - -pub mod initialize; -pub mod metrics; -pub mod relay_loop; - -/// Block number traits shared by all chains that relay is able to serve. 
-pub trait BlockNumberBase: - 'static - + From - + Into - + Ord - + Clone - + Copy - + Default - + Send - + Sync - + std::fmt::Debug - + std::fmt::Display - + std::hash::Hash - + std::ops::Add - + std::ops::Sub - + num_traits::CheckedSub - + num_traits::Saturating - + num_traits::Zero - + num_traits::One -{ -} - -impl BlockNumberBase for T where - T: 'static - + From - + Into - + Ord - + Clone - + Copy - + Default - + Send - + Sync - + std::fmt::Debug - + std::fmt::Display - + std::hash::Hash - + std::ops::Add - + std::ops::Sub - + num_traits::CheckedSub - + num_traits::Saturating - + num_traits::Zero - + num_traits::One -{ -} - -/// Macro that returns (client, Err(error)) tuple from function if result is Err(error). -#[macro_export] -macro_rules! bail_on_error { - ($result: expr) => { - match $result { - (client, Ok(result)) => (client, result), - (client, Err(error)) => return (client, Err(error)), - } - }; -} - -/// Macro that returns (client, Err(error)) tuple from function if result is Err(error). -#[macro_export] -macro_rules! bail_on_arg_error { - ($result: expr, $client: ident) => { - match $result { - Ok(result) => result, - Err(error) => return ($client, Err(error)), - } - }; -} - -/// Ethereum header Id. -#[derive(Debug, Default, Clone, Copy, Eq, Hash, PartialEq)] -pub struct HeaderId(pub Number, pub Hash); - -/// Error type that can signal connection errors. -pub trait MaybeConnectionError { - /// Returns true if error (maybe) represents connection error. - fn is_connection_error(&self) -> bool; -} - -/// Stringified error that may be either connection-related or not. -#[derive(Debug)] -pub enum StringifiedMaybeConnectionError { - /// The error is connection-related error. - Connection(String), - /// The error is connection-unrelated error. - NonConnection(String), -} - -impl StringifiedMaybeConnectionError { - /// Create new stringified connection error. 
- pub fn new(is_connection_error: bool, error: String) -> Self { - if is_connection_error { - StringifiedMaybeConnectionError::Connection(error) - } else { - StringifiedMaybeConnectionError::NonConnection(error) - } - } -} - -impl MaybeConnectionError for StringifiedMaybeConnectionError { - fn is_connection_error(&self) -> bool { - match *self { - StringifiedMaybeConnectionError::Connection(_) => true, - StringifiedMaybeConnectionError::NonConnection(_) => false, - } - } -} - -impl ToString for StringifiedMaybeConnectionError { - fn to_string(&self) -> String { - match *self { - StringifiedMaybeConnectionError::Connection(ref err) => err.clone(), - StringifiedMaybeConnectionError::NonConnection(ref err) => err.clone(), - } - } -} - -/// Exponential backoff for connection-unrelated errors retries. -pub fn retry_backoff() -> ExponentialBackoff { - ExponentialBackoff { - // we do not want relayer to stop - max_elapsed_time: None, - max_interval: MAX_BACKOFF_INTERVAL, - ..Default::default() - } -} - -/// Compact format of IDs vector. -pub fn format_ids(mut ids: impl ExactSizeIterator) -> String { - const NTH_PROOF: &str = "we have checked len; qed"; - match ids.len() { - 0 => "".into(), - 1 => format!("{:?}", ids.next().expect(NTH_PROOF)), - 2 => { - let id0 = ids.next().expect(NTH_PROOF); - let id1 = ids.next().expect(NTH_PROOF); - format!("[{:?}, {:?}]", id0, id1) - } - len => { - let id0 = ids.next().expect(NTH_PROOF); - let id_last = ids.last().expect(NTH_PROOF); - format!("{}:[{:?} ... {:?}]", len, id0, id_last) - } - } -} - -/// Stream that emits item every `timeout_ms` milliseconds. -pub fn interval(timeout: Duration) -> impl futures::Stream { - futures::stream::unfold((), move |_| async move { - async_std::task::sleep(timeout).await; - Some(((), ())) - }) -} - -/// Which client has caused error. -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum FailedClient { - /// It is the source client who has caused error. 
- Source, - /// It is the target client who has caused error. - Target, - /// Both clients are failing, or we just encountered some other error that - /// should be treated like that. - Both, -} - -/// Future process result. -#[derive(Debug, Clone, Copy)] -pub enum ProcessFutureResult { - /// Future has been processed successfully. - Success, - /// Future has failed with non-connection error. - Failed, - /// Future has failed with connection error. - ConnectionFailed, -} - -impl ProcessFutureResult { - /// Returns true if result is Success. - pub fn is_ok(self) -> bool { - match self { - ProcessFutureResult::Success => true, - ProcessFutureResult::Failed | ProcessFutureResult::ConnectionFailed => false, - } - } - - /// Returns Ok(true) if future has succeeded. - /// Returns Ok(false) if future has failed with non-connection error. - /// Returns Err if future is `ConnectionFailed`. - pub fn fail_if_connection_error(self, failed_client: FailedClient) -> Result { - match self { - ProcessFutureResult::Success => Ok(true), - ProcessFutureResult::Failed => Ok(false), - ProcessFutureResult::ConnectionFailed => Err(failed_client), - } - } -} - -/// Process result of the future from a client. -pub fn process_future_result( - result: Result, - retry_backoff: &mut ExponentialBackoff, - on_success: impl FnOnce(TResult), - go_offline_future: &mut std::pin::Pin<&mut futures::future::Fuse>, - go_offline: impl FnOnce(Duration) -> TGoOfflineFuture, - error_pattern: impl FnOnce() -> String, -) -> ProcessFutureResult -where - TError: std::fmt::Debug + MaybeConnectionError, - TGoOfflineFuture: FutureExt, -{ - match result { - Ok(result) => { - on_success(result); - retry_backoff.reset(); - ProcessFutureResult::Success - } - Err(error) if error.is_connection_error() => { - log::error!( - target: "bridge", - "{}: {:?}. 
Going to restart", - error_pattern(), - error, - ); - - retry_backoff.reset(); - go_offline_future.set(go_offline(CONNECTION_ERROR_DELAY).fuse()); - ProcessFutureResult::ConnectionFailed - } - Err(error) => { - let retry_delay = retry_backoff.next_backoff().unwrap_or(CONNECTION_ERROR_DELAY); - log::error!( - target: "bridge", - "{}: {:?}. Retrying in {}", - error_pattern(), - error, - retry_delay.as_secs_f64(), - ); - - go_offline_future.set(go_offline(retry_delay).fuse()); - ProcessFutureResult::Failed - } - } -} diff --git a/polkadot/bridges/relays/utils/src/metrics.rs b/polkadot/bridges/relays/utils/src/metrics.rs deleted file mode 100644 index f38d1bda3a5d8db934c4c7f0c876cd59ba8d40c7..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/utils/src/metrics.rs +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -pub use substrate_prometheus_endpoint::{register, Counter, CounterVec, Gauge, GaugeVec, Opts, Registry, F64, U64}; - -use async_std::sync::{Arc, Mutex}; -use std::net::SocketAddr; -use substrate_prometheus_endpoint::init_prometheus; -use sysinfo::{ProcessExt, RefreshKind, System, SystemExt}; - -/// Prometheus endpoint MetricsParams. 
-#[derive(Debug, Clone)] -pub struct MetricsParams { - /// Serve HTTP requests at given host. - pub host: String, - /// Serve HTTP requests at given port. - pub port: u16, -} - -/// Metrics API. -pub trait Metrics { - /// Register metrics in the registry. - fn register(&self, registry: &Registry) -> Result<(), String>; -} - -/// Global Prometheus metrics. -#[derive(Debug, Clone)] -pub struct GlobalMetrics { - system: Arc>, - system_average_load: GaugeVec, - process_cpu_usage_percentage: Gauge, - process_memory_usage_bytes: Gauge, -} - -/// Start Prometheus endpoint with given metrics registry. -pub fn start( - prefix: String, - params: Option, - global_metrics: &GlobalMetrics, - extra_metrics: &impl Metrics, -) { - let params = match params { - Some(params) => params, - None => return, - }; - - assert!(!prefix.is_empty(), "Metrics prefix can not be empty"); - - let do_start = move || { - let prometheus_socket_addr = SocketAddr::new( - params - .host - .parse() - .map_err(|err| format!("Invalid Prometheus host {}: {}", params.host, err))?, - params.port, - ); - let metrics_registry = - Registry::new_custom(Some(prefix), None).expect("only fails if prefix is empty; prefix is not empty; qed"); - global_metrics.register(&metrics_registry)?; - extra_metrics.register(&metrics_registry)?; - async_std::task::spawn(async move { - init_prometheus(prometheus_socket_addr, metrics_registry) - .await - .map_err(|err| format!("Error starting Prometheus endpoint: {}", err)) - }); - - Ok(()) - }; - - let result: Result<(), String> = do_start(); - if let Err(err) = result { - log::warn!( - target: "bridge", - "Failed to expose metrics: {}", - err, - ); - } -} - -impl Default for MetricsParams { - fn default() -> Self { - MetricsParams { - host: "127.0.0.1".into(), - port: 9616, - } - } -} - -impl Metrics for GlobalMetrics { - fn register(&self, registry: &Registry) -> Result<(), String> { - register(self.system_average_load.clone(), registry).map_err(|e| e.to_string())?; - 
register(self.process_cpu_usage_percentage.clone(), registry).map_err(|e| e.to_string())?; - register(self.process_memory_usage_bytes.clone(), registry).map_err(|e| e.to_string())?; - Ok(()) - } -} - -impl Default for GlobalMetrics { - fn default() -> Self { - GlobalMetrics { - system: Arc::new(Mutex::new(System::new_with_specifics(RefreshKind::everything()))), - system_average_load: GaugeVec::new(Opts::new("system_average_load", "System load average"), &["over"]) - .expect("metric is static and thus valid; qed"), - process_cpu_usage_percentage: Gauge::new("process_cpu_usage_percentage", "Process CPU usage") - .expect("metric is static and thus valid; qed"), - process_memory_usage_bytes: Gauge::new( - "process_memory_usage_bytes", - "Process memory (resident set size) usage", - ) - .expect("metric is static and thus valid; qed"), - } - } -} - -impl GlobalMetrics { - /// Update metrics. - pub async fn update(&self) { - // update system-wide metrics - let mut system = self.system.lock().await; - let load = system.get_load_average(); - self.system_average_load.with_label_values(&["1min"]).set(load.one); - self.system_average_load.with_label_values(&["5min"]).set(load.five); - self.system_average_load.with_label_values(&["15min"]).set(load.fifteen); - - // update process-related metrics - let pid = sysinfo::get_current_pid().expect( - "only fails where pid is unavailable (os=unknown || arch=wasm32);\ - relay is not supposed to run in such MetricsParamss;\ - qed", - ); - let is_process_refreshed = system.refresh_process(pid); - match (is_process_refreshed, system.get_process(pid)) { - (true, Some(process_info)) => { - let cpu_usage = process_info.cpu_usage() as f64; - let memory_usage = process_info.memory() * 1024; - log::trace!( - target: "bridge-metrics", - "Refreshed process metrics: CPU={}, memory={}", - cpu_usage, - memory_usage, - ); - - self.process_cpu_usage_percentage - .set(if cpu_usage.is_finite() { cpu_usage } else { 0f64 }); - 
self.process_memory_usage_bytes.set(memory_usage); - } - _ => { - log::warn!( - target: "bridge", - "Failed to refresh process information. Metrics may show obsolete values", - ); - } - } - } -} diff --git a/polkadot/bridges/relays/utils/src/relay_loop.rs b/polkadot/bridges/relays/utils/src/relay_loop.rs deleted file mode 100644 index d750358edaa02c3bab0254285b0d4330a38e6f01..0000000000000000000000000000000000000000 --- a/polkadot/bridges/relays/utils/src/relay_loop.rs +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{FailedClient, MaybeConnectionError}; - -use async_trait::async_trait; -use std::{fmt::Debug, future::Future, time::Duration}; - -/// Default pause between reconnect attempts. -pub const RECONNECT_DELAY: Duration = Duration::from_secs(10); - -/// Basic blockchain client from relay perspective. -#[async_trait] -pub trait Client: Clone + Send + Sync { - /// Type of error this clients returns. - type Error: Debug + MaybeConnectionError; - - /// Try to reconnect to source node. - async fn reconnect(&mut self) -> Result<(), Self::Error>; -} - -/// Run relay loop. -/// -/// This function represents an outer loop, which in turn calls provided `loop_run` function to do -/// actual job. 
When `loop_run` returns, this outer loop reconnects to failed client (source, -/// target or both) and calls `loop_run` again. -pub fn run( - reconnect_delay: Duration, - mut source_client: SC, - mut target_client: TC, - loop_run: R, -) where - R: Fn(SC, TC) -> F, - F: Future>, -{ - let mut local_pool = futures::executor::LocalPool::new(); - - local_pool.run_until(async move { - loop { - let result = loop_run(source_client.clone(), target_client.clone()).await; - - match result { - Ok(()) => break, - Err(failed_client) => loop { - async_std::task::sleep(reconnect_delay).await; - if failed_client == FailedClient::Both || failed_client == FailedClient::Source { - match source_client.reconnect().await { - Ok(()) => (), - Err(error) => { - log::warn!( - target: "bridge", - "Failed to reconnect to source client. Going to retry in {}s: {:?}", - reconnect_delay.as_secs(), - error, - ); - continue; - } - } - } - if failed_client == FailedClient::Both || failed_client == FailedClient::Target { - match target_client.reconnect().await { - Ok(()) => (), - Err(error) => { - log::warn!( - target: "bridge", - "Failed to reconnect to target client. 
Going to retry in {}s: {:?}", - reconnect_delay.as_secs(), - error, - ); - continue; - } - } - } - - break; - }, - } - - log::debug!(target: "bridge", "Restarting relay loop"); - } - }); -} diff --git a/polkadot/bridges/rustfmt.toml b/polkadot/bridges/rustfmt.toml deleted file mode 100644 index 8ded863e80af2390432ee5db3b9f65848f3eefad..0000000000000000000000000000000000000000 --- a/polkadot/bridges/rustfmt.toml +++ /dev/null @@ -1,3 +0,0 @@ -hard_tabs = true -max_width = 120 -edition = "2018" diff --git a/polkadot/bridges/scripts/add_license.sh b/polkadot/bridges/scripts/add_license.sh deleted file mode 100755 index 49864b47c05f883f83ea9b0388d5fa189e1c95ee..0000000000000000000000000000000000000000 --- a/polkadot/bridges/scripts/add_license.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -PAT_GPL="^// Copyright.*If not, see \.$" -PAT_OTHER="^// Copyright" - -SCRIPTS_DIR=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P ) - -for f in $(find . -type f | egrep '\.(c|cpp|rs)$'); do - HEADER=$(head -16 $f) - if [[ $HEADER =~ $PAT_GPL ]]; then - BODY=$(tail -n +17 $f) - cat $SCRIPTS_DIR/license_header > temp - echo "$BODY" >> temp - mv temp $f - elif [[ $HEADER =~ $PAT_OTHER ]]; then - echo "Other license was found do nothing" - else - echo "$f was missing header" - cat $SCRIPTS_DIR/license_header $f > temp - mv temp $f - fi -done diff --git a/polkadot/bridges/scripts/ci-cache.sh b/polkadot/bridges/scripts/ci-cache.sh deleted file mode 100755 index 040d44fa74a0e7641b4135367807d77270d8b1ba..0000000000000000000000000000000000000000 --- a/polkadot/bridges/scripts/ci-cache.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -set -xeu - -echo $CARGO_TARGET_DIR; -mkdir -p $CARGO_TARGET_DIR; -echo "Current Rust nightly version:"; -rustc +nightly --version; -echo "Cached Rust nightly version:"; -if [ ! 
-f $CARGO_TARGET_DIR/check_nightly_rust ]; then - echo "" > $CARGO_TARGET_DIR/check_nightly_rust; -fi -cat $CARGO_TARGET_DIR/check_nightly_rust; -if [[ $(cat $CARGO_TARGET_DIR/check_nightly_rust) == $(rustc +nightly --version) ]]; then - echo "The Rust nightly version has not changed"; -else - echo "The Rust nightly version has changed. Clearing the cache"; - rm -rf $CARGO_TARGET_DIR/*; -fi diff --git a/polkadot/bridges/scripts/dump-logs.sh b/polkadot/bridges/scripts/dump-logs.sh deleted file mode 100644 index f076cbccbca168457593fee74c459552034f0803..0000000000000000000000000000000000000000 --- a/polkadot/bridges/scripts/dump-logs.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -# A script to dump logs from selected important docker containers -# to make it easier to analyze locally. - -set -xeu - -DATE=$(date +"%Y-%m-%d-%T") -LOGS_DIR="${DATE//:/-}-logs" -mkdir $LOGS_DIR -cd $LOGS_DIR - -# From $ docker ps --format '{{.Names}}' - -SERVICES=(\ - deployments_relay-messages-millau-to-rialto-generator_1 \ - deployments_relay-messages-rialto-to-millau-generator_1 \ - deployments_relay-messages-millau-to-rialto_1 \ - deployments_relay-messages-rialto-to-millau_1 \ - deployments_relay-headers-millau-to-rialto_1 \ - deployments_relay-headers-rialto-to-millau_1 \ - deployments_rialto-node-alice_1 \ - deployments_rialto-node-bob_1 \ - deployments_millau-node-alice_1 \ - deployments_millau-node-bob_1 \ -) - -for SVC in ${SERVICES[*]} -do - SHORT_NAME="${SVC//deployments_/}" - docker logs $SVC &> $SHORT_NAME.log -done - -cd - -tar cvjf $LOGS_DIR.tar.bz2 $LOGS_DIR diff --git a/polkadot/bridges/scripts/license_header b/polkadot/bridges/scripts/license_header deleted file mode 100644 index b989aaa1cf04cf449d6dc4b0bb7448f3d191673f..0000000000000000000000000000000000000000 --- a/polkadot/bridges/scripts/license_header +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - diff --git a/polkadot/bridges/scripts/run-eth2sub-relay.sh b/polkadot/bridges/scripts/run-eth2sub-relay.sh deleted file mode 100755 index 2cf64a93780d21180aa300f3423d7f8748e9da89..0000000000000000000000000000000000000000 --- a/polkadot/bridges/scripts/run-eth2sub-relay.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -# Run a development instance of the Ethereum to Substrate relay. Needs running -# Substrate and Ethereum nodes in order to work. - -RUST_LOG=rpc=trace,bridge=trace ./target/debug/ethereum-poa-relay eth-to-sub diff --git a/polkadot/bridges/scripts/run-openethereum-node.sh b/polkadot/bridges/scripts/run-openethereum-node.sh deleted file mode 100755 index 62089baffe458d54bc9222f6403522babf2822ec..0000000000000000000000000000000000000000 --- a/polkadot/bridges/scripts/run-openethereum-node.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -# This script assumes that an OpenEthereum build is available. The repo -# should be at the same level as the `parity-bridges-common` repo. 
- -RUST_LOG=rpc=trace,txqueue=trace,bridge-builtin=trace \ -../openethereum/target/debug/openethereum \ - --config="$(pwd)"/deployments/dev/poa-config/poa-node-config \ - --node-key=arthur \ - --engine-signer=0x005e714f896a8b7cede9d38688c1a81de72a58e4 \ - --base-path=/tmp/oe-dev-node \ diff --git a/polkadot/bridges/scripts/send-message.sh b/polkadot/bridges/scripts/send-message.sh deleted file mode 100755 index f7ceac13c0a2e9a1c21922572f605f2eec695f14..0000000000000000000000000000000000000000 --- a/polkadot/bridges/scripts/send-message.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -# Used for manually sending a message to a running network. -# -# You could for example spin up a full network using the Docker Compose files -# we have (to make sure the message relays are running), but remove the message -# generator service. From there you may submit messages manually using this script. - -case "$1" in - remark) - RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \ - ./target/debug/substrate-relay send-message millau-to-rialto \ - --millau-host localhost \ - --millau-port 20044 \ - --millau-signer //Dave \ - --rialto-signer //Dave \ - --lane 00000000 \ - --origin Target \ - remark \ - ;; - transfer) - RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \ - ./target/debug/substrate-relay send-message millau-to-rialto \ - --millau-host localhost \ - --millau-port 20044 \ - --millau-signer //Dave \ - --rialto-signer //Dave \ - --lane 00000000 \ - --origin Target \ - transfer \ - --amount 100000000000000 \ - --recipient 5DZvVvd1udr61vL7Xks17TFQ4fi9NiagYLaBobnbPCP14ewA \ - ;; - *) echo "A message type is require. 
Supported messages: remark, transfer."; exit 1;; -esac diff --git a/polkadot/bridges/scripts/update-weights.sh b/polkadot/bridges/scripts/update-weights.sh deleted file mode 100755 index cbf20730b414e9147d8e00be22b8361b0af39f85..0000000000000000000000000000000000000000 --- a/polkadot/bridges/scripts/update-weights.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh - -# Run this script from root of the repo - -cargo run --manifest-path=bin/rialto/node/Cargo.toml --release --features=runtime-benchmarks -- benchmark \ - --chain=local \ - --steps=50 \ - --repeat=20 \ - --pallet=pallet_message_lane \ - --extrinsic=* \ - --execution=wasm \ - --wasm-execution=Compiled \ - --heap-pages=4096 \ - --output=./modules/message-lane/src/weights.rs \ - --template=./.maintain/rialto-weight-template.hbs diff --git a/polkadot/bridges/scripts/update_substrate.sh b/polkadot/bridges/scripts/update_substrate.sh deleted file mode 100755 index f7715bda5d1c5623221669caedcb6a0e8465ac27..0000000000000000000000000000000000000000 --- a/polkadot/bridges/scripts/update_substrate.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -# One-liner to update between Substrate releases -# Usage: ./update_substrate.sh 2.0.0-rc6 2.0.0 -set -xeu - -OLD_VERSION=$1 -NEW_VERSION=$2 - -find . -type f -name 'Cargo.toml' -exec sed -i '' -e "s/$OLD_VERSION/$NEW_VERSION/g" {} \;