From e5bed7ac380b6adb54b60a2a72a2a8f07f50d6c1 Mon Sep 17 00:00:00 2001 From: Hernando Castano Date: Wed, 21 Apr 2021 11:56:23 -0400 Subject: [PATCH] Squashed 'bridges/' content from commit 89a76998f git-subtree-dir: bridges git-subtree-split: 89a76998f93c8219e9b1f785dcce73d4891e7068 --- polkadot/.dependabot/config.yml | 22 + polkadot/.dockerignore | 1 + polkadot/.editorconfig | 16 + polkadot/.github/workflows/deny.yml | 39 + polkadot/.github/workflows/lint.yml | 43 + polkadot/.github/workflows/publish-deps.yml | 76 + polkadot/.github/workflows/publish-docker.yml | 93 + polkadot/.github/workflows/rust.yml | 175 + polkadot/.gitignore | 25 + polkadot/.maintain/rialto-weight-template.hbs | 103 + polkadot/CODE_OF_CONDUCT.md | 80 + polkadot/Cargo.lock | 9958 +++++++++++++++++ polkadot/Cargo.toml | 11 + polkadot/Dockerfile | 71 + polkadot/LICENSE | 675 ++ polkadot/README.md | 215 + polkadot/bin/.keep | 0 polkadot/bin/millau/node/Cargo.toml | 62 + polkadot/bin/millau/node/build.rs | 23 + polkadot/bin/millau/node/src/chain_spec.rs | 195 + polkadot/bin/millau/node/src/cli.rs | 70 + polkadot/bin/millau/node/src/command.rs | 172 + polkadot/bin/millau/node/src/lib.rs | 32 + polkadot/bin/millau/node/src/main.rs | 30 + polkadot/bin/millau/node/src/service.rs | 444 + polkadot/bin/millau/runtime/Cargo.toml | 106 + polkadot/bin/millau/runtime/build.rs | 26 + polkadot/bin/millau/runtime/src/lib.rs | 707 ++ .../bin/millau/runtime/src/rialto_messages.rs | 253 + polkadot/bin/rialto/node/Cargo.toml | 60 + polkadot/bin/rialto/node/build.rs | 23 + polkadot/bin/rialto/node/src/chain_spec.rs | 206 + polkadot/bin/rialto/node/src/cli.rs | 70 + polkadot/bin/rialto/node/src/command.rs | 172 + polkadot/bin/rialto/node/src/main.rs | 30 + polkadot/bin/rialto/node/src/service.rs | 445 + polkadot/bin/rialto/runtime/Cargo.toml | 132 + polkadot/bin/rialto/runtime/build.rs | 26 + polkadot/bin/rialto/runtime/src/benches.rs | 37 + polkadot/bin/rialto/runtime/src/exchange.rs | 260 + 
polkadot/bin/rialto/runtime/src/kovan.rs | 192 + polkadot/bin/rialto/runtime/src/lib.rs | 1148 ++ .../bin/rialto/runtime/src/millau_messages.rs | 253 + polkadot/bin/rialto/runtime/src/rialto_poa.rs | 175 + polkadot/bin/runtime-common/Cargo.toml | 56 + polkadot/bin/runtime-common/README.md | 176 + polkadot/bin/runtime-common/src/lib.rs | 22 + polkadot/bin/runtime-common/src/messages.rs | 1441 +++ .../src/messages_benchmarking.rs | 226 + polkadot/deny.toml | 201 + polkadot/deployments/BridgeDeps.Dockerfile | 32 + polkadot/deployments/README.md | 254 + .../bridges/poa-rialto/Front-end.Dockerfile | 26 + ...elay-poa-to-rialto-exchange-dashboard.json | 474 + ...relay-poa-to-rialto-headers-dashboard.json | 694 ++ ...relay-rialto-to-poa-headers-dashboard.json | 694 ++ .../dashboard/prometheus/targets.yml | 4 + .../bridges/poa-rialto/docker-compose.yml | 94 + .../poa-exchange-tx-generator-entrypoint.sh | 99 + .../relay-headers-poa-to-rialto-entrypoint.sh | 15 + .../relay-headers-rialto-to-poa-entrypoint.sh | 26 + .../relay-poa-exchange-rialto-entrypoint.sh | 16 + .../keys/BridgePoa/address_book.json | 1 + .../poa-config/keys/BridgePoa/arthur.json | 1 + .../poa-config/keys/BridgePoa/bertha.json | 1 + .../poa-config/keys/BridgePoa/carlos.json | 1 + .../poa-config/keys/BridgePoa/diego.json | 1 + .../bridges/poa-rialto/poa-config/pass | 1 + .../poa-rialto/poa-config/poa-node-config | 20 + .../bridges/poa-rialto/poa-config/poa.json | 184 + .../bridges/poa-rialto/poa-config/reserved | 3 + ...y-millau-to-rialto-messages-dashboard.json | 1429 +++ ...y-rialto-to-millau-messages-dashboard.json | 1420 +++ .../rialto-millau-maintenance-dashboard.json | 454 + .../dashboard/prometheus/targets.yml | 4 + .../bridges/rialto-millau/docker-compose.yml | 95 + ...ay-messages-millau-to-rialto-entrypoint.sh | 18 + ...ay-messages-rialto-to-millau-entrypoint.sh | 18 + ...messages-to-millau-generator-entrypoint.sh | 121 + ...messages-to-rialto-generator-entrypoint.sh | 121 + 
.../relay-millau-rialto-entrypoint.sh | 33 + ...y-westend-to-millau-headers-dashboard.json | 694 ++ .../dashboard/prometheus/targets.yml | 2 + .../bridges/westend-millau/docker-compose.yml | 31 + ...ay-headers-westend-to-millau-entrypoint.sh | 25 + .../keys/BridgePoa/address_book.json | 1 + .../dev/poa-config/keys/BridgePoa/arthur.json | 1 + polkadot/deployments/dev/poa-config/pass | 1 + .../dev/poa-config/poa-node-config | 17 + polkadot/deployments/dev/poa-config/poa.json | 178 + .../local-scripts/bridge-entrypoint.sh | 7 + .../relay-headers-rococo-to-westend.sh | 24 + .../relay-headers-westend-to-rococo.sh | 24 + .../relay-messages-millau-to-rialto.sh | 20 + .../relay-messages-rialto-to-millau.sh | 20 + .../local-scripts/relay-millau-to-rialto.sh | 27 + .../local-scripts/relay-rialto-to-millau.sh | 27 + .../local-scripts/run-millau-node.sh | 11 + .../local-scripts/run-rialto-node.sh | 11 + .../local-scripts/run-rococo-bob-node.sh | 14 + .../local-scripts/run-rococo-node.sh | 14 + .../local-scripts/run-westend-node.sh | 14 + .../monitoring/GrafanaMatrix.Dockerfile | 18 + polkadot/deployments/monitoring/disabled.yml | 15 + .../deployments/monitoring/docker-compose.yml | 32 + .../monitoring/grafana-matrix/config.yml | 49 + .../dashboards/grafana-dashboard.yaml | 6 + .../datasources/grafana-datasource.yaml | 16 + .../notifiers/grafana-notifier.yaml | 15 + .../monitoring/prometheus/prometheus.yml | 7 + .../networks/OpenEthereum.Dockerfile | 91 + polkadot/deployments/networks/eth-poa.yml | 46 + polkadot/deployments/networks/millau.yml | 87 + polkadot/deployments/networks/rialto.yml | 87 + polkadot/deployments/reverse-proxy/README.md | 15 + .../reverse-proxy/docker-compose.yml | 42 + polkadot/deployments/run.sh | 162 + polkadot/deployments/types-millau.json | 172 + polkadot/deployments/types-rialto.json | 171 + polkadot/deployments/types/build.sh | 15 + polkadot/deployments/types/common.json | 159 + polkadot/deployments/types/millau.json | 16 + 
polkadot/deployments/types/rialto.json | 14 + polkadot/diagrams/ARCHITECTURE.md | 13 + .../bridge-architecture-diagrams.drawio | 1 + polkadot/diagrams/bridge-relay.svg | 3 + .../diagrams/cross-chain-fund-transfer.svg | 3 + .../diagrams/currency-exchange-pallet.svg | 3 + polkadot/diagrams/ethereum-pallet.svg | 3 + polkadot/diagrams/general-overview.svg | 3 + polkadot/diagrams/parachain.svg | 3 + polkadot/docs/high-level-overview.md | 177 + polkadot/docs/high-level.html | 55 + polkadot/docs/plan.md | 22 + polkadot/docs/poa-eth.md | 71 + polkadot/docs/scenario1.html | 47 + polkadot/docs/send-message.md | 131 + polkadot/docs/testing-scenarios.md | 221 + polkadot/fuzz/storage-proof/Cargo.lock | 2362 ++++ polkadot/fuzz/storage-proof/Cargo.toml | 34 + polkadot/fuzz/storage-proof/README.md | 32 + polkadot/fuzz/storage-proof/src/main.rs | 84 + polkadot/modules/currency-exchange/Cargo.toml | 48 + .../currency-exchange/src/benchmarking.rs | 134 + polkadot/modules/currency-exchange/src/lib.rs | 496 + polkadot/modules/dispatch/Cargo.toml | 41 + polkadot/modules/dispatch/README.md | 61 + polkadot/modules/dispatch/src/lib.rs | 865 ++ .../ethereum-contract-builtin/Cargo.toml | 28 + .../ethereum-contract-builtin/src/lib.rs | 374 + polkadot/modules/ethereum/Cargo.toml | 49 + polkadot/modules/ethereum/src/benchmarking.rs | 270 + polkadot/modules/ethereum/src/error.rs | 101 + polkadot/modules/ethereum/src/finality.rs | 556 + polkadot/modules/ethereum/src/import.rs | 609 + polkadot/modules/ethereum/src/lib.rs | 1553 +++ polkadot/modules/ethereum/src/mock.rs | 192 + polkadot/modules/ethereum/src/test_utils.rs | 321 + polkadot/modules/ethereum/src/validators.rs | 476 + polkadot/modules/ethereum/src/verification.rs | 945 ++ polkadot/modules/grandpa/Cargo.toml | 59 + polkadot/modules/grandpa/src/benchmarking.rs | 272 + polkadot/modules/grandpa/src/lib.rs | 1036 ++ polkadot/modules/grandpa/src/mock.rs | 113 + polkadot/modules/grandpa/src/weights.rs | 121 + 
polkadot/modules/messages/Cargo.toml | 54 + polkadot/modules/messages/README.md | 391 + polkadot/modules/messages/src/benchmarking.rs | 830 ++ polkadot/modules/messages/src/inbound_lane.rs | 397 + .../modules/messages/src/instant_payments.rs | 251 + polkadot/modules/messages/src/lib.rs | 1589 +++ polkadot/modules/messages/src/mock.rs | 404 + .../modules/messages/src/outbound_lane.rs | 203 + polkadot/modules/messages/src/weights.rs | 289 + polkadot/modules/messages/src/weights_ext.rs | 319 + .../modules/shift-session-manager/Cargo.toml | 34 + .../modules/shift-session-manager/src/lib.rs | 228 + polkadot/primitives/chain-kusama/Cargo.toml | 28 + polkadot/primitives/chain-kusama/src/lib.rs | 117 + polkadot/primitives/chain-millau/Cargo.toml | 52 + polkadot/primitives/chain-millau/src/lib.rs | 359 + .../chain-millau/src/millau_hash.rs | 57 + polkadot/primitives/chain-polkadot/Cargo.toml | 29 + polkadot/primitives/chain-polkadot/src/lib.rs | 117 + polkadot/primitives/chain-rialto/Cargo.toml | 36 + polkadot/primitives/chain-rialto/src/lib.rs | 320 + polkadot/primitives/chain-rococo/Cargo.toml | 36 + polkadot/primitives/chain-rococo/src/lib.rs | 172 + polkadot/primitives/chain-westend/Cargo.toml | 36 + polkadot/primitives/chain-westend/src/lib.rs | 179 + .../primitives/currency-exchange/Cargo.toml | 25 + .../primitives/currency-exchange/src/lib.rs | 150 + polkadot/primitives/ethereum-poa/Cargo.toml | 57 + polkadot/primitives/ethereum-poa/src/lib.rs | 734 ++ .../primitives/ethereum-poa/src/signatures.rs | 143 + polkadot/primitives/header-chain/Cargo.toml | 36 + .../header-chain/src/justification.rs | 185 + polkadot/primitives/header-chain/src/lib.rs | 133 + .../header-chain/tests/justification.rs | 191 + .../primitives/message-dispatch/Cargo.toml | 18 + .../primitives/message-dispatch/src/lib.rs | 49 + polkadot/primitives/messages/Cargo.toml | 30 + polkadot/primitives/messages/src/lib.rs | 228 + .../primitives/messages/src/source_chain.rs | 192 + 
.../primitives/messages/src/target_chain.rs | 160 + polkadot/primitives/polkadot-core/Cargo.toml | 43 + polkadot/primitives/polkadot-core/src/lib.rs | 350 + polkadot/primitives/runtime/Cargo.toml | 41 + polkadot/primitives/runtime/src/chain.rs | 87 + polkadot/primitives/runtime/src/lib.rs | 136 + .../primitives/runtime/src/storage_proof.rs | 112 + polkadot/primitives/test-utils/Cargo.toml | 29 + polkadot/primitives/test-utils/src/keyring.rs | 96 + polkadot/primitives/test-utils/src/lib.rs | 237 + polkadot/relays/bin-ethereum/Cargo.toml | 48 + polkadot/relays/bin-ethereum/README.md | 7 + .../res/substrate-bridge-abi.json | 167 + .../res/substrate-bridge-bytecode.hex | 1 + .../res/substrate-bridge-metadata.txt | 5 + polkadot/relays/bin-ethereum/src/cli.yml | 166 + .../bin-ethereum/src/ethereum_client.rs | 653 ++ .../src/ethereum_deploy_contract.rs | 154 + .../bin-ethereum/src/ethereum_exchange.rs | 403 + .../src/ethereum_exchange_submit.rs | 114 + .../bin-ethereum/src/ethereum_sync_loop.rs | 298 + polkadot/relays/bin-ethereum/src/instances.rs | 115 + polkadot/relays/bin-ethereum/src/main.rs | 413 + .../relays/bin-ethereum/src/rialto_client.rs | 279 + .../relays/bin-ethereum/src/rpc_errors.rs | 85 + .../bin-ethereum/src/substrate_sync_loop.rs | 200 + .../bin-ethereum/src/substrate_types.rs | 76 + polkadot/relays/bin-substrate/Cargo.toml | 61 + .../relays/bin-substrate/src/chains/millau.rs | 101 + .../src/chains/millau_headers_to_rialto.rs | 53 + .../src/chains/millau_messages_to_rialto.rs | 245 + .../relays/bin-substrate/src/chains/mod.rs | 335 + .../relays/bin-substrate/src/chains/rialto.rs | 98 + .../src/chains/rialto_headers_to_millau.rs | 57 + .../src/chains/rialto_messages_to_millau.rs | 244 + .../relays/bin-substrate/src/chains/rococo.rs | 39 + .../src/chains/rococo_headers_to_westend.rs | 60 + .../bin-substrate/src/chains/westend.rs | 41 + .../src/chains/westend_headers_to_millau.rs | 62 + .../src/chains/westend_headers_to_rococo.rs | 60 + 
.../relays/bin-substrate/src/cli/bridge.rs | 96 + .../bin-substrate/src/cli/derive_account.rs | 102 + .../bin-substrate/src/cli/encode_call.rs | 275 + .../bin-substrate/src/cli/encode_message.rs | 106 + .../bin-substrate/src/cli/estimate_fee.rs | 128 + .../bin-substrate/src/cli/init_bridge.rs | 162 + polkadot/relays/bin-substrate/src/cli/mod.rs | 444 + .../bin-substrate/src/cli/relay_headers.rs | 110 + .../src/cli/relay_headers_and_messages.rs | 183 + .../bin-substrate/src/cli/relay_messages.rs | 71 + .../bin-substrate/src/cli/send_message.rs | 317 + .../bin-substrate/src/finality_pipeline.rs | 149 + .../bin-substrate/src/finality_target.rs | 91 + .../bin-substrate/src/headers_initialize.rs | 256 + polkadot/relays/bin-substrate/src/main.rs | 41 + .../relays/bin-substrate/src/messages_lane.rs | 209 + .../bin-substrate/src/messages_source.rs | 411 + .../bin-substrate/src/messages_target.rs | 232 + .../bin-substrate/src/on_demand_headers.rs | 255 + polkadot/relays/client-ethereum/Cargo.toml | 18 + polkadot/relays/client-ethereum/src/client.rs | 172 + polkadot/relays/client-ethereum/src/error.rs | 86 + polkadot/relays/client-ethereum/src/lib.rs | 48 + polkadot/relays/client-ethereum/src/rpc.rs | 51 + polkadot/relays/client-ethereum/src/sign.rs | 85 + polkadot/relays/client-ethereum/src/types.rs | 80 + polkadot/relays/client-kusama/Cargo.toml | 25 + polkadot/relays/client-kusama/src/lib.rs | 47 + polkadot/relays/client-millau/Cargo.toml | 25 + polkadot/relays/client-millau/src/lib.rs | 104 + polkadot/relays/client-polkadot/Cargo.toml | 25 + polkadot/relays/client-polkadot/src/lib.rs | 47 + polkadot/relays/client-rialto/Cargo.toml | 25 + polkadot/relays/client-rialto/src/lib.rs | 104 + polkadot/relays/client-rococo/Cargo.toml | 23 + polkadot/relays/client-rococo/src/lib.rs | 97 + polkadot/relays/client-substrate/Cargo.toml | 42 + polkadot/relays/client-substrate/src/chain.rs | 105 + .../relays/client-substrate/src/client.rs | 275 + 
polkadot/relays/client-substrate/src/error.rs | 105 + .../client-substrate/src/finality_source.rs | 135 + polkadot/relays/client-substrate/src/guard.rs | 373 + .../client-substrate/src/headers_source.rs | 108 + polkadot/relays/client-substrate/src/lib.rs | 60 + .../src/metrics/float_storage_value.rs | 82 + .../client-substrate/src/metrics/mod.rs | 23 + .../src/metrics/storage_proof_overhead.rs | 104 + polkadot/relays/client-substrate/src/rpc.rs | 53 + .../client-substrate/src/sync_header.rs | 73 + polkadot/relays/client-westend/Cargo.toml | 25 + polkadot/relays/client-westend/src/lib.rs | 97 + polkadot/relays/exchange/Cargo.toml | 16 + polkadot/relays/exchange/src/exchange.rs | 916 ++ polkadot/relays/exchange/src/exchange_loop.rs | 315 + .../exchange/src/exchange_loop_metrics.rs | 97 + polkadot/relays/exchange/src/lib.rs | 26 + polkadot/relays/finality/Cargo.toml | 21 + polkadot/relays/finality/src/finality_loop.rs | 599 + .../finality/src/finality_loop_tests.rs | 404 + polkadot/relays/finality/src/lib.rs | 53 + polkadot/relays/headers/Cargo.toml | 17 + polkadot/relays/headers/src/headers.rs | 1721 +++ polkadot/relays/headers/src/lib.rs | 33 + polkadot/relays/headers/src/sync.rs | 523 + polkadot/relays/headers/src/sync_loop.rs | 637 ++ .../relays/headers/src/sync_loop_metrics.rs | 108 + .../relays/headers/src/sync_loop_tests.rs | 594 + polkadot/relays/headers/src/sync_types.rs | 189 + polkadot/relays/messages/Cargo.toml | 19 + polkadot/relays/messages/src/lib.rs | 36 + polkadot/relays/messages/src/message_lane.rs | 52 + .../relays/messages/src/message_lane_loop.rs | 865 ++ .../messages/src/message_race_delivery.rs | 879 ++ .../relays/messages/src/message_race_loop.rs | 627 ++ .../messages/src/message_race_receiving.rs | 236 + .../messages/src/message_race_strategy.rs | 488 + polkadot/relays/messages/src/metrics.rs | 110 + polkadot/relays/utils/Cargo.toml | 25 + polkadot/relays/utils/src/initialize.rs | 95 + polkadot/relays/utils/src/lib.rs | 277 + 
polkadot/relays/utils/src/metrics.rs | 162 + .../utils/src/metrics/float_json_value.rs | 121 + polkadot/relays/utils/src/metrics/global.rs | 111 + polkadot/relays/utils/src/relay_loop.rs | 256 + polkadot/rustfmt.toml | 3 + polkadot/scripts/add_license.sh | 22 + polkadot/scripts/ci-cache.sh | 19 + polkadot/scripts/dump-logs.sh | 35 + polkadot/scripts/license_header | 16 + polkadot/scripts/run-eth2sub-relay.sh | 6 + polkadot/scripts/run-openethereum-node.sh | 11 + .../send-message-from-millau-rialto.sh | 37 + .../send-message-from-rialto-millau.sh | 37 + polkadot/scripts/update-weights.sh | 31 + polkadot/scripts/update_substrate.sh | 10 + 339 files changed, 71658 insertions(+) create mode 100644 polkadot/.dependabot/config.yml create mode 100644 polkadot/.dockerignore create mode 100644 polkadot/.editorconfig create mode 100644 polkadot/.github/workflows/deny.yml create mode 100644 polkadot/.github/workflows/lint.yml create mode 100644 polkadot/.github/workflows/publish-deps.yml create mode 100644 polkadot/.github/workflows/publish-docker.yml create mode 100644 polkadot/.github/workflows/rust.yml create mode 100644 polkadot/.gitignore create mode 100644 polkadot/.maintain/rialto-weight-template.hbs create mode 100644 polkadot/CODE_OF_CONDUCT.md create mode 100644 polkadot/Cargo.lock create mode 100644 polkadot/Cargo.toml create mode 100644 polkadot/Dockerfile create mode 100644 polkadot/LICENSE create mode 100644 polkadot/README.md create mode 100644 polkadot/bin/.keep create mode 100644 polkadot/bin/millau/node/Cargo.toml create mode 100644 polkadot/bin/millau/node/build.rs create mode 100644 polkadot/bin/millau/node/src/chain_spec.rs create mode 100644 polkadot/bin/millau/node/src/cli.rs create mode 100644 polkadot/bin/millau/node/src/command.rs create mode 100644 polkadot/bin/millau/node/src/lib.rs create mode 100644 polkadot/bin/millau/node/src/main.rs create mode 100644 polkadot/bin/millau/node/src/service.rs create mode 100644 
polkadot/bin/millau/runtime/Cargo.toml create mode 100644 polkadot/bin/millau/runtime/build.rs create mode 100644 polkadot/bin/millau/runtime/src/lib.rs create mode 100644 polkadot/bin/millau/runtime/src/rialto_messages.rs create mode 100644 polkadot/bin/rialto/node/Cargo.toml create mode 100644 polkadot/bin/rialto/node/build.rs create mode 100644 polkadot/bin/rialto/node/src/chain_spec.rs create mode 100644 polkadot/bin/rialto/node/src/cli.rs create mode 100644 polkadot/bin/rialto/node/src/command.rs create mode 100644 polkadot/bin/rialto/node/src/main.rs create mode 100644 polkadot/bin/rialto/node/src/service.rs create mode 100644 polkadot/bin/rialto/runtime/Cargo.toml create mode 100644 polkadot/bin/rialto/runtime/build.rs create mode 100644 polkadot/bin/rialto/runtime/src/benches.rs create mode 100644 polkadot/bin/rialto/runtime/src/exchange.rs create mode 100644 polkadot/bin/rialto/runtime/src/kovan.rs create mode 100644 polkadot/bin/rialto/runtime/src/lib.rs create mode 100644 polkadot/bin/rialto/runtime/src/millau_messages.rs create mode 100644 polkadot/bin/rialto/runtime/src/rialto_poa.rs create mode 100644 polkadot/bin/runtime-common/Cargo.toml create mode 100644 polkadot/bin/runtime-common/README.md create mode 100644 polkadot/bin/runtime-common/src/lib.rs create mode 100644 polkadot/bin/runtime-common/src/messages.rs create mode 100644 polkadot/bin/runtime-common/src/messages_benchmarking.rs create mode 100644 polkadot/deny.toml create mode 100644 polkadot/deployments/BridgeDeps.Dockerfile create mode 100644 polkadot/deployments/README.md create mode 100644 polkadot/deployments/bridges/poa-rialto/Front-end.Dockerfile create mode 100644 polkadot/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-exchange-dashboard.json create mode 100644 polkadot/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-headers-dashboard.json create mode 100644 
polkadot/deployments/bridges/poa-rialto/dashboard/grafana/relay-rialto-to-poa-headers-dashboard.json create mode 100644 polkadot/deployments/bridges/poa-rialto/dashboard/prometheus/targets.yml create mode 100644 polkadot/deployments/bridges/poa-rialto/docker-compose.yml create mode 100755 polkadot/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh create mode 100755 polkadot/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh create mode 100755 polkadot/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh create mode 100755 polkadot/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh create mode 100644 polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/address_book.json create mode 100644 polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/arthur.json create mode 100644 polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/bertha.json create mode 100644 polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/carlos.json create mode 100644 polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/diego.json create mode 100644 polkadot/deployments/bridges/poa-rialto/poa-config/pass create mode 100644 polkadot/deployments/bridges/poa-rialto/poa-config/poa-node-config create mode 100644 polkadot/deployments/bridges/poa-rialto/poa-config/poa.json create mode 100644 polkadot/deployments/bridges/poa-rialto/poa-config/reserved create mode 100644 polkadot/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json create mode 100644 polkadot/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json create mode 100644 polkadot/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json create mode 100644 polkadot/deployments/bridges/rialto-millau/dashboard/prometheus/targets.yml 
create mode 100644 polkadot/deployments/bridges/rialto-millau/docker-compose.yml create mode 100755 polkadot/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh create mode 100755 polkadot/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh create mode 100755 polkadot/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh create mode 100755 polkadot/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh create mode 100755 polkadot/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh create mode 100644 polkadot/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json create mode 100644 polkadot/deployments/bridges/westend-millau/dashboard/prometheus/targets.yml create mode 100644 polkadot/deployments/bridges/westend-millau/docker-compose.yml create mode 100755 polkadot/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh create mode 100644 polkadot/deployments/dev/poa-config/keys/BridgePoa/address_book.json create mode 100644 polkadot/deployments/dev/poa-config/keys/BridgePoa/arthur.json create mode 100644 polkadot/deployments/dev/poa-config/pass create mode 100644 polkadot/deployments/dev/poa-config/poa-node-config create mode 100644 polkadot/deployments/dev/poa-config/poa.json create mode 100755 polkadot/deployments/local-scripts/bridge-entrypoint.sh create mode 100755 polkadot/deployments/local-scripts/relay-headers-rococo-to-westend.sh create mode 100755 polkadot/deployments/local-scripts/relay-headers-westend-to-rococo.sh create mode 100755 polkadot/deployments/local-scripts/relay-messages-millau-to-rialto.sh create mode 100755 polkadot/deployments/local-scripts/relay-messages-rialto-to-millau.sh create mode 100755 polkadot/deployments/local-scripts/relay-millau-to-rialto.sh create mode 100755 
polkadot/deployments/local-scripts/relay-rialto-to-millau.sh create mode 100755 polkadot/deployments/local-scripts/run-millau-node.sh create mode 100755 polkadot/deployments/local-scripts/run-rialto-node.sh create mode 100755 polkadot/deployments/local-scripts/run-rococo-bob-node.sh create mode 100755 polkadot/deployments/local-scripts/run-rococo-node.sh create mode 100755 polkadot/deployments/local-scripts/run-westend-node.sh create mode 100644 polkadot/deployments/monitoring/GrafanaMatrix.Dockerfile create mode 100644 polkadot/deployments/monitoring/disabled.yml create mode 100644 polkadot/deployments/monitoring/docker-compose.yml create mode 100644 polkadot/deployments/monitoring/grafana-matrix/config.yml create mode 100644 polkadot/deployments/monitoring/grafana/provisioning/dashboards/grafana-dashboard.yaml create mode 100644 polkadot/deployments/monitoring/grafana/provisioning/datasources/grafana-datasource.yaml create mode 100644 polkadot/deployments/monitoring/grafana/provisioning/notifiers/grafana-notifier.yaml create mode 100644 polkadot/deployments/monitoring/prometheus/prometheus.yml create mode 100644 polkadot/deployments/networks/OpenEthereum.Dockerfile create mode 100644 polkadot/deployments/networks/eth-poa.yml create mode 100644 polkadot/deployments/networks/millau.yml create mode 100644 polkadot/deployments/networks/rialto.yml create mode 100644 polkadot/deployments/reverse-proxy/README.md create mode 100644 polkadot/deployments/reverse-proxy/docker-compose.yml create mode 100755 polkadot/deployments/run.sh create mode 100644 polkadot/deployments/types-millau.json create mode 100644 polkadot/deployments/types-rialto.json create mode 100755 polkadot/deployments/types/build.sh create mode 100644 polkadot/deployments/types/common.json create mode 100644 polkadot/deployments/types/millau.json create mode 100644 polkadot/deployments/types/rialto.json create mode 100644 polkadot/diagrams/ARCHITECTURE.md create mode 100644 
polkadot/diagrams/bridge-architecture-diagrams.drawio create mode 100644 polkadot/diagrams/bridge-relay.svg create mode 100644 polkadot/diagrams/cross-chain-fund-transfer.svg create mode 100644 polkadot/diagrams/currency-exchange-pallet.svg create mode 100644 polkadot/diagrams/ethereum-pallet.svg create mode 100644 polkadot/diagrams/general-overview.svg create mode 100644 polkadot/diagrams/parachain.svg create mode 100644 polkadot/docs/high-level-overview.md create mode 100644 polkadot/docs/high-level.html create mode 100644 polkadot/docs/plan.md create mode 100644 polkadot/docs/poa-eth.md create mode 100644 polkadot/docs/scenario1.html create mode 100644 polkadot/docs/send-message.md create mode 100644 polkadot/docs/testing-scenarios.md create mode 100644 polkadot/fuzz/storage-proof/Cargo.lock create mode 100644 polkadot/fuzz/storage-proof/Cargo.toml create mode 100644 polkadot/fuzz/storage-proof/README.md create mode 100644 polkadot/fuzz/storage-proof/src/main.rs create mode 100644 polkadot/modules/currency-exchange/Cargo.toml create mode 100644 polkadot/modules/currency-exchange/src/benchmarking.rs create mode 100644 polkadot/modules/currency-exchange/src/lib.rs create mode 100644 polkadot/modules/dispatch/Cargo.toml create mode 100644 polkadot/modules/dispatch/README.md create mode 100644 polkadot/modules/dispatch/src/lib.rs create mode 100644 polkadot/modules/ethereum-contract-builtin/Cargo.toml create mode 100644 polkadot/modules/ethereum-contract-builtin/src/lib.rs create mode 100644 polkadot/modules/ethereum/Cargo.toml create mode 100644 polkadot/modules/ethereum/src/benchmarking.rs create mode 100644 polkadot/modules/ethereum/src/error.rs create mode 100644 polkadot/modules/ethereum/src/finality.rs create mode 100644 polkadot/modules/ethereum/src/import.rs create mode 100644 polkadot/modules/ethereum/src/lib.rs create mode 100644 polkadot/modules/ethereum/src/mock.rs create mode 100644 polkadot/modules/ethereum/src/test_utils.rs create mode 100644 
polkadot/modules/ethereum/src/validators.rs create mode 100644 polkadot/modules/ethereum/src/verification.rs create mode 100644 polkadot/modules/grandpa/Cargo.toml create mode 100644 polkadot/modules/grandpa/src/benchmarking.rs create mode 100644 polkadot/modules/grandpa/src/lib.rs create mode 100644 polkadot/modules/grandpa/src/mock.rs create mode 100644 polkadot/modules/grandpa/src/weights.rs create mode 100644 polkadot/modules/messages/Cargo.toml create mode 100644 polkadot/modules/messages/README.md create mode 100644 polkadot/modules/messages/src/benchmarking.rs create mode 100644 polkadot/modules/messages/src/inbound_lane.rs create mode 100644 polkadot/modules/messages/src/instant_payments.rs create mode 100644 polkadot/modules/messages/src/lib.rs create mode 100644 polkadot/modules/messages/src/mock.rs create mode 100644 polkadot/modules/messages/src/outbound_lane.rs create mode 100644 polkadot/modules/messages/src/weights.rs create mode 100644 polkadot/modules/messages/src/weights_ext.rs create mode 100644 polkadot/modules/shift-session-manager/Cargo.toml create mode 100644 polkadot/modules/shift-session-manager/src/lib.rs create mode 100644 polkadot/primitives/chain-kusama/Cargo.toml create mode 100644 polkadot/primitives/chain-kusama/src/lib.rs create mode 100644 polkadot/primitives/chain-millau/Cargo.toml create mode 100644 polkadot/primitives/chain-millau/src/lib.rs create mode 100644 polkadot/primitives/chain-millau/src/millau_hash.rs create mode 100644 polkadot/primitives/chain-polkadot/Cargo.toml create mode 100644 polkadot/primitives/chain-polkadot/src/lib.rs create mode 100644 polkadot/primitives/chain-rialto/Cargo.toml create mode 100644 polkadot/primitives/chain-rialto/src/lib.rs create mode 100644 polkadot/primitives/chain-rococo/Cargo.toml create mode 100644 polkadot/primitives/chain-rococo/src/lib.rs create mode 100644 polkadot/primitives/chain-westend/Cargo.toml create mode 100644 polkadot/primitives/chain-westend/src/lib.rs create mode 
100644 polkadot/primitives/currency-exchange/Cargo.toml create mode 100644 polkadot/primitives/currency-exchange/src/lib.rs create mode 100644 polkadot/primitives/ethereum-poa/Cargo.toml create mode 100644 polkadot/primitives/ethereum-poa/src/lib.rs create mode 100644 polkadot/primitives/ethereum-poa/src/signatures.rs create mode 100644 polkadot/primitives/header-chain/Cargo.toml create mode 100644 polkadot/primitives/header-chain/src/justification.rs create mode 100644 polkadot/primitives/header-chain/src/lib.rs create mode 100644 polkadot/primitives/header-chain/tests/justification.rs create mode 100644 polkadot/primitives/message-dispatch/Cargo.toml create mode 100644 polkadot/primitives/message-dispatch/src/lib.rs create mode 100644 polkadot/primitives/messages/Cargo.toml create mode 100644 polkadot/primitives/messages/src/lib.rs create mode 100644 polkadot/primitives/messages/src/source_chain.rs create mode 100644 polkadot/primitives/messages/src/target_chain.rs create mode 100644 polkadot/primitives/polkadot-core/Cargo.toml create mode 100644 polkadot/primitives/polkadot-core/src/lib.rs create mode 100644 polkadot/primitives/runtime/Cargo.toml create mode 100644 polkadot/primitives/runtime/src/chain.rs create mode 100644 polkadot/primitives/runtime/src/lib.rs create mode 100644 polkadot/primitives/runtime/src/storage_proof.rs create mode 100644 polkadot/primitives/test-utils/Cargo.toml create mode 100644 polkadot/primitives/test-utils/src/keyring.rs create mode 100644 polkadot/primitives/test-utils/src/lib.rs create mode 100644 polkadot/relays/bin-ethereum/Cargo.toml create mode 100644 polkadot/relays/bin-ethereum/README.md create mode 100644 polkadot/relays/bin-ethereum/res/substrate-bridge-abi.json create mode 100644 polkadot/relays/bin-ethereum/res/substrate-bridge-bytecode.hex create mode 100644 polkadot/relays/bin-ethereum/res/substrate-bridge-metadata.txt create mode 100644 polkadot/relays/bin-ethereum/src/cli.yml create mode 100644 
polkadot/relays/bin-ethereum/src/ethereum_client.rs create mode 100644 polkadot/relays/bin-ethereum/src/ethereum_deploy_contract.rs create mode 100644 polkadot/relays/bin-ethereum/src/ethereum_exchange.rs create mode 100644 polkadot/relays/bin-ethereum/src/ethereum_exchange_submit.rs create mode 100644 polkadot/relays/bin-ethereum/src/ethereum_sync_loop.rs create mode 100644 polkadot/relays/bin-ethereum/src/instances.rs create mode 100644 polkadot/relays/bin-ethereum/src/main.rs create mode 100644 polkadot/relays/bin-ethereum/src/rialto_client.rs create mode 100644 polkadot/relays/bin-ethereum/src/rpc_errors.rs create mode 100644 polkadot/relays/bin-ethereum/src/substrate_sync_loop.rs create mode 100644 polkadot/relays/bin-ethereum/src/substrate_types.rs create mode 100644 polkadot/relays/bin-substrate/Cargo.toml create mode 100644 polkadot/relays/bin-substrate/src/chains/millau.rs create mode 100644 polkadot/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs create mode 100644 polkadot/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs create mode 100644 polkadot/relays/bin-substrate/src/chains/mod.rs create mode 100644 polkadot/relays/bin-substrate/src/chains/rialto.rs create mode 100644 polkadot/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs create mode 100644 polkadot/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs create mode 100644 polkadot/relays/bin-substrate/src/chains/rococo.rs create mode 100644 polkadot/relays/bin-substrate/src/chains/rococo_headers_to_westend.rs create mode 100644 polkadot/relays/bin-substrate/src/chains/westend.rs create mode 100644 polkadot/relays/bin-substrate/src/chains/westend_headers_to_millau.rs create mode 100644 polkadot/relays/bin-substrate/src/chains/westend_headers_to_rococo.rs create mode 100644 polkadot/relays/bin-substrate/src/cli/bridge.rs create mode 100644 polkadot/relays/bin-substrate/src/cli/derive_account.rs create mode 100644 
polkadot/relays/bin-substrate/src/cli/encode_call.rs create mode 100644 polkadot/relays/bin-substrate/src/cli/encode_message.rs create mode 100644 polkadot/relays/bin-substrate/src/cli/estimate_fee.rs create mode 100644 polkadot/relays/bin-substrate/src/cli/init_bridge.rs create mode 100644 polkadot/relays/bin-substrate/src/cli/mod.rs create mode 100644 polkadot/relays/bin-substrate/src/cli/relay_headers.rs create mode 100644 polkadot/relays/bin-substrate/src/cli/relay_headers_and_messages.rs create mode 100644 polkadot/relays/bin-substrate/src/cli/relay_messages.rs create mode 100644 polkadot/relays/bin-substrate/src/cli/send_message.rs create mode 100644 polkadot/relays/bin-substrate/src/finality_pipeline.rs create mode 100644 polkadot/relays/bin-substrate/src/finality_target.rs create mode 100644 polkadot/relays/bin-substrate/src/headers_initialize.rs create mode 100644 polkadot/relays/bin-substrate/src/main.rs create mode 100644 polkadot/relays/bin-substrate/src/messages_lane.rs create mode 100644 polkadot/relays/bin-substrate/src/messages_source.rs create mode 100644 polkadot/relays/bin-substrate/src/messages_target.rs create mode 100644 polkadot/relays/bin-substrate/src/on_demand_headers.rs create mode 100644 polkadot/relays/client-ethereum/Cargo.toml create mode 100644 polkadot/relays/client-ethereum/src/client.rs create mode 100644 polkadot/relays/client-ethereum/src/error.rs create mode 100644 polkadot/relays/client-ethereum/src/lib.rs create mode 100644 polkadot/relays/client-ethereum/src/rpc.rs create mode 100644 polkadot/relays/client-ethereum/src/sign.rs create mode 100644 polkadot/relays/client-ethereum/src/types.rs create mode 100644 polkadot/relays/client-kusama/Cargo.toml create mode 100644 polkadot/relays/client-kusama/src/lib.rs create mode 100644 polkadot/relays/client-millau/Cargo.toml create mode 100644 polkadot/relays/client-millau/src/lib.rs create mode 100644 polkadot/relays/client-polkadot/Cargo.toml create mode 100644 
polkadot/relays/client-polkadot/src/lib.rs create mode 100644 polkadot/relays/client-rialto/Cargo.toml create mode 100644 polkadot/relays/client-rialto/src/lib.rs create mode 100644 polkadot/relays/client-rococo/Cargo.toml create mode 100644 polkadot/relays/client-rococo/src/lib.rs create mode 100644 polkadot/relays/client-substrate/Cargo.toml create mode 100644 polkadot/relays/client-substrate/src/chain.rs create mode 100644 polkadot/relays/client-substrate/src/client.rs create mode 100644 polkadot/relays/client-substrate/src/error.rs create mode 100644 polkadot/relays/client-substrate/src/finality_source.rs create mode 100644 polkadot/relays/client-substrate/src/guard.rs create mode 100644 polkadot/relays/client-substrate/src/headers_source.rs create mode 100644 polkadot/relays/client-substrate/src/lib.rs create mode 100644 polkadot/relays/client-substrate/src/metrics/float_storage_value.rs create mode 100644 polkadot/relays/client-substrate/src/metrics/mod.rs create mode 100644 polkadot/relays/client-substrate/src/metrics/storage_proof_overhead.rs create mode 100644 polkadot/relays/client-substrate/src/rpc.rs create mode 100644 polkadot/relays/client-substrate/src/sync_header.rs create mode 100644 polkadot/relays/client-westend/Cargo.toml create mode 100644 polkadot/relays/client-westend/src/lib.rs create mode 100644 polkadot/relays/exchange/Cargo.toml create mode 100644 polkadot/relays/exchange/src/exchange.rs create mode 100644 polkadot/relays/exchange/src/exchange_loop.rs create mode 100644 polkadot/relays/exchange/src/exchange_loop_metrics.rs create mode 100644 polkadot/relays/exchange/src/lib.rs create mode 100644 polkadot/relays/finality/Cargo.toml create mode 100644 polkadot/relays/finality/src/finality_loop.rs create mode 100644 polkadot/relays/finality/src/finality_loop_tests.rs create mode 100644 polkadot/relays/finality/src/lib.rs create mode 100644 polkadot/relays/headers/Cargo.toml create mode 100644 polkadot/relays/headers/src/headers.rs create 
mode 100644 polkadot/relays/headers/src/lib.rs create mode 100644 polkadot/relays/headers/src/sync.rs create mode 100644 polkadot/relays/headers/src/sync_loop.rs create mode 100644 polkadot/relays/headers/src/sync_loop_metrics.rs create mode 100644 polkadot/relays/headers/src/sync_loop_tests.rs create mode 100644 polkadot/relays/headers/src/sync_types.rs create mode 100644 polkadot/relays/messages/Cargo.toml create mode 100644 polkadot/relays/messages/src/lib.rs create mode 100644 polkadot/relays/messages/src/message_lane.rs create mode 100644 polkadot/relays/messages/src/message_lane_loop.rs create mode 100644 polkadot/relays/messages/src/message_race_delivery.rs create mode 100644 polkadot/relays/messages/src/message_race_loop.rs create mode 100644 polkadot/relays/messages/src/message_race_receiving.rs create mode 100644 polkadot/relays/messages/src/message_race_strategy.rs create mode 100644 polkadot/relays/messages/src/metrics.rs create mode 100644 polkadot/relays/utils/Cargo.toml create mode 100644 polkadot/relays/utils/src/initialize.rs create mode 100644 polkadot/relays/utils/src/lib.rs create mode 100644 polkadot/relays/utils/src/metrics.rs create mode 100644 polkadot/relays/utils/src/metrics/float_json_value.rs create mode 100644 polkadot/relays/utils/src/metrics/global.rs create mode 100644 polkadot/relays/utils/src/relay_loop.rs create mode 100644 polkadot/rustfmt.toml create mode 100755 polkadot/scripts/add_license.sh create mode 100755 polkadot/scripts/ci-cache.sh create mode 100644 polkadot/scripts/dump-logs.sh create mode 100644 polkadot/scripts/license_header create mode 100755 polkadot/scripts/run-eth2sub-relay.sh create mode 100755 polkadot/scripts/run-openethereum-node.sh create mode 100755 polkadot/scripts/send-message-from-millau-rialto.sh create mode 100755 polkadot/scripts/send-message-from-rialto-millau.sh create mode 100755 polkadot/scripts/update-weights.sh create mode 100755 polkadot/scripts/update_substrate.sh diff --git 
a/polkadot/.dependabot/config.yml b/polkadot/.dependabot/config.yml new file mode 100644 index 00000000000..61599ccba92 --- /dev/null +++ b/polkadot/.dependabot/config.yml @@ -0,0 +1,22 @@ +version: 1 +update_configs: + - package_manager: "rust:cargo" + directory: "/" + update_schedule: "weekly" + ignored_updates: + - match: + dependency_name: "sp-*" + - match: + dependency_name: "sc-*" + - match: + dependency_name: "substrate-*" + - match: + dependency_name: "frame-*" + - match: + dependency_name: "pallet-*" + - match: + dependency_name: "node-inspect" + automerged_updates: + - match: + update_type: "all" + version_requirement_updates: "auto" diff --git a/polkadot/.dockerignore b/polkadot/.dockerignore new file mode 100644 index 00000000000..f4ceea78560 --- /dev/null +++ b/polkadot/.dockerignore @@ -0,0 +1 @@ +**/target/ diff --git a/polkadot/.editorconfig b/polkadot/.editorconfig new file mode 100644 index 00000000000..d67ffe8f90f --- /dev/null +++ b/polkadot/.editorconfig @@ -0,0 +1,16 @@ +root = true +[*] +indent_style=tab +indent_size=tab +tab_width=4 +end_of_line=lf +charset=utf-8 +trim_trailing_whitespace=true +max_line_length=100 +insert_final_newline=true + +[*.{yml,md,yaml,sh}] +indent_style=space +indent_size=2 +tab_width=8 +end_of_line=lf diff --git a/polkadot/.github/workflows/deny.yml b/polkadot/.github/workflows/deny.yml new file mode 100644 index 00000000000..9f9f7264ae9 --- /dev/null +++ b/polkadot/.github/workflows/deny.yml @@ -0,0 +1,39 @@ +name: Cargo deny + +on: + pull_request: + schedule: + - cron: '0 0 * * *' + push: + branches: + - master + tags: + - v* + paths-ignore: + - '**.md' + - diagrams/* + - docs/* +jobs: + cargo-deny: + runs-on: ubuntu-latest + strategy: + matrix: + checks: + - advisories + - bans licenses sources + # Prevent sudden announcement of a new advisory from failing CI: + continue-on-error: ${{ matrix.checks == 'advisories' }} + steps: + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.4.1 + with: + 
access_token: ${{ github.token }} + - name: Checkout sources & submodules + uses: actions/checkout@master + with: + fetch-depth: 5 + submodules: recursive + - name: Cargo deny + uses: EmbarkStudios/cargo-deny-action@v1 + with: + command: check ${{ matrix.checks }} diff --git a/polkadot/.github/workflows/lint.yml b/polkadot/.github/workflows/lint.yml new file mode 100644 index 00000000000..4ebd12e0d6f --- /dev/null +++ b/polkadot/.github/workflows/lint.yml @@ -0,0 +1,43 @@ +name: Check style + +on: + pull_request: + push: + branches: + - master + tags: + - v* + paths-ignore: + - '**.md' + - diagrams/* + - docs/* + schedule: # Weekly build + - cron: '0 0 * * 0' +jobs: +## Check stage + check-fmt: + name: Check RustFmt + runs-on: ubuntu-latest + env: + RUST_BACKTRACE: full + steps: + + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.4.1 + with: + access_token: ${{ github.token }} + + - name: Checkout sources & submodules + uses: actions/checkout@master + with: + fetch-depth: 5 + submodules: recursive + + - name: Add rustfmt + run: rustup component add rustfmt + + - name: rust-fmt check + uses: actions-rs/cargo@master + with: + command: fmt + args: --all -- --check diff --git a/polkadot/.github/workflows/publish-deps.yml b/polkadot/.github/workflows/publish-deps.yml new file mode 100644 index 00000000000..16d56a5d780 --- /dev/null +++ b/polkadot/.github/workflows/publish-deps.yml @@ -0,0 +1,76 @@ +name: Publish Dependencies to Docker hub + +on: + push: + tags: + - v* + paths-ignore: + - '**.md' + - diagrams/* + - docs/* + schedule: # Weekly build + - cron: '0 0 * * 0' + +jobs: + ## Publish to Docker hub + publish: + name: Publishing + runs-on: ubuntu-latest + container: + image: docker:git + steps: + + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.4.1 + with: + access_token: ${{ github.token }} + + - name: Checkout sources & submodules + uses: actions/checkout@v2 + with: + fetch-depth: 5 + submodules: recursive + + - name: 
Set up Docker Buildx + uses: docker/setup-buildx-action@v1 + + - name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKER_USER }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Prepare + id: prep + run: | + DOCKER_IMAGE=paritytech/bridge-dependencies + VERSION=latest + if [[ $GITHUB_REF == refs/tags/* ]]; then + VERSION=${GITHUB_REF#refs/tags/} + elif [[ $GITHUB_REF == refs/heads/* ]]; then + VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g') + fi + TAGS=${DOCKER_IMAGE}:${VERSION} + TAGS=$TAGS,${DOCKER_IMAGE}:sha-${GITHUB_SHA::8} + echo ::set-output name=TAGS::${TAGS} + echo ::set-output name=DATE::$(date +%d-%m-%Y) + + - name: Build and push + uses: docker/build-push-action@v2 + with: + file: deployments/BridgeDeps.Dockerfile + push: true + cache-from: type=registry,ref=paritytech/bridge-dependencies:latest + cache-to: type=inline + tags: ${{ steps.prep.outputs.TAGS }} + labels: | + org.opencontainers.image.title=bridge-dependencies + org.opencontainers.image.description=bridge-dependencies - component of Parity Bridges Common + org.opencontainers.image.source=${{ github.event.repository.html_url }} + org.opencontainers.image.url=https://github.com/paritytech/parity-bridges-common + org.opencontainers.image.documentation=https://github.com/paritytech/parity-bridges-common/README.md + org.opencontainers.image.created=${{ steps.prep.outputs.DATE }} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.authors=devops-team@parity.io + org.opencontainers.image.vendor=Parity Technologies + org.opencontainers.image.licenses=GPL-3.0 License diff --git a/polkadot/.github/workflows/publish-docker.yml b/polkadot/.github/workflows/publish-docker.yml new file mode 100644 index 00000000000..5a4670b6ea1 --- /dev/null +++ b/polkadot/.github/workflows/publish-docker.yml @@ -0,0 +1,93 @@ +name: Publish images to Docker hub + +on: + push: + tags: + - v* + paths-ignore: + - '**.md' + - diagrams/* + - 
docs/* + schedule: # Nightly build + - cron: '0 1 * * *' + +jobs: + ## Publish to Docker hub + publish: + name: Publishing + strategy: + matrix: + project: + - rialto-bridge-node + - millau-bridge-node + - ethereum-poa-relay + - substrate-relay + include: + - project: rialto-bridge-node + healthcheck: http://localhost:9933/health + - project: millau-bridge-node + healthcheck: http://localhost:9933/health + - project: ethereum-poa-relay + healthcheck: http://localhost:9616/metrics + - project: substrate-relay + healthcheck: http://localhost:9616/metrics + + runs-on: ubuntu-latest + steps: + + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.4.1 + with: + access_token: ${{ github.token }} + + - name: Checkout sources & submodules + uses: actions/checkout@v2 + with: + fetch-depth: 5 + submodules: recursive + + - name: Prepare + id: prep + run: | + if [[ $GITHUB_REF == refs/tags/* ]]; then + VERSION=${GITHUB_REF#refs/tags/} + elif [[ $GITHUB_REF == refs/heads/* ]]; then + VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g') + fi + TAGS="${VERSION} sha-${GITHUB_SHA::8} latest" + echo ::set-output name=TAGS::${VERSION} + echo ::set-output name=TAGS::${TAGS} + echo ::set-output name=DATE::$(date +%d-%m-%Y) + + - name: Workaround rootless build + run: | + sudo apt-get install fuse-overlayfs + mkdir -vp ~/.config/containers + printf "[storage.options]\nmount_program=\"/usr/bin/fuse-overlayfs\"" > ~/.config/containers/storage.conf + + - name: Build image for ${{ matrix.project }} + uses: redhat-actions/buildah-build@v2.2 + with: + image: ${{ matrix.project }} + tags: ${{ steps.prep.outputs.TAGS }} + dockerfiles: ./Dockerfile + build-args: | + PROJECT=${{ matrix.project }} + HEALTH=${{ matrix.healthcheck }} + VCS_REF=sha-${GITHUB_SHA::8} + BUILD_DATE=${{ steps.prep.outputs.DATE }} + VERSION=${{ steps.prep.outputs.VERSION }} + + - name: Push ${{ matrix.project }} image to docker.io + id: push-to-dockerhub + uses: 
redhat-actions/push-to-registry@v2.1.1 + with: + registry: docker.io/paritytech + image: ${{ matrix.project }} + tags: ${{ steps.prep.outputs.TAGS }} + username: ${{ secrets.DOCKER_USER }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Check the image + run: | + echo "New image has been pushed to ${{ steps.push-to-dockerhub.outputs.registry-path }}" diff --git a/polkadot/.github/workflows/rust.yml b/polkadot/.github/workflows/rust.yml new file mode 100644 index 00000000000..e6f7939efbc --- /dev/null +++ b/polkadot/.github/workflows/rust.yml @@ -0,0 +1,175 @@ +name: Compilation and Testing Suite + +on: + pull_request: + push: + branches: + - master + tags: + - v* + paths-ignore: + - '**.md' + - diagrams/* + - docs/* + schedule: # Weekly build + - cron: '0 0 * * 0' +jobs: + +## Check Stage + check-test: + name: Check and test + strategy: + matrix: + toolchain: + - stable + #- beta + - nightly-2021-04-10 + runs-on: ubuntu-latest + env: + RUST_BACKTRACE: full + NIGHTLY: nightly-2021-04-10 #if necessary, specify the version, nightly-2020-10-04, etc. 
+ steps: + + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.4.1 + with: + access_token: ${{ github.token }} + + - name: Checkout sources & submodules + uses: actions/checkout@master + with: + fetch-depth: 5 + submodules: recursive + + - name: Install Toolchain + run: rustup toolchain add $NIGHTLY + + - name: Add WASM Utilities + run: rustup target add wasm32-unknown-unknown --toolchain $NIGHTLY + + - name: Checking rust-${{ matrix.toolchain }} + uses: actions-rs/cargo@master + with: + command: check + toolchain: ${{ matrix.toolchain }} + args: --all --verbose + +## Test Stage + - name: Testing rust-${{ matrix.toolchain }} + uses: actions-rs/cargo@master + if: matrix.toolchain == 'stable' + with: + command: test + toolchain: ${{ matrix.toolchain }} + args: --all --verbose + +## Check Node Benchmarks + - name: Check Rialto benchmarks runtime ${{ matrix.platform }} rust-${{ matrix.toolchain }} + uses: actions-rs/cargo@master + with: + command: check + toolchain: ${{ matrix.toolchain }} + args: -p rialto-runtime --features runtime-benchmarks --verbose + + - name: Check Millau benchmarks runtime ${{ matrix.platform }} rust-${{ matrix.toolchain }} + uses: actions-rs/cargo@master + with: + command: check + toolchain: ${{ matrix.toolchain }} + args: -p millau-runtime --features runtime-benchmarks --verbose + +## Build Stage + build: + name: Build + strategy: + matrix: + toolchain: + - stable + #- beta + - nightly + runs-on: ubuntu-latest + env: + RUST_BACKTRACE: full + NIGHTLY: nightly-2021-04-10 #if necessary, specify the version, nightly-2020-10-04, etc. 
+ steps: + + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.4.1 + with: + access_token: ${{ github.token }} + + - name: Checkout sources & submodules + uses: actions/checkout@master + with: + fetch-depth: 5 + submodules: recursive + + - name: Install Toolchain + run: rustup toolchain add $NIGHTLY + + - name: Add WASM Utilities + run: rustup target add wasm32-unknown-unknown --toolchain $NIGHTLY + + - name: Building rust-${{ matrix.toolchain }} + uses: actions-rs/cargo@master + if: github.ref == 'refs/heads/master' + with: + command: build + toolchain: ${{ matrix.toolchain }} + args: --all --verbose + + - name: Prepare artifacts + if: github.ref == 'refs/heads/master' + run: | + mkdir -p ./artifacts; + mv -v target/debug/rialto-bridge-node ./artifacts/; + mv -v target/debug/millau-bridge-node ./artifacts/; + mv -v target/debug/ethereum-poa-relay ./artifacts/; + mv -v target/debug/substrate-relay ./artifacts/; + shell: bash + + - name: Upload artifacts + if: github.ref == 'refs/heads/master' + uses: actions/upload-artifact@v1 + with: + name: ${{ matrix.toolchain }}.zip + path: artifacts/ + + ## Linting Stage + clippy: + name: Clippy + runs-on: ubuntu-latest + env: + RUST_BACKTRACE: full + NIGHTLY: nightly-2021-04-10 #if necessary, specify the version, nightly-2020-10-04, etc. 
+ steps: + + - name: Cancel Previous Runs + uses: styfle/cancel-workflow-action@0.4.1 + with: + access_token: ${{ github.token }} + + - name: Checkout sources & submodules + uses: actions/checkout@master + with: + fetch-depth: 5 + submodules: recursive + + - name: Install Toolchain + run: rustup toolchain add $NIGHTLY + + - name: Add WASM Utilities + run: rustup target add wasm32-unknown-unknown --toolchain $NIGHTLY + + - name: Add clippy + run: rustup component add clippy --toolchain $NIGHTLY + + - name: Rust Cache + uses: Swatinem/rust-cache@v1.2.0 + + - name: Clippy + uses: actions-rs/cargo@master + with: + command: clippy + toolchain: nightly-2021-04-10 #if necessary, specify the version, nightly-2020-10-04, etc. + args: --all-targets -- -D warnings diff --git a/polkadot/.gitignore b/polkadot/.gitignore new file mode 100644 index 00000000000..0ab08578432 --- /dev/null +++ b/polkadot/.gitignore @@ -0,0 +1,25 @@ +**/target/ +**/.env +**/.env2 +**/rust-toolchain +hfuzz_target +hfuzz_workspace +**/Cargo.lock + +**/*.rs.bk + +*.o +*.so +*.rlib +*.dll +.gdb_history + +*.exe + +.DS_Store + +.idea +.vscode +*.iml +*.swp +*.swo diff --git a/polkadot/.maintain/rialto-weight-template.hbs b/polkadot/.maintain/rialto-weight-template.hbs new file mode 100644 index 00000000000..4bf856948ae --- /dev/null +++ b/polkadot/.maintain/rialto-weight-template.hbs @@ -0,0 +1,103 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Autogenerated weights for {{pallet}} +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} +//! DATE: {{date}}, STEPS: {{cmd.steps}}, REPEAT: {{cmd.repeat}} +//! LOW RANGE: {{cmd.lowest_range_values}}, HIGH RANGE: {{cmd.highest_range_values}} +//! EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}} +//! CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} + +// Executed Command: +{{#each args as |arg|~}} +// {{arg}} +{{/each}} + +#![allow(clippy::all)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for {{pallet}}. +pub trait WeightInfo { + {{~#each benchmarks as |benchmark|}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{c.name}}: u32, {{/each~}} + ) -> Weight; + {{~/each}} +} + +/// Weights for {{pallet}} using the Rialto node and recommended hardware. 
+pub struct RialtoWeight(PhantomData); +impl WeightInfo for RialtoWeight { + {{~#each benchmarks as |benchmark|}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} + ) -> Weight { + ({{underscore benchmark.base_weight}} as Weight) + {{~#each benchmark.component_weight as |cw|}} + .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) + {{~/each}} + {{~#if (ne benchmark.base_reads "0")}} + .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight)) + {{~/if}} + {{~#each benchmark.component_reads as |cr|}} + .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) + {{~/each}} + {{~#if (ne benchmark.base_writes "0")}} + .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight)) + {{~/if}} + {{~#each benchmark.component_writes as |cw|}} + .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) + {{~/each}} + } + {{~/each}} +} + +// For backwards compatibility and tests +impl WeightInfo for () { + {{~#each benchmarks as |benchmark|}} + fn {{benchmark.name~}} + ( + {{~#each benchmark.components as |c| ~}} + {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} + ) -> Weight { + ({{underscore benchmark.base_weight}} as Weight) + {{~#each benchmark.component_weight as |cw|}} + .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) + {{~/each}} + {{~#if (ne benchmark.base_reads "0")}} + .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as Weight)) + {{~/if}} + {{~#each benchmark.component_reads as |cr|}} + .saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) + {{~/each}} + {{~#if (ne benchmark.base_writes "0")}} + .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as Weight)) + {{~/if}} 
+ {{~#each benchmark.component_writes as |cw|}} + .saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) + {{~/each}} + } + {{~/each}} +} diff --git a/polkadot/CODE_OF_CONDUCT.md b/polkadot/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..70541fb72fa --- /dev/null +++ b/polkadot/CODE_OF_CONDUCT.md @@ -0,0 +1,80 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers +pledge to making participation in our project and our community a harassment-free experience for +everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity +and expression, level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit + permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +### Facilitation, Not Strongarming + +We recognise that this software is merely a tool for users to create and maintain their blockchain +of preference. We see that blockchains are naturally community platforms with users being the +ultimate decision makers. 
We assert that good software will maximise user agency by facilitate +user-expression on the network. As such: + +- This project will strive to give users as much choice as is both reasonable and possible over what + protocol they adhere to; but +- use of the project's technical forums, commenting systems, pull requests and issue trackers as a + means to express individual protocol preferences is forbidden. + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are +expected to take appropriate and fair corrective action in response to any instances of unacceptable +behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, +code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or +to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is +representing the project or its community. Examples of representing a project or community include +using an official project e-mail address, posting via an official social media account, or acting as +an appointed representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting +the project team at admin@parity.io. All complaints will be reviewed and investigated and will +result in a response that is deemed necessary and appropriate to the circumstances. The project team +is obligated to maintain confidentiality with regard to the reporter of an incident. Further +details of specific enforcement policies may be posted separately. 
+ +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face +temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at +https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/polkadot/Cargo.lock b/polkadot/Cargo.lock new file mode 100644 index 00000000000..5811d7ad3b1 --- /dev/null +++ b/polkadot/Cargo.lock @@ -0,0 +1,9958 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" +dependencies = [ + "lazy_static", + "regex", +] + +[[package]] +name = "addr2line" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "aead" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" +dependencies = [ + "generic-array 0.14.4", +] + +[[package]] +name = "aes" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd2bc6d3f370b5666245ff421e231cba4353df936e26986d2918e61a8fd6aef6" +dependencies = [ + "aes-soft", + "aesni", + "block-cipher", +] + 
+[[package]] +name = "aes-gcm" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0301c9e9c443494d970a07885e8cf3e587bae8356a1d5abd0999068413f7205f" +dependencies = [ + "aead", + "aes", + "block-cipher", + "ghash", + "subtle 2.4.0", +] + +[[package]] +name = "aes-soft" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63dd91889c49327ad7ef3b500fd1109dbd3c509a03db0d4a9ce413b79f575cb6" +dependencies = [ + "block-cipher", + "byteorder", + "opaque-debug 0.3.0", +] + +[[package]] +name = "aesni" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6fe808308bb07d393e2ea47780043ec47683fcf19cf5efc8ca51c50cc8c68a" +dependencies = [ + "block-cipher", + "opaque-debug 0.3.0", +] + +[[package]] +name = "ahash" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" + +[[package]] +name = "aho-corasick" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" +dependencies = [ + "memchr", +] + +[[package]] +name = "ansi_term" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +dependencies = [ + "winapi 0.3.9", +] + +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi 0.3.9", +] + +[[package]] +name = "anyhow" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1" + +[[package]] +name = "approx" +version = "0.3.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0e60b75072ecd4168020818c0107f2857bb6c4e64252d8d3983f6263b40a5c3" +dependencies = [ + "num-traits", +] + +[[package]] +name = "arbitrary" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "698b65a961a9d730fb45b6b0327e20207810c9f61ee421b082b27ba003f49e2b" + +[[package]] +name = "array_tool" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f8cb5d814eb646a863c4f24978cff2880c4be96ad8cde2c0f0678732902e271" + +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + +[[package]] +name = "arrayvec" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" +dependencies = [ + "nodrop", +] + +[[package]] +name = "arrayvec" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" + +[[package]] +name = "asn1_der" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fce6b6a0ffdafebd82c87e79e3f40e8d2c523e5fea5566ff6b90509bf98d638" +dependencies = [ + "asn1_der_derive", +] + +[[package]] +name = "asn1_der_derive" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502" +dependencies = [ + "quote", + "syn", +] + +[[package]] +name = "async-channel" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" +dependencies = [ + "concurrent-queue", + "event-listener", + "futures-core", +] + +[[package]] +name = 
"async-executor" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb877970c7b440ead138f6321a3b5395d6061183af779340b65e20c0fede9146" +dependencies = [ + "async-task", + "concurrent-queue", + "fastrand", + "futures-lite", + "once_cell", + "vec-arena", +] + +[[package]] +name = "async-global-executor" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9586ec52317f36de58453159d48351bc244bc24ced3effc1fce22f3d48664af6" +dependencies = [ + "async-channel", + "async-executor", + "async-io", + "async-mutex", + "blocking", + "futures-lite", + "num_cpus", + "once_cell", +] + +[[package]] +name = "async-io" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9315f8f07556761c3e48fec2e6b276004acf426e6dc068b2c2251854d65ee0fd" +dependencies = [ + "concurrent-queue", + "fastrand", + "futures-lite", + "libc", + "log", + "nb-connect", + "once_cell", + "parking", + "polling", + "vec-arena", + "waker-fn", + "winapi 0.3.9", +] + +[[package]] +name = "async-lock" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1996609732bde4a9988bc42125f55f2af5f3c36370e27c778d5191a4a1b63bfb" +dependencies = [ + "event-listener", +] + +[[package]] +name = "async-mutex" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" +dependencies = [ + "event-listener", +] + +[[package]] +name = "async-process" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef37b86e2fa961bae5a4d212708ea0154f904ce31d1a4a7f47e1bbc33a0c040b" +dependencies = [ + "async-io", + "blocking", + "cfg-if 1.0.0", + "event-listener", + "futures-lite", + "once_cell", + "signal-hook", + "winapi 0.3.9", +] + +[[package]] +name = "async-std" +version = "1.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9f06685bad74e0570f5213741bea82158279a4103d988e57bfada11ad230341" +dependencies = [ + "async-channel", + "async-global-executor", + "async-io", + "async-lock", + "async-process", + "crossbeam-utils 0.8.3", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "num_cpus", + "once_cell", + "pin-project-lite 0.2.4", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + +[[package]] +name = "async-std-resolver" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f665c56111e244fe38e7708ee10948a4356ad6a548997c21f5a63a0f4e0edc4d" +dependencies = [ + "async-std", + "async-trait", + "futures-io", + "futures-util", + "pin-utils", + "trust-dns-resolver", +] + +[[package]] +name = "async-task" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" + +[[package]] +name = "async-tls" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f23d769dbf1838d5df5156e7b1ad404f4c463d1ac2c6aeb6cd943630f8a8400" +dependencies = [ + "futures-core", + "futures-io", + "rustls 0.19.0", + "webpki 0.21.4", + "webpki-roots", +] + +[[package]] +name = "async-trait" +version = "0.1.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "asynchronous-codec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb4401f0a3622dad2e0763fa79e0eb328bc70fb7dccfdd645341f00d671247d6" +dependencies = [ + "bytes 1.0.1", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite 0.2.4", +] + +[[package]] +name = "asynchronous-codec" +version = 
"0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0de5164e5edbf51c45fb8c2d9664ae1c095cce1b265ecf7569093c0d66ef690" +dependencies = [ + "bytes 1.0.1", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite 0.2.4", +] + +[[package]] +name = "atomic" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" +dependencies = [ + "autocfg", +] + +[[package]] +name = "atomic-waker" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "autocfg" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" + +[[package]] +name = "backoff" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "721c249ab59cbc483ad4294c9ee2671835c1e43e9ffc277e6b4ecfef733cfdc5" +dependencies = [ + "instant", + "rand 0.7.3", +] + +[[package]] +name = "backtrace" +version = "0.3.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" +dependencies = [ + "addr2line", + "cfg-if 1.0.0", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base-x" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" + +[[package]] +name = "base58" +version = "0.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" + +[[package]] +name = "base64" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" + +[[package]] +name = "base64" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" + +[[package]] +name = "beef" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6736e2428df2ca2848d846c43e88745121a6654696e349ce0054a420815a7409" + +[[package]] +name = "bincode" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d175dfa69e619905c4c3cdb7c3c203fa3bdd5d51184e3afdb2742c0280493772" +dependencies = [ + "byteorder", + "serde", +] + +[[package]] +name = "bindgen" +version = "0.54.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66c0bb6167449588ff70803f4127f0684f9063097eca5016f37eb52b92c2cf36" +dependencies = [ + "bitflags", + "cexpr", + "cfg-if 0.1.10", + "clang-sys", + "clap", + "env_logger 0.7.1", + "lazy_static", + "lazycell", + "log", + "peeking_take_while", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "which 3.1.1", +] + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "bitvec" +version = "0.17.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" +dependencies = [ + "either", + "radium 0.3.0", +] + +[[package]] +name = "bitvec" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f5011ffc90248764d7005b0e10c7294f5aa1bd87d9dd7248f4ad475b347c294d" +dependencies = [ + "funty", + "radium 0.6.2", + "tap", + "wyz", +] + +[[package]] +name = "blake2" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a5720225ef5daecf08657f23791354e1685a8c91a4c60c7f3d3b2892f978f4" +dependencies = [ + "crypto-mac 0.8.0", + "digest 0.9.0", + "opaque-debug 0.3.0", +] + +[[package]] +name = "blake2-rfc" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" +dependencies = [ + "arrayvec 0.4.12", + "constant_time_eq", +] + +[[package]] +name = "blake2b_simd" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" +dependencies = [ + "arrayref", + "arrayvec 0.5.2", + "constant_time_eq", +] + +[[package]] +name = "blake2s_simd" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" +dependencies = [ + "arrayref", + "arrayvec 0.5.2", + "constant_time_eq", +] + +[[package]] +name = "blake3" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9ff35b701f3914bdb8fad3368d822c766ef2858b2583198e41639b936f09d3f" +dependencies = [ + "arrayref", + "arrayvec 0.5.2", + "cc", + "cfg-if 0.1.10", + "constant_time_eq", + "crypto-mac 0.8.0", + "digest 0.9.0", +] + +[[package]] +name = "block-buffer" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +dependencies = [ + "block-padding 0.1.5", + "byte-tools", + "byteorder", + "generic-array 0.12.3", +] + +[[package]] +name = "block-buffer" +version = "0.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "block-padding 0.2.1", + "generic-array 0.14.4", +] + +[[package]] +name = "block-cipher" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f337a3e6da609650eb74e02bc9fac7b735049f7623ab12f2e4c719316fcc7e80" +dependencies = [ + "generic-array 0.14.4", +] + +[[package]] +name = "block-padding" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +dependencies = [ + "byte-tools", +] + +[[package]] +name = "block-padding" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" + +[[package]] +name = "blocking" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5e170dbede1f740736619b776d7251cb1b9095c435c34d8ca9f57fcd2f335e9" +dependencies = [ + "async-channel", + "async-task", + "atomic-waker", + "fastrand", + "futures-lite", + "once_cell", +] + +[[package]] +name = "bp-currency-exchange" +version = "0.1.0" +dependencies = [ + "frame-support", + "parity-scale-codec 2.0.1", + "sp-api", + "sp-std", +] + +[[package]] +name = "bp-eth-poa" +version = "0.1.0" +dependencies = [ + "ethbloom 0.10.0", + "fixed-hash", + "hash-db", + "hex-literal 0.2.1", + "impl-rlp", + "impl-serde", + "libsecp256k1", + "parity-bytes", + "parity-scale-codec 2.0.1", + "plain_hasher", + "primitive-types", + "rlp", + "serde", + "serde-big-array", + "sp-api", + "sp-io", + "sp-runtime", + "sp-std", + "triehash", +] + +[[package]] +name = "bp-header-chain" +version = "0.1.0" +dependencies = [ + "bp-test-utils", + "finality-grandpa 0.14.0", + "frame-support", + "parity-scale-codec 2.0.1", + "serde", + "sp-core", + "sp-finality-grandpa", + 
"sp-runtime", + "sp-std", +] + +[[package]] +name = "bp-kusama" +version = "0.1.0" +dependencies = [ + "bp-messages", + "bp-polkadot-core", + "bp-runtime", + "sp-api", + "sp-std", +] + +[[package]] +name = "bp-message-dispatch" +version = "0.1.0" +dependencies = [ + "bp-runtime", + "parity-scale-codec 2.0.1", +] + +[[package]] +name = "bp-messages" +version = "0.1.0" +dependencies = [ + "bp-runtime", + "frame-support", + "frame-system", + "parity-scale-codec 2.0.1", + "sp-std", +] + +[[package]] +name = "bp-millau" +version = "0.1.0" +dependencies = [ + "bp-messages", + "bp-runtime", + "fixed-hash", + "frame-support", + "frame-system", + "hash256-std-hasher", + "impl-codec", + "impl-serde", + "parity-util-mem", + "serde", + "sp-api", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-trie", +] + +[[package]] +name = "bp-polkadot" +version = "0.1.0" +dependencies = [ + "bp-messages", + "bp-polkadot-core", + "bp-runtime", + "sp-api", + "sp-std", +] + +[[package]] +name = "bp-polkadot-core" +version = "0.1.0" +dependencies = [ + "bp-messages", + "bp-runtime", + "frame-support", + "frame-system", + "hex", + "parity-scale-codec 2.0.1", + "sp-api", + "sp-core", + "sp-runtime", + "sp-std", + "sp-version", +] + +[[package]] +name = "bp-rialto" +version = "0.1.0" +dependencies = [ + "bp-messages", + "bp-runtime", + "frame-support", + "frame-system", + "sp-api", + "sp-core", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "bp-rococo" +version = "0.1.0" +dependencies = [ + "bp-header-chain", + "bp-messages", + "bp-polkadot-core", + "bp-runtime", + "parity-scale-codec 2.0.1", + "sp-api", + "sp-runtime", + "sp-std", + "sp-version", +] + +[[package]] +name = "bp-runtime" +version = "0.1.0" +dependencies = [ + "frame-support", + "hash-db", + "num-traits", + "parity-scale-codec 2.0.1", + "sp-core", + "sp-io", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-trie", +] + +[[package]] +name = "bp-test-utils" +version = "0.1.0" +dependencies = [ + 
"bp-header-chain", + "ed25519-dalek", + "finality-grandpa 0.14.0", + "parity-scale-codec 2.0.1", + "sp-application-crypto", + "sp-finality-grandpa", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "bp-westend" +version = "0.1.0" +dependencies = [ + "bp-header-chain", + "bp-messages", + "bp-polkadot-core", + "bp-runtime", + "parity-scale-codec 2.0.1", + "sp-api", + "sp-runtime", + "sp-std", + "sp-version", +] + +[[package]] +name = "bridge-runtime-common" +version = "0.1.0" +dependencies = [ + "bp-message-dispatch", + "bp-messages", + "bp-runtime", + "ed25519-dalek", + "frame-support", + "hash-db", + "pallet-bridge-dispatch", + "pallet-bridge-grandpa", + "pallet-bridge-messages", + "parity-scale-codec 2.0.1", + "sp-core", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-trie", +] + +[[package]] +name = "bs58" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" + +[[package]] +name = "bstr" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a40b47ad93e1a5404e6c18dec46b628214fee441c70f4ab5d6942142cc268a3d" +dependencies = [ + "memchr", +] + +[[package]] +name = "bumpalo" +version = "3.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" + +[[package]] +name = "byte-slice-cast" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" + +[[package]] +name = "byte-slice-cast" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65c1bf4a04a88c54f589125563643d773f3254b5c38571395e2b591c693bbc81" + +[[package]] +name = "byte-tools" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" + +[[package]] +name = "byteorder" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" + +[[package]] +name = "bytes" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" +dependencies = [ + "byteorder", + "either", + "iovec", +] + +[[package]] +name = "bytes" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" + +[[package]] +name = "bytes" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" + +[[package]] +name = "cache-padded" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" + +[[package]] +name = "cc" +version = "1.0.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" +dependencies = [ + "jobserver", +] + +[[package]] +name = "cexpr" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chacha20" 
+version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "244fbce0d47e97e8ef2f63b81d5e05882cb518c68531eb33194990d7b7e85845" +dependencies = [ + "stream-cipher", + "zeroize", +] + +[[package]] +name = "chacha20poly1305" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bf18d374d66df0c05cdddd528a7db98f78c28e2519b120855c4f84c5027b1f5" +dependencies = [ + "aead", + "chacha20", + "poly1305", + "stream-cipher", + "zeroize", +] + +[[package]] +name = "chrono" +version = "0.4.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +dependencies = [ + "libc", + "num-integer", + "num-traits", + "time 0.1.44", + "winapi 0.3.9", +] + +[[package]] +name = "cid" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff0e3bc0b6446b3f9663c1a6aba6ef06c5aeaa1bc92bd18077be337198ab9768" +dependencies = [ + "multibase", + "multihash", + "unsigned-varint 0.5.1", +] + +[[package]] +name = "cipher" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" +dependencies = [ + "generic-array 0.14.4", +] + +[[package]] +name = "clang-sys" +version = "0.29.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe6837df1d5cba2397b835c8530f51723267e16abbf83892e9e5af4f0e5dd10a" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "2.33.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002" +dependencies = [ + "ansi_term 0.11.0", + "atty", + "bitflags", + "strsim", + "textwrap", + "unicode-width", + "vec_map", + "yaml-rust", +] + +[[package]] +name = "cloudabi" +version = "0.0.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +dependencies = [ + "bitflags", +] + +[[package]] +name = "concurrent-queue" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" +dependencies = [ + "cache-padded", +] + +[[package]] +name = "const_fn" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28b9d6de7f49e22cf97ad17fc4036ece69300032f45f78f30b4a4482cdc3f4a6" + +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" + +[[package]] +name = "core-foundation" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" +dependencies = [ + "core-foundation-sys 0.7.0", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" + +[[package]] +name = "core-foundation-sys" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" + +[[package]] +name = "cpp_demangle" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44919ecaf6f99e8e737bc239408931c9a01e9a6c74814fee8242dd2506b65390" +dependencies = [ + "cfg-if 1.0.0", + "glob", +] + +[[package]] +name = "cpuid-bool" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" + +[[package]] +name = "cpuid-bool" +version 
= "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" + +[[package]] +name = "cranelift-bforest" +version = "0.71.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcee7a5107071484772b89fdf37f0f460b7db75f476e43ea7a684fd942470bcf" +dependencies = [ + "cranelift-entity", +] + +[[package]] +name = "cranelift-codegen" +version = "0.71.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "654ab96f0f1cab71c0d323618a58360a492da2c341eb2c1f977fc195c664001b" +dependencies = [ + "byteorder", + "cranelift-bforest", + "cranelift-codegen-meta", + "cranelift-codegen-shared", + "cranelift-entity", + "gimli", + "log", + "regalloc", + "serde", + "smallvec 1.6.1", + "target-lexicon", + "thiserror", +] + +[[package]] +name = "cranelift-codegen-meta" +version = "0.71.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65994cfc5be9d5fd10c5fc30bcdddfa50c04bb79c91329287bff846434ff8f14" +dependencies = [ + "cranelift-codegen-shared", + "cranelift-entity", +] + +[[package]] +name = "cranelift-codegen-shared" +version = "0.71.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "889d720b688b8b7df5e4903f9b788c3c59396050f5548e516e58ccb7312463ab" +dependencies = [ + "serde", +] + +[[package]] +name = "cranelift-entity" +version = "0.71.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a2e6884a363e42a9ba980193ea8603a4272f8a92bd8bbaf9f57a94dbea0ff96" +dependencies = [ + "serde", +] + +[[package]] +name = "cranelift-frontend" +version = "0.71.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6f41e2f9b57d2c030e249d0958f1cdc2c3cd46accf8c0438b3d1944e9153444" +dependencies = [ + "cranelift-codegen", + "log", + "smallvec 1.6.1", + "target-lexicon", +] + +[[package]] +name = "cranelift-native" +version = "0.71.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "aab70ba7575665375d31cbdea2462916ce58be887834e1b83c860b43b51af637" +dependencies = [ + "cranelift-codegen", + "target-lexicon", +] + +[[package]] +name = "cranelift-wasm" +version = "0.71.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2fc3d2e70da6439adf97648dcdf81834363154f2907405345b6fbe7ca38918c" +dependencies = [ + "cranelift-codegen", + "cranelift-entity", + "cranelift-frontend", + "itertools 0.10.0", + "log", + "serde", + "smallvec 1.6.1", + "thiserror", + "wasmparser", +] + +[[package]] +name = "crc32fast" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-utils 0.8.3", +] + +[[package]] +name = "crossbeam-deque" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" +dependencies = [ + "crossbeam-epoch 0.8.2", + "crossbeam-utils 0.7.2", + "maybe-uninit", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-epoch 0.9.3", + "crossbeam-utils 0.8.3", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" +dependencies = [ + "autocfg", + "cfg-if 0.1.10", + "crossbeam-utils 0.7.2", + "lazy_static", + 
"maybe-uninit", + "memoffset 0.5.6", + "scopeguard", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" +dependencies = [ + "cfg-if 1.0.0", + "crossbeam-utils 0.8.3", + "lazy_static", + "memoffset 0.6.1", + "scopeguard", +] + +[[package]] +name = "crossbeam-queue" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" +dependencies = [ + "cfg-if 0.1.10", + "crossbeam-utils 0.7.2", + "maybe-uninit", +] + +[[package]] +name = "crossbeam-utils" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" +dependencies = [ + "autocfg", + "cfg-if 0.1.10", + "lazy_static", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" +dependencies = [ + "autocfg", + "cfg-if 1.0.0", + "lazy_static", +] + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-mac" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" +dependencies = [ + "generic-array 0.12.3", + "subtle 1.0.0", +] + +[[package]] +name = "crypto-mac" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" +dependencies = [ + "generic-array 0.14.4", + "subtle 2.4.0", +] + +[[package]] +name = "ct-logs" +version = "0.7.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c8e13110a84b6315df212c045be706af261fd364791cad863285439ebba672e" +dependencies = [ + "sct", +] + +[[package]] +name = "ctor" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8f45d9ad417bcef4817d614a501ab55cdd96a6fdb24f49aab89a54acfd66b19" +dependencies = [ + "quote", + "syn", +] + +[[package]] +name = "cuckoofilter" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b810a8449931679f64cd7eef1bbd0fa315801b6d5d9cdc1ace2804d6529eee18" +dependencies = [ + "byteorder", + "fnv", + "rand 0.7.3", +] + +[[package]] +name = "curl" +version = "0.4.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a872858e9cb9e3b96c80dd78774ad9e32e44d3b05dc31e142b858d14aebc82c" +dependencies = [ + "curl-sys", + "libc", + "openssl-probe", + "openssl-sys", + "schannel", + "socket2 0.3.19", + "winapi 0.3.9", +] + +[[package]] +name = "curl-sys" +version = "0.4.41+curl-7.75.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ec466abd277c7cab2905948f3e94d10bc4963f1f5d47921c1cc4ffd2028fe65" +dependencies = [ + "cc", + "libc", + "libnghttp2-sys", + "libz-sys", + "openssl-sys", + "pkg-config", + "vcpkg", + "winapi 0.3.9", +] + +[[package]] +name = "curve25519-dalek" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "434e1720189a637d44fe464f4df1e6eb900b4835255b14354497c78af37d9bb8" +dependencies = [ + "byteorder", + "digest 0.8.1", + "rand_core 0.5.1", + "subtle 2.4.0", + "zeroize", +] + +[[package]] +name = "curve25519-dalek" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f627126b946c25a4638eec0ea634fc52506dea98db118aae985118ce7c3d723f" +dependencies = [ + "byteorder", + "digest 0.9.0", + "rand_core 0.5.1", + "subtle 2.4.0", + "zeroize", +] + +[[package]] +name = 
"data-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" + +[[package]] +name = "data-encoding-macro" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a94feec3d2ba66c0b6621bca8bc6f68415b1e5c69af3586fdd0af9fd9f29b17" +dependencies = [ + "data-encoding", + "data-encoding-macro-internal", +] + +[[package]] +name = "data-encoding-macro-internal" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f83e699727abca3c56e187945f303389590305ab2f0185ea445aa66e8d5f2a" +dependencies = [ + "data-encoding", + "syn", +] + +[[package]] +name = "derive_more" +version = "0.99.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "digest" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +dependencies = [ + "generic-array 0.12.3", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array 0.14.4", +] + +[[package]] +name = "directories" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8fed639d60b58d0f53498ab13d26f621fd77569cc6edb031f4cc36a2ad9da0f" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "directories-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc" +dependencies = [ + "cfg-if 1.0.0", + "dirs-sys-next", +] + +[[package]] +name 
= "dirs-sys" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" +dependencies = [ + "libc", + "redox_users 0.3.5", + "winapi 0.3.9", +] + +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users 0.4.0", + "winapi 0.3.9", +] + +[[package]] +name = "discard" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" + +[[package]] +name = "dns-parser" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea" +dependencies = [ + "byteorder", + "quick-error 1.2.3", +] + +[[package]] +name = "doc-comment" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" + +[[package]] +name = "dyn-clonable" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" +dependencies = [ + "dyn-clonable-impl", + "dyn-clone", +] + +[[package]] +name = "dyn-clonable-impl" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "dyn-clone" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" + +[[package]] +name = "ed25519" +version = "1.0.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "37c66a534cbb46ab4ea03477eae19d5c22c01da8258030280b7bd9d8433fb6ef" +dependencies = [ + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +dependencies = [ + "curve25519-dalek 3.0.2", + "ed25519", + "rand 0.7.3", + "serde", + "sha2 0.9.3", + "zeroize", +] + +[[package]] +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + +[[package]] +name = "encoding_rs" +version = "0.8.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "enum-as-inner" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "env_logger" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" +dependencies = [ + "atty", + "humantime 1.3.0", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "env_logger" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17392a012ea30ef05a610aa97dfb49496e71c9f676b27879922ea5bdf60d9d3f" +dependencies = [ + "atty", + "humantime 2.1.0", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "environmental" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6576a1755ddffd988788025e75bce9e74b018f7cc226198fe931d077911c6d7e" + +[[package]] +name = 
"erased-serde" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0465971a8cc1fa2455c8465aaa377131e1f1cf4983280f474a13e68793aa770c" +dependencies = [ + "serde", +] + +[[package]] +name = "errno" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa68f2fb9cae9d37c9b2b3584aba698a2e97f72d7aef7b9f7aa71d8b54ce46fe" +dependencies = [ + "errno-dragonfly", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14ca354e36190500e1e1fb267c647932382b54053c50b14970856c0b00a35067" +dependencies = [ + "gcc", + "libc", +] + +[[package]] +name = "ethabi" +version = "14.0.0" +source = "git+https://github.com/paritytech/ethabi.git?branch=td-eth-types-11#fe76a0547de3785e40215da7aa10b334e7a6e553" +dependencies = [ + "anyhow", + "ethereum-types", + "hex", + "serde", + "serde_json", + "sha3", + "thiserror", + "uint", +] + +[[package]] +name = "ethabi-contract" +version = "11.0.0" +source = "git+https://github.com/paritytech/ethabi.git?branch=td-eth-types-11#fe76a0547de3785e40215da7aa10b334e7a6e553" + +[[package]] +name = "ethabi-derive" +version = "14.0.0" +source = "git+https://github.com/paritytech/ethabi.git?branch=td-eth-types-11#fe76a0547de3785e40215da7aa10b334e7a6e553" +dependencies = [ + "anyhow", + "ethabi", + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "ethbloom" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22a621dcebea74f2a6f2002d0a885c81ccf6cbdf86760183316a7722b5707ca4" +dependencies = [ + "crunchy", + "fixed-hash", + "impl-rlp", + "tiny-keccak", +] + +[[package]] +name = "ethbloom" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "779864b9c7f7ead1f092972c3257496c6a84b46dba2ce131dd8a282cb2cc5972" +dependencies = [ + "crunchy", + "fixed-hash", 
+ "impl-rlp", + "impl-serde", + "tiny-keccak", +] + +[[package]] +name = "ethereum-contract-builtin" +version = "0.1.0" +dependencies = [ + "ethereum-types", + "finality-grandpa 0.14.0", + "hex", + "log", + "parity-scale-codec 2.0.1", + "rialto-runtime", + "sc-finality-grandpa", + "sp-blockchain", + "sp-core", + "sp-finality-grandpa", + "sp-runtime", +] + +[[package]] +name = "ethereum-poa-relay" +version = "0.1.0" +dependencies = [ + "ansi_term 0.12.1", + "async-std", + "async-trait", + "bp-currency-exchange", + "bp-eth-poa", + "clap", + "env_logger 0.8.3", + "ethabi", + "ethabi-contract", + "ethabi-derive", + "exchange-relay", + "frame-system", + "futures 0.3.13", + "headers-relay", + "hex", + "hex-literal 0.3.1", + "libsecp256k1", + "log", + "messages-relay", + "num-traits", + "pallet-transaction-payment", + "parity-scale-codec 2.0.1", + "relay-ethereum-client", + "relay-rialto-client", + "relay-substrate-client", + "relay-utils", + "rialto-runtime", + "serde", + "serde_json", + "sp-core", + "sp-keyring", + "sp-runtime", + "substrate-prometheus-endpoint", + "time 0.2.25", +] + +[[package]] +name = "ethereum-types" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f64b5df66a228d85e4b17e5d6c6aa43b0310898ffe8a85988c4c032357aaabfd" +dependencies = [ + "ethbloom 0.11.0", + "fixed-hash", + "impl-rlp", + "impl-serde", + "primitive-types", + "uint", +] + +[[package]] +name = "event-listener" +version = "2.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" + +[[package]] +name = "exchange-relay" +version = "0.1.0" +dependencies = [ + "async-std", + "async-trait", + "backoff", + "futures 0.3.13", + "log", + "num-traits", + "parking_lot 0.11.1", + "relay-utils", +] + +[[package]] +name = "exit-future" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" +dependencies = [ + "futures 0.3.13", +] + +[[package]] +name = "failure" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" +dependencies = [ + "backtrace", + "failure_derive", +] + +[[package]] +name = "failure_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "fake-simd" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" + +[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + +[[package]] +name = "fastrand" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca5faf057445ce5c9d4329e382b2ce7ca38550ef3b73a5348362d5f24e0c7fe3" +dependencies = [ + "instant", +] + +[[package]] +name = "fdlimit" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c4c9e43643f5a3be4ca5b67d26b98031ff9db6806c3440ae32e02e3ceac3f1b" +dependencies = [ + "libc", +] + +[[package]] +name = "file-per-thread-logger" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fdbe0d94371f9ce939b555dd342d0686cc4c0cadbcd4b61d70af5ff97eb4126" +dependencies = [ + "env_logger 0.7.1", + "log", +] + +[[package]] +name = "finality-grandpa" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8feb87a63249689640ac9c011742c33139204e3c134293d3054022276869133b" +dependencies = [ + 
"either", + "futures 0.3.13", + "futures-timer 2.0.2", + "log", + "num-traits", + "parity-scale-codec 1.3.7", + "parking_lot 0.9.0", +] + +[[package]] +name = "finality-grandpa" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6447e2f8178843749e8c8003206def83ec124a7859475395777a28b5338647c" +dependencies = [ + "either", + "futures 0.3.13", + "futures-timer 3.0.2", + "log", + "num-traits", + "parity-scale-codec 2.0.1", + "parking_lot 0.11.1", +] + +[[package]] +name = "finality-relay" +version = "0.1.0" +dependencies = [ + "async-std", + "async-trait", + "backoff", + "bp-header-chain", + "futures 0.3.13", + "headers-relay", + "log", + "num-traits", + "parking_lot 0.11.1", + "relay-utils", +] + +[[package]] +name = "fixed-hash" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" +dependencies = [ + "byteorder", + "rand 0.8.3", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "fixedbitset" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" + +[[package]] +name = "flate2" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0" +dependencies = [ + "cfg-if 1.0.0", + "crc32fast", + "libc", + "libz-sys", + "miniz_oxide", +] + +[[package]] +name = "flume" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "531a685ab99b8f60a271b44d5dd1a76e55124a8c9fa0407b7a8e9cd172d5b588" +dependencies = [ + "futures-core", + "futures-sink", + "pin-project 1.0.5", + "spinning_top", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "fork-tree" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "parity-scale-codec 2.0.1", +] + +[[package]] +name = "form_urlencoded" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +dependencies = [ + "matches", + "percent-encoding 2.1.0", +] + +[[package]] +name = "frame-benchmarking" +version = "3.1.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "frame-support", + "frame-system", + "linregress", + "log", + "parity-scale-codec 2.0.1", + "paste 1.0.4", + "sp-api", + "sp-io", + "sp-runtime", + "sp-runtime-interface", + "sp-std", + "sp-storage", +] + +[[package]] +name = "frame-benchmarking-cli" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "Inflector", + "chrono", + "frame-benchmarking", + "handlebars", + "parity-scale-codec 2.0.1", + "sc-cli", + "sc-client-db", + "sc-executor", + "sc-service", + "serde", + "sp-core", + "sp-externalities", + "sp-keystore", + "sp-runtime", + "sp-state-machine", + "structopt", +] + +[[package]] +name = "frame-executive" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec 2.0.1", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-tracing", +] + +[[package]] +name = "frame-metadata" +version = "13.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "parity-scale-codec 2.0.1", + "serde", + "sp-core", + 
"sp-std", +] + +[[package]] +name = "frame-support" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "bitflags", + "frame-metadata", + "frame-support-procedural", + "impl-trait-for-tuples", + "log", + "once_cell", + "parity-scale-codec 2.0.1", + "paste 1.0.4", + "serde", + "smallvec 1.6.1", + "sp-arithmetic", + "sp-core", + "sp-inherents", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-state-machine", + "sp-std", + "sp-tracing", +] + +[[package]] +name = "frame-support-procedural" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "Inflector", + "frame-support-procedural-tools", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural-tools" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "frame-support-procedural-tools-derive", + "proc-macro-crate 1.0.0", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural-tools-derive" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-system" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "frame-support", + "impl-trait-for-tuples", + "log", + "parity-scale-codec 2.0.1", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-version", +] + +[[package]] +name = "frame-system-rpc-runtime-api" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "parity-scale-codec 2.0.1", + "sp-api", +] + 
+[[package]] +name = "fs-swap" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5839fda247e24ca4919c87c71dd5ca658f1f39e4f06829f80e3f15c3bafcfc2c" +dependencies = [ + "lazy_static", + "libc", + "libloading", + "winapi 0.3.9", +] + +[[package]] +name = "fs2" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +dependencies = [ + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "fuchsia-cprng" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" + +[[package]] +name = "fuchsia-zircon" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" +dependencies = [ + "bitflags", + "fuchsia-zircon-sys", +] + +[[package]] +name = "fuchsia-zircon-sys" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" + +[[package]] +name = "funty" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" + +[[package]] +name = "futures" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" + +[[package]] +name = "futures" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f55667319111d593ba876406af7c409c0ebb44dc4be6132a783ccf163ea14c1" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.13" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c2dd2df839b57db9ab69c2c9d8f3e8c81984781937fe2807dc6dcf3b2ad2939" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15496a72fabf0e62bdc3df11a59a3787429221dd0710ba8ef163d6f7a9112c94" + +[[package]] +name = "futures-cpupool" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" +dependencies = [ + "futures 0.1.31", + "num_cpus", +] + +[[package]] +name = "futures-diagnose" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdcef58a173af8148b182684c9f2d5250875adbcaff7b5794073894f9d8634a9" +dependencies = [ + "futures 0.1.31", + "futures 0.3.13", + "lazy_static", + "log", + "parking_lot 0.9.0", + "pin-project 0.4.27", + "serde", + "serde_json", +] + +[[package]] +name = "futures-executor" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891a4b7b96d84d5940084b2a37632dd65deeae662c114ceaa2c879629c9c0ad1" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", + "num_cpus", +] + +[[package]] +name = "futures-io" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71c2c65c57704c32f5241c1223167c2c3294fd34ac020c807ddbe6db287ba59" + +[[package]] +name = "futures-lite" +version = "1.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4481d0cd0de1d204a4fa55e7d45f07b1d958abcb06714b3446438e2eff695fb" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite 0.2.4", + "waker-fn", +] + +[[package]] +name = "futures-macro" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "ea405816a5139fb39af82c2beb921d52143f556038378d6db21183a5c37fbfb7" +dependencies = [ + "proc-macro-hack", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-rustls" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a1387e07917c711fb4ee4f48ea0adb04a3c9739e53ef85bf43ae1edc2937a8b" +dependencies = [ + "futures-io", + "rustls 0.19.0", + "webpki 0.21.4", +] + +[[package]] +name = "futures-sink" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85754d98985841b7d4f5e8e6fbfa4a4ac847916893ec511a2917ccd8525b8bb3" + +[[package]] +name = "futures-task" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa189ef211c15ee602667a6fcfe1c1fd9e07d42250d2156382820fba33c9df80" + +[[package]] +name = "futures-timer" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1de7508b218029b0f01662ed8f61b1c964b3ae99d6f25462d0f55a595109df6" + +[[package]] +name = "futures-timer" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" + +[[package]] +name = "futures-util" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1812c7ab8aedf8d6f2701a43e1243acdbcc2b36ab26e2ad421eb99ac963d96d1" +dependencies = [ + "futures 0.1.31", + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite 0.2.4", + "pin-utils", + "proc-macro-hack", + "proc-macro-nested", + "slab", +] + +[[package]] +name = "gcc" +version = "0.3.55" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" + +[[package]] +name = "generic-array" +version = "0.12.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +dependencies = [ + "typenum", +] + +[[package]] +name = "generic-array" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ed1e761351b56f54eb9dcd0cfaca9fd0daecf93918e1cfc01c8a3d26ee7adcd" +dependencies = [ + "typenum", +] + +[[package]] +name = "generic-array" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", +] + +[[package]] +name = "ghash" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375" +dependencies = [ + "opaque-debug 0.3.0", + "polyval", +] + +[[package]] +name = "gimli" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" +dependencies = [ + "fallible-iterator", + "indexmap", + "stable_deref_trait", +] + +[[package]] +name = "glob" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" + +[[package]] +name = "globset" 
+version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c152169ef1e421390738366d2f796655fec62621dabbd0fd476f905934061e4a" +dependencies = [ + "aho-corasick", + "bstr", + "fnv", + "log", + "regex", +] + +[[package]] +name = "gloo-timers" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47204a46aaff920a1ea58b11d03dec6f704287d27561724a4631e450654a891f" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "h2" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" +dependencies = [ + "byteorder", + "bytes 0.4.12", + "fnv", + "futures 0.1.31", + "http 0.1.21", + "indexmap", + "log", + "slab", + "string", + "tokio-io", +] + +[[package]] +name = "h2" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535" +dependencies = [ + "bytes 0.5.6", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.3", + "indexmap", + "slab", + "tokio 0.2.25", + "tokio-util", + "tracing", + "tracing-futures", +] + +[[package]] +name = "handlebars" +version = "3.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb0867bbc5a3da37a753e78021d5fcf8a4db00e18dd2dd90fd36e24190e162d" +dependencies = [ + "log", + "pest", + "pest_derive", + "quick-error 2.0.0", + "serde", + "serde_json", +] + +[[package]] +name = "hash-db" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" + +[[package]] +name = "hash256-std-hasher" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" +dependencies = [ + "crunchy", +] + +[[package]] +name = "hashbrown" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +dependencies = [ + "ahash", +] + +[[package]] +name = "headers-relay" +version = "0.1.0" +dependencies = [ + "async-std", + "async-trait", + "backoff", + "futures 0.3.13", + "linked-hash-map", + "log", + "num-traits", + "parking_lot 0.11.1", + "relay-utils", +] + +[[package]] +name = "heck" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "hermit-abi" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +dependencies = [ + "libc", +] + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex-literal" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "961de220ec9a91af2e1e5bd80d02109155695e516771762381ef8581317066e0" +dependencies = [ + "hex-literal-impl", + "proc-macro-hack", +] + +[[package]] +name = "hex-literal" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5af1f635ef1bc545d78392b136bfe1c9809e029023c84a3638a864a10b8819c8" + +[[package]] +name = "hex-literal-impl" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "853f769599eb31de176303197b7ba4973299c38c7a7604a6bc88c3eef05b9b46" +dependencies = [ + "proc-macro-hack", +] + +[[package]] +name = "hex_fmt" +version = "0.3.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" + +[[package]] +name = "hmac" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" +dependencies = [ + "crypto-mac 0.7.0", + "digest 0.8.1", +] + +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac 0.8.0", + "digest 0.9.0", +] + +[[package]] +name = "hmac-drbg" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" +dependencies = [ + "digest 0.8.1", + "generic-array 0.12.3", + "hmac 0.7.1", +] + +[[package]] +name = "honggfuzz" +version = "0.5.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bea09577d948a98a5f59b7c891e274c4fb35ad52f67782b3d0cb53b9c05301f1" +dependencies = [ + "arbitrary", + "lazy_static", + "memmap", +] + +[[package]] +name = "hostname" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +dependencies = [ + "libc", + "match_cfg", + "winapi 0.3.9", +] + +[[package]] +name = "http" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6ccf5ede3a895d8856620237b2f02972c1bbc78d2965ad7fe8838d4a0ed41f0" +dependencies = [ + "bytes 0.4.12", + "fnv", + "itoa", +] + +[[package]] +name = "http" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" +dependencies = [ + "bytes 1.0.1", + "fnv", + "itoa", +] + +[[package]] +name = 
"http-body" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" +dependencies = [ + "bytes 0.4.12", + "futures 0.1.31", + "http 0.1.21", + "tokio-buf", +] + +[[package]] +name = "http-body" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" +dependencies = [ + "bytes 0.5.6", + "http 0.2.3", +] + +[[package]] +name = "httparse" +version = "1.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "615caabe2c3160b313d52ccc905335f4ed5f10881dd63dc5699d47e90be85691" + +[[package]] +name = "httpdate" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" + +[[package]] +name = "humantime" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" +dependencies = [ + "quick-error 1.2.3", +] + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "hyper" +version = "0.12.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c843caf6296fc1f93444735205af9ed4e109a539005abb2564ae1d6fad34c52" +dependencies = [ + "bytes 0.4.12", + "futures 0.1.31", + "futures-cpupool", + "h2 0.1.26", + "http 0.1.21", + "http-body 0.1.0", + "httparse", + "iovec", + "itoa", + "log", + "net2", + "rustc_version", + "time 0.1.44", + "tokio 0.1.22", + "tokio-buf", + "tokio-executor", + "tokio-io", + "tokio-reactor", + "tokio-tcp", + "tokio-threadpool", + "tokio-timer", + "want 0.2.0", +] + +[[package]] +name = "hyper" +version = "0.13.10" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" +dependencies = [ + "bytes 0.5.6", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.2.7", + "http 0.2.3", + "http-body 0.3.1", + "httparse", + "httpdate", + "itoa", + "pin-project 1.0.5", + "socket2 0.3.19", + "tokio 0.2.25", + "tower-service", + "tracing", + "want 0.3.0", +] + +[[package]] +name = "hyper-rustls" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37743cc83e8ee85eacfce90f2f4102030d9ff0a95244098d781e9bee4a90abb6" +dependencies = [ + "bytes 0.5.6", + "ct-logs", + "futures-util", + "hyper 0.13.10", + "log", + "rustls 0.18.1", + "rustls-native-certs", + "tokio 0.2.25", + "tokio-rustls", + "webpki 0.21.4", +] + +[[package]] +name = "idna" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "idna" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89829a5d69c23d348314a7ac337fe39173b61149a9864deabd260983aed48c21" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "if-addrs" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28538916eb3f3976311f5dfbe67b5362d0add1293d0a9cad17debf86f8e3aa48" +dependencies = [ + "if-addrs-sys", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "if-addrs-sys" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de74b9dd780476e837e5eb5ab7c88b49ed304126e412030a0adba99c8efe79ea" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "if-watch" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "6a6d52908d4ea4ab2bc22474ba149bf1011c8e2c3ebc1ff593ae28ac44f494b6" +dependencies = [ + "async-io", + "futures 0.3.13", + "futures-lite", + "if-addrs", + "ipnet", + "libc", + "log", + "winapi 0.3.9", +] + +[[package]] +name = "impl-codec" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df170efa359aebdd5cb7fe78edcc67107748e4737bdca8a8fb40d15ea7a877ed" +dependencies = [ + "parity-scale-codec 2.0.1", +] + +[[package]] +name = "impl-rlp" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" +dependencies = [ + "rlp", +] + +[[package]] +name = "impl-serde" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b47ca4d2b6931707a55fce5cf66aff80e2178c8b63bbb4ecb5695cbc870ddf6f" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5dacb10c5b3bb92d46ba347505a9041e676bb20ad220101326bffb0c93031ee" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "indexmap" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b" +dependencies = [ + "autocfg", + "hashbrown", + "serde", +] + +[[package]] +name = "instant" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "integer-sqrt" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" +dependencies = [ + "num-traits", +] + +[[package]] +name = "intervalier" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "64fa110ec7b8f493f416eed552740d10e7030ad5f63b2308f82c9608ec2df275" +dependencies = [ + "futures 0.3.13", + "futures-timer 2.0.2", +] + +[[package]] +name = "iovec" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" +dependencies = [ + "libc", +] + +[[package]] +name = "ip_network" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ee15951c035f79eddbef745611ec962f63f4558f1dadf98ab723cc603487c6f" + +[[package]] +name = "ipconfig" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" +dependencies = [ + "socket2 0.3.19", + "widestring", + "winapi 0.3.9", + "winreg", +] + +[[package]] +name = "ipnet" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" + +[[package]] +name = "isahc" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b24d2aed6bbe6faeab0e164ec2e9e6193fcfcfe489b6eb59fb0d0d34947d73" +dependencies = [ + "crossbeam-utils 0.8.3", + "curl", + "curl-sys", + "encoding_rs", + "flume", + "futures-lite", + "http 0.2.3", + "log", + "mime", + "once_cell", + "polling", + "slab", + "sluice", + "tracing", + "tracing-futures", + "url 2.2.1", + "waker-fn", +] + +[[package]] +name = "itertools" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319" 
+dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" + +[[package]] +name = "jobserver" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2" +dependencies = [ + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "jsonpath_lib" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61352ec23883402b7d30b3313c16cbabefb8907361c4eb669d990cbb87ceee5a" +dependencies = [ + "array_tool", + "env_logger 0.7.1", + "log", + "serde", + "serde_json", +] + +[[package]] +name = "jsonrpc-client-transports" +version = "15.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "489b9c612e60c766f751ab40fcb43cbb55a1e10bb44a9b4307ed510ca598cbd7" +dependencies = [ + "failure", + "futures 0.1.31", + "jsonrpc-core 15.1.0", + "jsonrpc-pubsub", + "log", + "serde", + "serde_json", + "url 1.7.2", +] + +[[package]] +name = "jsonrpc-core" +version = "15.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0745a6379e3edc893c84ec203589790774e4247420033e71a76d3ab4687991fa" +dependencies = [ + "futures 0.1.31", + "log", + "serde", + "serde_derive", + "serde_json", +] + +[[package]] +name = "jsonrpc-core" +version = "17.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07569945133257ff557eb37b015497104cea61a2c9edaf126c1cbd6e8332397f" +dependencies = [ + "futures 0.3.13", + "log", + "serde", + "serde_derive", + "serde_json", +] + +[[package]] +name = "jsonrpc-core-client" 
+version = "15.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f764902d7b891344a0acb65625f32f6f7c6db006952143bd650209fbe7d94db" +dependencies = [ + "jsonrpc-client-transports", +] + +[[package]] +name = "jsonrpc-derive" +version = "15.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99a847f9ec7bb52149b2786a17c9cb260d6effc6b8eeb8c16b343a487a7563a3" +dependencies = [ + "proc-macro-crate 0.1.5", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "jsonrpc-http-server" +version = "15.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb5c4513b7b542f42da107942b7b759f27120b5cc894729f88254b28dff44b7" +dependencies = [ + "hyper 0.12.36", + "jsonrpc-core 15.1.0", + "jsonrpc-server-utils", + "log", + "net2", + "parking_lot 0.10.2", + "unicase", +] + +[[package]] +name = "jsonrpc-ipc-server" +version = "15.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf50e53e4eea8f421a7316c5f63e395f7bc7c4e786a6dc54d76fab6ff7aa7ce7" +dependencies = [ + "jsonrpc-core 15.1.0", + "jsonrpc-server-utils", + "log", + "parity-tokio-ipc", + "parking_lot 0.10.2", + "tokio-service", +] + +[[package]] +name = "jsonrpc-pubsub" +version = "15.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "639558e0604013be9787ae52f798506ae42bf4220fe587bdc5625871cc8b9c77" +dependencies = [ + "jsonrpc-core 15.1.0", + "log", + "parking_lot 0.10.2", + "rand 0.7.3", + "serde", +] + +[[package]] +name = "jsonrpc-server-utils" +version = "15.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72f1f3990650c033bd8f6bd46deac76d990f9bbfb5f8dc8c4767bf0a00392176" +dependencies = [ + "bytes 0.4.12", + "globset", + "jsonrpc-core 15.1.0", + "lazy_static", + "log", + "tokio 0.1.22", + "tokio-codec", + "unicase", +] + +[[package]] +name = "jsonrpc-ws-server" +version = "15.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6596fe75209b73a2a75ebe1dce4e60e03b88a2b25e8807b667597f6315150d22" +dependencies = [ + "jsonrpc-core 15.1.0", + "jsonrpc-server-utils", + "log", + "parity-ws", + "parking_lot 0.10.2", + "slab", +] + +[[package]] +name = "jsonrpsee-proc-macros" +version = "0.2.0-alpha.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0cbaee9ca6440e191545a68c7bf28db0ff918359a904e37a6e7cf7edd132f5a" +dependencies = [ + "Inflector", + "proc-macro-crate 1.0.0", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "jsonrpsee-types" +version = "0.2.0-alpha.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4ce2de6884fb4abee16eca02329a1eec1eb8df8aed751a8e929083820c78ce7" +dependencies = [ + "async-trait", + "beef", + "futures-channel", + "futures-util", + "log", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "jsonrpsee-ws-client" +version = "0.2.0-alpha.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03ece6acc5ef1e7877fd53887e8937b273466713dc8c017a32223c2b9b678d63" +dependencies = [ + "async-std", + "async-tls", + "async-trait", + "fnv", + "futures 0.3.13", + "jsonrpsee-types", + "log", + "pin-project 1.0.5", + "serde", + "serde_json", + "soketto", + "thiserror", + "url 2.2.1", + "webpki 0.22.0", +] + +[[package]] +name = "keccak" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" + +[[package]] +name = "kernel32-sys" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +dependencies = [ + "winapi 0.2.8", + "winapi-build", +] + +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + +[[package]] +name = "kvdb" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8891bd853eff90e33024195d79d578dc984c82f9e0715fcd2b525a0c19d52811" +dependencies = [ + "parity-util-mem", + "smallvec 1.6.1", +] + +[[package]] +name = "kvdb-memorydb" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30a0da8e08caf08d384a620ec19bb6c9b85c84137248e202617fb91881f25912" +dependencies = [ + "kvdb", + "parity-util-mem", + "parking_lot 0.11.1", +] + +[[package]] +name = "kvdb-rocksdb" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34446c373ccc494c2124439281c198c7636ccdc2752c06722bbffd56d459c1e4" +dependencies = [ + "fs-swap", + "kvdb", + "log", + "num_cpus", + "owning_ref", + "parity-util-mem", + "parking_lot 0.11.1", + "regex", + "rocksdb", + "smallvec 1.6.1", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "leb128" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" + +[[package]] +name = "libc" +version = "0.2.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7282d924be3275cec7f6756ff4121987bc6481325397dde6ba3e7802b1a8b1c" + +[[package]] +name = "libloading" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" 
+dependencies = [ + "cc", + "winapi 0.3.9", +] + +[[package]] +name = "libm" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" + +[[package]] +name = "libnghttp2-sys" +version = "0.1.6+1.43.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0af55541a8827e138d59ec9e5877fb6095ece63fb6f4da45e7491b4fbd262855" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "libp2p" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe5759b526f75102829c15e4d8566603b4bf502ed19b5f35920d98113873470d" +dependencies = [ + "atomic", + "bytes 1.0.1", + "futures 0.3.13", + "lazy_static", + "libp2p-core", + "libp2p-deflate", + "libp2p-dns", + "libp2p-floodsub", + "libp2p-gossipsub", + "libp2p-identify", + "libp2p-kad", + "libp2p-mdns", + "libp2p-mplex", + "libp2p-noise", + "libp2p-ping", + "libp2p-plaintext", + "libp2p-pnet", + "libp2p-relay", + "libp2p-request-response", + "libp2p-swarm", + "libp2p-swarm-derive", + "libp2p-tcp", + "libp2p-uds", + "libp2p-wasm-ext", + "libp2p-websocket", + "libp2p-yamux", + "parity-multiaddr", + "parking_lot 0.11.1", + "pin-project 1.0.5", + "smallvec 1.6.1", + "wasm-timer", +] + +[[package]] +name = "libp2p-core" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1e1797734bbd4c453664fefb029628f77c356ffc5bce98f06b18a7db3ebb0f7" +dependencies = [ + "asn1_der", + "bs58", + "ed25519-dalek", + "either", + "fnv", + "futures 0.3.13", + "futures-timer 3.0.2", + "lazy_static", + "libsecp256k1", + "log", + "multihash", + "multistream-select", + "parity-multiaddr", + "parking_lot 0.11.1", + "pin-project 1.0.5", + "prost", + "prost-build", + "rand 0.7.3", + "ring", + "rw-stream-sink", + "sha2 0.9.3", + "smallvec 1.6.1", + "thiserror", + "unsigned-varint 0.7.0", + "void", + "zeroize", +] + +[[package]] +name = 
"libp2p-deflate" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2181a641cd15f9b6ba71b1335800f309012a0a97a29ffaabbbf40e9d3d58f08" +dependencies = [ + "flate2", + "futures 0.3.13", + "libp2p-core", +] + +[[package]] +name = "libp2p-dns" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62e63dab8b5ff35e0c101a3e51e843ba782c07bbb1682f5fd827622e0d02b98b" +dependencies = [ + "async-std-resolver", + "futures 0.3.13", + "libp2p-core", + "log", + "smallvec 1.6.1", + "trust-dns-resolver", +] + +[[package]] +name = "libp2p-floodsub" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897645f99e9b396df256a6aa8ba8c4bc019ac6b7c62556f624b5feea9acc82bb" +dependencies = [ + "cuckoofilter", + "fnv", + "futures 0.3.13", + "libp2p-core", + "libp2p-swarm", + "log", + "prost", + "prost-build", + "rand 0.7.3", + "smallvec 1.6.1", +] + +[[package]] +name = "libp2p-gossipsub" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "794b0c85f5df1acbc1fc38414d37272594811193b6325c76d3931c3e3f5df8c0" +dependencies = [ + "asynchronous-codec 0.6.0", + "base64 0.13.0", + "byteorder", + "bytes 1.0.1", + "fnv", + "futures 0.3.13", + "hex_fmt", + "libp2p-core", + "libp2p-swarm", + "log", + "prost", + "prost-build", + "rand 0.7.3", + "regex", + "sha2 0.9.3", + "smallvec 1.6.1", + "unsigned-varint 0.7.0", + "wasm-timer", +] + +[[package]] +name = "libp2p-identify" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f88ebc841d744979176ab4b8b294a3e655a7ba4ef26a905d073a52b49ed4dff5" +dependencies = [ + "futures 0.3.13", + "libp2p-core", + "libp2p-swarm", + "log", + "prost", + "prost-build", + "smallvec 1.6.1", + "wasm-timer", +] + +[[package]] +name = "libp2p-kad" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bbb5b90b6bda749023a85f60b49ea74b387c25f17d8df541ae72a3c75dd52e63" +dependencies = [ + "arrayvec 0.5.2", + "asynchronous-codec 0.6.0", + "bytes 1.0.1", + "either", + "fnv", + "futures 0.3.13", + "libp2p-core", + "libp2p-swarm", + "log", + "prost", + "prost-build", + "rand 0.7.3", + "sha2 0.9.3", + "smallvec 1.6.1", + "uint", + "unsigned-varint 0.7.0", + "void", + "wasm-timer", +] + +[[package]] +name = "libp2p-mdns" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be28ca13bb648d249a9baebd750ebc64ce7040ddd5f0ce1035ff1f4549fb596d" +dependencies = [ + "async-io", + "data-encoding", + "dns-parser", + "futures 0.3.13", + "if-watch", + "lazy_static", + "libp2p-core", + "libp2p-swarm", + "log", + "rand 0.8.3", + "smallvec 1.6.1", + "socket2 0.4.0", + "void", +] + +[[package]] +name = "libp2p-mplex" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e9b544335d1ed30af71daa96edbefadef6f19c7a55f078b9fc92c87163105d" +dependencies = [ + "asynchronous-codec 0.6.0", + "bytes 1.0.1", + "futures 0.3.13", + "libp2p-core", + "log", + "nohash-hasher", + "parking_lot 0.11.1", + "rand 0.7.3", + "smallvec 1.6.1", + "unsigned-varint 0.7.0", +] + +[[package]] +name = "libp2p-noise" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36db0f0db3b0433f5b9463f1c0cd9eadc0a3734a9170439ce501ff99733a88bd" +dependencies = [ + "bytes 1.0.1", + "curve25519-dalek 3.0.2", + "futures 0.3.13", + "lazy_static", + "libp2p-core", + "log", + "prost", + "prost-build", + "rand 0.7.3", + "sha2 0.9.3", + "snow", + "static_assertions", + "x25519-dalek", + "zeroize", +] + +[[package]] +name = "libp2p-ping" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dea10fc5209260915ea65b78f612d7ff78a29ab288e7aa3250796866af861c45" +dependencies = [ + "futures 0.3.13", + "libp2p-core", + "libp2p-swarm", + "log", + "rand 0.7.3", + 
"void", + "wasm-timer", +] + +[[package]] +name = "libp2p-plaintext" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c8c37b4d2a075b4be8442760a5f8c037180f0c8dd5b5734b9978ab868b3aa11" +dependencies = [ + "asynchronous-codec 0.6.0", + "bytes 1.0.1", + "futures 0.3.13", + "libp2p-core", + "log", + "prost", + "prost-build", + "unsigned-varint 0.7.0", + "void", +] + +[[package]] +name = "libp2p-pnet" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce3374f3b28162db9d3442c9347c4f14cb01e8290052615c7d341d40eae0599" +dependencies = [ + "futures 0.3.13", + "log", + "pin-project 1.0.5", + "rand 0.7.3", + "salsa20", + "sha3", +] + +[[package]] +name = "libp2p-relay" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ff268be6a9d6f3c6cca3b81bbab597b15217f9ad8787c6c40fc548c1af7cd24" +dependencies = [ + "asynchronous-codec 0.6.0", + "bytes 1.0.1", + "futures 0.3.13", + "futures-timer 3.0.2", + "libp2p-core", + "libp2p-swarm", + "log", + "pin-project 1.0.5", + "prost", + "prost-build", + "rand 0.7.3", + "smallvec 1.6.1", + "unsigned-varint 0.7.0", + "void", + "wasm-timer", +] + +[[package]] +name = "libp2p-request-response" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "725367dd2318c54c5ab1a6418592e5b01c63b0dedfbbfb8389220b2bcf691899" +dependencies = [ + "async-trait", + "bytes 1.0.1", + "futures 0.3.13", + "libp2p-core", + "libp2p-swarm", + "log", + "lru", + "minicbor", + "rand 0.7.3", + "smallvec 1.6.1", + "unsigned-varint 0.7.0", + "wasm-timer", +] + +[[package]] +name = "libp2p-swarm" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75c26980cadd7c25d89071cb23e1f7f5df4863128cc91d83c6ddc72338cecafa" +dependencies = [ + "either", + "futures 0.3.13", + "libp2p-core", + "log", + "rand 0.7.3", + "smallvec 1.6.1", + "void", + 
"wasm-timer", +] + +[[package]] +name = "libp2p-swarm-derive" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c564ebaa36a64839f51eaddb0243aaaa29ce64affb56129193cc3248b72af273" +dependencies = [ + "quote", + "syn", +] + +[[package]] +name = "libp2p-tcp" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b1a27d21c477951799e99d5c105d78868258502ce092988040a808d5a19bbd9" +dependencies = [ + "async-io", + "futures 0.3.13", + "futures-timer 3.0.2", + "if-watch", + "ipnet", + "libc", + "libp2p-core", + "log", + "socket2 0.4.0", +] + +[[package]] +name = "libp2p-uds" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffd6564bb3b7ff203661ccbb69003c2b551e34cef974f2d6c6a28306a12170b5" +dependencies = [ + "async-std", + "futures 0.3.13", + "libp2p-core", + "log", +] + +[[package]] +name = "libp2p-wasm-ext" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cef45d61e43c313531b5e903e4e8415212ff6338e0c54c47da5b9b412b5760de" +dependencies = [ + "futures 0.3.13", + "js-sys", + "libp2p-core", + "parity-send-wrapper", + "wasm-bindgen", + "wasm-bindgen-futures", +] + +[[package]] +name = "libp2p-websocket" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cace60995ef6f637e4752cccbb2590f6bc358e8741a0d066307636c69a4b3a74" +dependencies = [ + "either", + "futures 0.3.13", + "futures-rustls", + "libp2p-core", + "log", + "quicksink", + "rw-stream-sink", + "soketto", + "url 2.2.1", + "webpki-roots", +] + +[[package]] +name = "libp2p-yamux" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96d6144cc94143fb0a8dd1e7c2fbcc32a2808168bcd1d69920635424d5993b7b" +dependencies = [ + "futures 0.3.13", + "libp2p-core", + "parking_lot 0.11.1", + "thiserror", + "yamux", +] + +[[package]] +name = "librocksdb-sys" 
+version = "6.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb5b56f651c204634b936be2f92dbb42c36867e00ff7fe2405591f3b9fa66f09" +dependencies = [ + "bindgen", + "cc", + "glob", + "libc", +] + +[[package]] +name = "libsecp256k1" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" +dependencies = [ + "arrayref", + "crunchy", + "digest 0.8.1", + "hmac-drbg", + "rand 0.7.3", + "sha2 0.8.2", + "subtle 2.4.0", + "typenum", +] + +[[package]] +name = "libz-sys" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "linked-hash-map" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" + +[[package]] +name = "linked_hash_set" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47186c6da4d81ca383c7c47c1bfc80f4b95f4720514d860a5407aaf4233f9588" +dependencies = [ + "linked-hash-map", +] + +[[package]] +name = "linregress" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d0ad4b5cc8385a881c561fac3501353d63d2a2b7a357b5064d71815c9a92724" +dependencies = [ + "nalgebra", + "statrs", +] + +[[package]] +name = "lock_api" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "lock_api" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" +dependencies = [ + 
"scopeguard", +] + +[[package]] +name = "log" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +dependencies = [ + "cfg-if 1.0.0", + "value-bag", +] + +[[package]] +name = "lru" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f374d42cdfc1d7dbf3d3dec28afab2eb97ffbf43a3234d795b5986dbf4b90ba" +dependencies = [ + "hashbrown", +] + +[[package]] +name = "lru-cache" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" +dependencies = [ + "linked-hash-map", +] + +[[package]] +name = "mach" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" +dependencies = [ + "libc", +] + +[[package]] +name = "maplit" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" + +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + +[[package]] +name = "matchers" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "matches" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" + +[[package]] +name = "matrixmultiply" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "916806ba0031cd542105d916a97c8572e1fa6dd79c9c51e7eb43a09ec2dd84c1" 
+dependencies = [ + "rawpointer", +] + +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" + +[[package]] +name = "memchr" +version = "2.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" + +[[package]] +name = "memmap" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" +dependencies = [ + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "memmap2" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04e3e85b970d650e2ae6d70592474087051c11c54da7f7b4949725c5735fbcc6" +dependencies = [ + "libc", +] + +[[package]] +name = "memoffset" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +dependencies = [ + "autocfg", +] + +[[package]] +name = "memoffset" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87" +dependencies = [ + "autocfg", +] + +[[package]] +name = "memory-db" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "814bbecfc0451fc314eeea34f05bbcd5b98a7ad7af37faee088b86a1e633f1d4" +dependencies = [ + "hash-db", + "hashbrown", + "parity-util-mem", +] + +[[package]] +name = "memory_units" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" + +[[package]] +name = "merlin" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.5.1", + "zeroize", +] + +[[package]] +name = "messages-relay" +version = "0.1.0" +dependencies = [ + "async-std", + "async-trait", + "bp-messages", + "futures 0.3.13", + "hex", + "log", + "parking_lot 0.11.1", + "relay-utils", +] + +[[package]] +name = "millau-bridge-node" +version = "0.1.0" +dependencies = [ + "bp-messages", + "bp-millau", + "bp-runtime", + "frame-benchmarking", + "frame-benchmarking-cli", + "jsonrpc-core 15.1.0", + "millau-runtime", + "node-inspect", + "pallet-bridge-messages", + "pallet-transaction-payment-rpc", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-aura", + "sc-executor", + "sc-finality-grandpa", + "sc-finality-grandpa-rpc", + "sc-keystore", + "sc-rpc", + "sc-service", + "sc-telemetry", + "sc-transaction-pool", + "sp-consensus", + "sp-consensus-aura", + "sp-core", + "sp-finality-grandpa", + "sp-inherents", + "sp-runtime", + "structopt", + "substrate-build-script-utils", + "substrate-frame-rpc-system", +] + +[[package]] +name = "millau-runtime" +version = "0.1.0" +dependencies = [ + "bp-header-chain", + "bp-messages", + "bp-millau", + "bp-rialto", + "bp-runtime", + "bp-westend", + "bridge-runtime-common", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-rpc-runtime-api", + "hex-literal 0.3.1", + "pallet-aura", + "pallet-balances", + "pallet-bridge-dispatch", + "pallet-bridge-grandpa", + "pallet-bridge-messages", + "pallet-grandpa", + "pallet-randomness-collective-flip", + "pallet-session", + "pallet-shift-session-manager", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec 2.0.1", + "serde", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-core", + "sp-finality-grandpa", + "sp-inherents", + "sp-offchain", + "sp-runtime", + "sp-session", + 
"sp-std", + "sp-transaction-pool", + "sp-trie", + "sp-version", + "substrate-wasm-builder-runner", +] + +[[package]] +name = "mime" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" + +[[package]] +name = "minicbor" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea79ce4ab9f445ec6b71833a2290ac0a29c9dde0fa7cae4c481eecae021d9bd9" +dependencies = [ + "minicbor-derive", +] + +[[package]] +name = "minicbor-derive" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce18b5423c573a13e80cb3046ea0af6379ef725dc3af4886bdb8f4e5093068" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "miniz_oxide" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +dependencies = [ + "adler", + "autocfg", +] + +[[package]] +name = "mio" +version = "0.6.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" +dependencies = [ + "cfg-if 0.1.10", + "fuchsia-zircon", + "fuchsia-zircon-sys", + "iovec", + "kernel32-sys", + "libc", + "log", + "miow 0.2.2", + "net2", + "slab", + "winapi 0.2.8", +] + +[[package]] +name = "mio-extras" +version = "2.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" +dependencies = [ + "lazycell", + "log", + "mio", + "slab", +] + +[[package]] +name = "mio-named-pipes" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" +dependencies = [ + "log", + "mio", + "miow 0.3.6", + "winapi 0.3.9", +] + +[[package]] +name = "mio-uds" 
+version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" +dependencies = [ + "iovec", + "libc", + "mio", +] + +[[package]] +name = "miow" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" +dependencies = [ + "kernel32-sys", + "net2", + "winapi 0.2.8", + "ws2_32-sys", +] + +[[package]] +name = "miow" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" +dependencies = [ + "socket2 0.3.19", + "winapi 0.3.9", +] + +[[package]] +name = "more-asserts" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238" + +[[package]] +name = "multibase" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b78c60039650ff12e140ae867ef5299a58e19dded4d334c849dc7177083667e2" +dependencies = [ + "base-x", + "data-encoding", + "data-encoding-macro", +] + +[[package]] +name = "multihash" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dac63698b887d2d929306ea48b63760431ff8a24fac40ddb22f9c7f49fb7cab" +dependencies = [ + "blake2b_simd", + "blake2s_simd", + "blake3", + "digest 0.9.0", + "generic-array 0.14.4", + "multihash-derive", + "sha2 0.9.3", + "sha3", + "unsigned-varint 0.5.1", +] + +[[package]] +name = "multihash-derive" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85ee3c48cb9d9b275ad967a0e96715badc13c6029adb92f34fa17b9ff28fd81f" +dependencies = [ + "proc-macro-crate 0.1.5", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "multimap" +version = "0.8.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333" + +[[package]] +name = "multistream-select" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5df70763c86c98487451f307e1b68b4100da9076f4c12146905fc2054277f4e8" +dependencies = [ + "bytes 1.0.1", + "futures 0.3.13", + "log", + "pin-project 1.0.5", + "smallvec 1.6.1", + "unsigned-varint 0.7.0", +] + +[[package]] +name = "nalgebra" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6b6147c3d50b4f3cdabfe2ecc94a0191fd3d6ad58aefd9664cf396285883486" +dependencies = [ + "approx", + "generic-array 0.13.2", + "matrixmultiply", + "num-complex", + "num-rational", + "num-traits", + "rand 0.7.3", + "rand_distr", + "simba", + "typenum", +] + +[[package]] +name = "names" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef320dab323286b50fb5cdda23f61c796a72a89998ab565ca32525c5c556f2da" +dependencies = [ + "rand 0.3.23", +] + +[[package]] +name = "nb-connect" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "670361df1bc2399ee1ff50406a0d422587dd3bb0da596e1978fe8e05dabddf4f" +dependencies = [ + "libc", + "socket2 0.3.19", +] + +[[package]] +name = "net2" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" +dependencies = [ + "cfg-if 0.1.10", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "node-inspect" +version = "0.8.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "derive_more", + "log", + "parity-scale-codec 2.0.1", + "sc-cli", + "sc-client-api", + "sc-service", + "sp-blockchain", + "sp-core", + "sp-runtime", + "structopt", +] + +[[package]] +name 
= "nodrop" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" + +[[package]] +name = "nohash-hasher" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" + +[[package]] +name = "nom" +version = "5.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" +dependencies = [ + "memchr", + "version_check", +] + +[[package]] +name = "ntapi" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +dependencies = [ + "winapi 0.3.9", +] + +[[package]] +name = "num-bigint" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-format" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bafe4179722c2894288ee77a9f044f02811c86af699344c498b0840c698a2465" +dependencies = [ + "arrayvec 0.4.12", + "itoa", +] + +[[package]] +name = "num-integer" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.2.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" +dependencies = [ + "autocfg", + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "num_cpus" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "object" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" +dependencies = [ + "crc32fast", + "indexmap", +] + +[[package]] +name = "once_cell" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10acf907b94fc1b1a152d08ef97e7759650268cf986bf127f387e602b02c7e5a" +dependencies = [ + "parking_lot 0.11.1", +] + +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + +[[package]] +name = "openssl-probe" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" + +[[package]] +name = "openssl-sys" +version = "0.9.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"313752393519e876837e09e1fa183ddef0be7735868dced3196f4472d536277f" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "owning_ref" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce" +dependencies = [ + "stable_deref_trait", +] + +[[package]] +name = "pallet-aura" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "frame-support", + "frame-system", + "pallet-session", + "pallet-timestamp", + "parity-scale-codec 2.0.1", + "sp-application-crypto", + "sp-consensus-aura", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-authorship" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "parity-scale-codec 2.0.1", + "sp-authorship", + "sp-inherents", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-balances" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "parity-scale-codec 2.0.1", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-bridge-currency-exchange" +version = "0.1.0" +dependencies = [ + "bp-currency-exchange", + "bp-header-chain", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "parity-scale-codec 2.0.1", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-bridge-dispatch" +version = "0.1.0" +dependencies = [ + "bp-message-dispatch", + "bp-runtime", + "frame-support", + "frame-system", + "log", + "parity-scale-codec 2.0.1", + "serde", + "sp-core", + "sp-io", + "sp-runtime", 
+ "sp-std", +] + +[[package]] +name = "pallet-bridge-eth-poa" +version = "0.1.0" +dependencies = [ + "bp-eth-poa", + "frame-benchmarking", + "frame-support", + "frame-system", + "hex-literal 0.3.1", + "libsecp256k1", + "log", + "parity-scale-codec 2.0.1", + "serde", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-bridge-grandpa" +version = "0.1.0" +dependencies = [ + "bp-header-chain", + "bp-runtime", + "bp-test-utils", + "finality-grandpa 0.14.0", + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "num-traits", + "parity-scale-codec 2.0.1", + "serde", + "sp-finality-grandpa", + "sp-io", + "sp-runtime", + "sp-std", + "sp-trie", +] + +[[package]] +name = "pallet-bridge-messages" +version = "0.1.0" +dependencies = [ + "bp-messages", + "bp-rialto", + "bp-runtime", + "frame-benchmarking", + "frame-support", + "frame-system", + "hex", + "hex-literal 0.3.1", + "log", + "num-traits", + "pallet-balances", + "parity-scale-codec 2.0.1", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-grandpa" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log", + "pallet-authorship", + "pallet-session", + "parity-scale-codec 2.0.1", + "sp-application-crypto", + "sp-core", + "sp-finality-grandpa", + "sp-runtime", + "sp-session", + "sp-staking", + "sp-std", +] + +[[package]] +name = "pallet-randomness-collective-flip" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec 2.0.1", + "safe-mix", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-session" +version = "3.0.0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "pallet-timestamp", + "parity-scale-codec 2.0.1", + "sp-core", + "sp-io", + "sp-runtime", + "sp-session", + "sp-staking", + "sp-std", + "sp-trie", +] + +[[package]] +name = "pallet-shift-session-manager" +version = "0.1.0" +dependencies = [ + "frame-support", + "frame-system", + "pallet-session", + "parity-scale-codec 2.0.1", + "serde", + "sp-core", + "sp-runtime", + "sp-staking", + "sp-std", +] + +[[package]] +name = "pallet-sudo" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec 2.0.1", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-timestamp" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "log", + "parity-scale-codec 2.0.1", + "sp-inherents", + "sp-runtime", + "sp-std", + "sp-timestamp", +] + +[[package]] +name = "pallet-transaction-payment" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec 2.0.1", + "serde", + "smallvec 1.6.1", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-transaction-payment-rpc" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "jsonrpc-core 15.1.0", + "jsonrpc-core-client", + "jsonrpc-derive", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec 2.0.1", + "sp-api", + "sp-blockchain", + 
"sp-core", + "sp-rpc", + "sp-runtime", +] + +[[package]] +name = "pallet-transaction-payment-rpc-runtime-api" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "pallet-transaction-payment", + "parity-scale-codec 2.0.1", + "sp-api", + "sp-runtime", +] + +[[package]] +name = "parity-bytes" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b56e3a2420138bdb970f84dfb9c774aea80fa0e7371549eedec0d80c209c67" + +[[package]] +name = "parity-db" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "495197c078e54b8735181aa35c00a327f7f3a3cc00a1ee8c95926dd010f0ec6b" +dependencies = [ + "blake2-rfc", + "crc32fast", + "fs2", + "hex", + "libc", + "log", + "memmap2", + "parking_lot 0.11.1", + "rand 0.8.3", +] + +[[package]] +name = "parity-multiaddr" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58341485071825827b7f03cf7efd1cb21e6a709bea778fb50227fd45d2f361b4" +dependencies = [ + "arrayref", + "bs58", + "byteorder", + "data-encoding", + "multihash", + "percent-encoding 2.1.0", + "serde", + "static_assertions", + "unsigned-varint 0.7.0", + "url 2.2.1", +] + +[[package]] +name = "parity-scale-codec" +version = "1.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4b26b16c7687c3075982af47719e481815df30bc544f7a6690763a25ca16e9d" +dependencies = [ + "arrayvec 0.5.2", + "bitvec 0.17.4", + "byte-slice-cast 0.3.5", + "parity-scale-codec-derive 1.2.3", + "serde", +] + +[[package]] +name = "parity-scale-codec" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cd3dab59b5cf4bc81069ade0fc470341a1ef3ad5fa73e5a8943bed2ec12b2e8" +dependencies = [ + "arrayvec 0.5.2", + "bitvec 0.20.1", + "byte-slice-cast 1.0.0", + "parity-scale-codec-derive 2.0.1", + "serde", +] + +[[package]] 
+name = "parity-scale-codec-derive" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c41512944b1faff334a5f1b9447611bf4ef40638ccb6328173dacefb338e878c" +dependencies = [ + "proc-macro-crate 0.1.5", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa04976a81fde04924b40cc4036c4d12841e8bb04325a5cf2ada75731a150a7d" +dependencies = [ + "proc-macro-crate 0.1.5", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "parity-send-wrapper" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" + +[[package]] +name = "parity-tokio-ipc" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e57fea504fea33f9fbb5f49f378359030e7e026a6ab849bb9e8f0787376f1bf" +dependencies = [ + "bytes 0.4.12", + "futures 0.1.31", + "libc", + "log", + "mio-named-pipes", + "miow 0.3.6", + "rand 0.7.3", + "tokio 0.1.22", + "tokio-named-pipes", + "tokio-uds", + "winapi 0.3.9", +] + +[[package]] +name = "parity-util-mem" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "664a8c6b8e62d8f9f2f937e391982eb433ab285b4cd9545b342441e04a906e42" +dependencies = [ + "cfg-if 1.0.0", + "hashbrown", + "impl-trait-for-tuples", + "parity-util-mem-derive", + "parking_lot 0.11.1", + "primitive-types", + "smallvec 1.6.1", + "winapi 0.3.9", +] + +[[package]] +name = "parity-util-mem-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" +dependencies = [ + "proc-macro2", + "syn", + "synstructure", +] + +[[package]] +name = "parity-wasm" +version = "0.41.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" + +[[package]] +name = "parity-ws" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e02a625dd75084c2a7024f07c575b61b782f729d18702dabb3cdbf31911dc61" +dependencies = [ + "byteorder", + "bytes 0.4.12", + "httparse", + "log", + "mio", + "mio-extras", + "rand 0.7.3", + "sha-1 0.8.2", + "slab", + "url 2.2.1", +] + +[[package]] +name = "parking" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" + +[[package]] +name = "parking_lot" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" +dependencies = [ + "lock_api 0.3.4", + "parking_lot_core 0.6.2", + "rustc_version", +] + +[[package]] +name = "parking_lot" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" +dependencies = [ + "lock_api 0.3.4", + "parking_lot_core 0.7.2", +] + +[[package]] +name = "parking_lot" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +dependencies = [ + "instant", + "lock_api 0.4.2", + "parking_lot_core 0.8.3", +] + +[[package]] +name = "parking_lot_core" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" +dependencies = [ + "cfg-if 0.1.10", + "cloudabi", + "libc", + "redox_syscall 0.1.57", + "rustc_version", + "smallvec 0.6.14", + "winapi 0.3.9", +] + +[[package]] +name = "parking_lot_core" +version = "0.7.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" +dependencies = [ + "cfg-if 0.1.10", + "cloudabi", + "libc", + "redox_syscall 0.1.57", + "smallvec 1.6.1", + "winapi 0.3.9", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +dependencies = [ + "cfg-if 1.0.0", + "instant", + "libc", + "redox_syscall 0.2.5", + "smallvec 1.6.1", + "winapi 0.3.9", +] + +[[package]] +name = "paste" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" +dependencies = [ + "paste-impl", + "proc-macro-hack", +] + +[[package]] +name = "paste" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1" + +[[package]] +name = "paste-impl" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" +dependencies = [ + "proc-macro-hack", +] + +[[package]] +name = "pbkdf2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" +dependencies = [ + "byteorder", + "crypto-mac 0.7.0", +] + +[[package]] +name = "pbkdf2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" +dependencies = [ + "crypto-mac 0.8.0", +] + +[[package]] +name = "pdqselect" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec91767ecc0a0bbe558ce8c9da33c068066c57ecc8bb8477ef8c1ad3ef77c27" + +[[package]] +name 
= "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + +[[package]] +name = "percent-encoding" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" + +[[package]] +name = "percent-encoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" + +[[package]] +name = "pest" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +dependencies = [ + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "833d1ae558dc601e9a60366421196a8d94bc0ac980476d0b67e1d0988d72b2d0" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pest_meta" +version = "2.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d" +dependencies = [ + "maplit", + "pest", + "sha-1 0.8.2", +] + +[[package]] +name = "petgraph" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7" +dependencies = [ + "fixedbitset", + "indexmap", +] + +[[package]] +name = "pin-project" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "2ffbc8e94b38ea3d2d8ba92aea2983b503cd75d0888d75b86bb37970b5698e15" +dependencies = [ + "pin-project-internal 0.4.27", +] + +[[package]] +name = "pin-project" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" +dependencies = [ + "pin-project-internal 1.0.5", +] + +[[package]] +name = "pin-project-internal" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65ad2ae56b6abe3a1ee25f15ee605bacadb9a764edaba9c2bf4103800d4a1895" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-internal" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b" + +[[package]] +name = "pin-project-lite" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" + +[[package]] +name = "plain_hasher" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e19e6491bdde87c2c43d70f4c194bc8a758f2eb732df00f61e43f7362e3b4cc" +dependencies = [ + "crunchy", +] + +[[package]] +name = 
"platforms" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "989d43012e2ca1c4a02507c67282691a0a3207f9dc67cec596b43fe925b3d325" + +[[package]] +name = "polling" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2a7bc6b2a29e632e45451c941832803a18cce6781db04de8a04696cdca8bde4" +dependencies = [ + "cfg-if 0.1.10", + "libc", + "log", + "wepoll-sys", + "winapi 0.3.9", +] + +[[package]] +name = "poly1305" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b7456bc1ad2d4cf82b3a016be4c2ac48daf11bf990c1603ebd447fe6f30fca8" +dependencies = [ + "cpuid-bool 0.2.0", + "universal-hash", +] + +[[package]] +name = "polyval" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd" +dependencies = [ + "cpuid-bool 0.2.0", + "opaque-debug 0.3.0", + "universal-hash", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" + +[[package]] +name = "primitive-types" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2415937401cb030a2a0a4d922483f945fa068f52a7dbb22ce0fe5f2b6f6adace" +dependencies = [ + "fixed-hash", + "impl-codec", + "impl-rlp", + "impl-serde", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml", +] + +[[package]] +name = "proc-macro-crate" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fdbd1df62156fbc5945f4762632564d7d038153091c3fcf1067f6aef7cff92" +dependencies = [ + "thiserror", + "toml", +] 
+ +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro-hack" +version = "0.5.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" + +[[package]] +name = "proc-macro-nested" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" + +[[package]] +name = "proc-macro2" +version = "1.0.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "prometheus" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8425533e7122f0c3cc7a37e6244b16ad3a2cc32ae7ac6276e2a75da0d9c200d" +dependencies = [ + "cfg-if 1.0.0", + "fnv", + "lazy_static", + "parking_lot 0.11.1", + "regex", + "thiserror", +] + +[[package]] +name = "prost" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e6984d2f1a23009bd270b8bb56d0926810a3d483f59c987d77969e9d8e840b2" +dependencies = [ + "bytes 1.0.1", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"32d3ebd75ac2679c2af3a92246639f9fcc8a442ee420719cc4fe195b98dd5fa3" +dependencies = [ + "bytes 1.0.1", + "heck", + "itertools 0.9.0", + "log", + "multimap", + "petgraph", + "prost", + "prost-types", + "tempfile", + "which 4.0.2", +] + +[[package]] +name = "prost-derive" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "169a15f3008ecb5160cba7d37bcd690a7601b6d30cfb87a117d45e59d52af5d4" +dependencies = [ + "anyhow", + "itertools 0.9.0", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "prost-types" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b518d7cdd93dab1d1122cf07fa9a60771836c668dde9d9e2a139f957f0d9f1bb" +dependencies = [ + "bytes 1.0.1", + "prost", +] + +[[package]] +name = "psm" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3abf49e5417290756acfd26501536358560c4a5cc4a0934d390939acb3e7083a" +dependencies = [ + "cc", +] + +[[package]] +name = "pwasm-utils" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f53bc2558e8376358ebdc28301546471d67336584f6438ed4b7c7457a055fd7" +dependencies = [ + "byteorder", + "log", + "parity-wasm", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quick-error" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ac73b1112776fc109b2e61909bc46c7e1bf0d7f690ffb1676553acce16d5cda" + +[[package]] +name = "quicksink" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858" +dependencies = [ + "futures-core", + "futures-sink", + "pin-project-lite 0.1.11", +] + +[[package]] +name = "quote" +version = "1.0.9" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "radium" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" + +[[package]] +name = "radium" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" + +[[package]] +name = "rand" +version = "0.3.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c" +dependencies = [ + "libc", + "rand 0.4.6", +] + +[[package]] +name = "rand" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" +dependencies = [ + "fuchsia-cprng", + "libc", + "rand_core 0.3.1", + "rdrand", + "winapi 0.3.9", +] + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom 0.1.16", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc 0.2.0", + "rand_pcg", +] + +[[package]] +name = "rand" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +dependencies = [ + "libc", + "rand_chacha 0.3.0", + "rand_core 0.6.2", + "rand_hc 0.3.0", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", +] + +[[package]] 
+name = "rand_chacha" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.2", +] + +[[package]] +name = "rand_core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +dependencies = [ + "rand_core 0.4.2", +] + +[[package]] +name = "rand_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom 0.1.16", +] + +[[package]] +name = "rand_core" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" +dependencies = [ + "getrandom 0.2.2", +] + +[[package]] +name = "rand_distr" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2" +dependencies = [ + "rand 0.7.3", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", +] + +[[package]] +name = "rand_hc" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +dependencies = [ + "rand_core 0.6.2", +] + +[[package]] +name = "rand_pcg" +version = "0.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" +dependencies = [ + "rand_core 0.5.1", +] + +[[package]] +name = "rawpointer" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" + +[[package]] +name = "rayon" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674" +dependencies = [ + "autocfg", + "crossbeam-deque 0.8.0", + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" +dependencies = [ + "crossbeam-channel", + "crossbeam-deque 0.8.0", + "crossbeam-utils 0.8.3", + "lazy_static", + "num_cpus", +] + +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "redox_syscall" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" + +[[package]] +name = "redox_syscall" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" +dependencies = [ + "bitflags", +] + +[[package]] +name = "redox_users" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" +dependencies = [ + "getrandom 0.1.16", + "redox_syscall 0.1.57", + "rust-argon2", +] + +[[package]] +name = "redox_users" 
+version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" +dependencies = [ + "getrandom 0.2.2", + "redox_syscall 0.2.5", +] + +[[package]] +name = "ref-cast" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "regalloc" +version = "0.0.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5" +dependencies = [ + "log", + "rustc-hash", + "serde", + "smallvec 1.6.1", +] + +[[package]] +name = "regex" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", + "thread_local", +] + +[[package]] +name = "regex-automata" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" +dependencies = [ + "byteorder", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.6.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" + +[[package]] +name = "region" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0" +dependencies = [ + "bitflags", + "libc", + "mach", + 
"winapi 0.3.9", +] + +[[package]] +name = "relay-ethereum-client" +version = "0.1.0" +dependencies = [ + "bp-eth-poa", + "headers-relay", + "hex-literal 0.3.1", + "jsonrpsee-proc-macros", + "jsonrpsee-ws-client", + "libsecp256k1", + "log", + "parity-scale-codec 2.0.1", + "relay-utils", + "web3", +] + +[[package]] +name = "relay-kusama-client" +version = "0.1.0" +dependencies = [ + "bp-kusama", + "frame-support", + "frame-system", + "headers-relay", + "pallet-transaction-payment", + "parity-scale-codec 2.0.1", + "relay-substrate-client", + "relay-utils", + "sp-core", + "sp-keyring", + "sp-runtime", +] + +[[package]] +name = "relay-millau-client" +version = "0.1.0" +dependencies = [ + "frame-support", + "frame-system", + "headers-relay", + "millau-runtime", + "pallet-transaction-payment", + "parity-scale-codec 2.0.1", + "relay-substrate-client", + "relay-utils", + "sp-core", + "sp-keyring", + "sp-runtime", +] + +[[package]] +name = "relay-polkadot-client" +version = "0.1.0" +dependencies = [ + "bp-polkadot", + "frame-support", + "frame-system", + "headers-relay", + "pallet-transaction-payment", + "parity-scale-codec 2.0.1", + "relay-substrate-client", + "relay-utils", + "sp-core", + "sp-keyring", + "sp-runtime", +] + +[[package]] +name = "relay-rialto-client" +version = "0.1.0" +dependencies = [ + "frame-support", + "frame-system", + "headers-relay", + "pallet-transaction-payment", + "parity-scale-codec 2.0.1", + "relay-substrate-client", + "relay-utils", + "rialto-runtime", + "sp-core", + "sp-keyring", + "sp-runtime", +] + +[[package]] +name = "relay-rococo-client" +version = "0.1.0" +dependencies = [ + "bp-rococo", + "frame-support", + "frame-system", + "headers-relay", + "pallet-transaction-payment", + "parity-scale-codec 2.0.1", + "relay-substrate-client", + "relay-utils", + "sp-core", + "sp-keyring", + "sp-runtime", +] + +[[package]] +name = "relay-substrate-client" +version = "0.1.0" +dependencies = [ + "async-std", + "async-trait", + "bp-header-chain", + 
"bp-messages", + "bp-runtime", + "finality-relay", + "frame-support", + "frame-system", + "futures 0.3.13", + "headers-relay", + "jsonrpsee-proc-macros", + "jsonrpsee-ws-client", + "log", + "num-traits", + "pallet-balances", + "parity-scale-codec 2.0.1", + "rand 0.7.3", + "relay-utils", + "sc-rpc-api", + "sp-core", + "sp-finality-grandpa", + "sp-runtime", + "sp-std", + "sp-storage", + "sp-trie", + "sp-version", +] + +[[package]] +name = "relay-utils" +version = "0.1.0" +dependencies = [ + "ansi_term 0.12.1", + "async-std", + "async-trait", + "backoff", + "env_logger 0.8.3", + "futures 0.3.13", + "isahc", + "jsonpath_lib", + "log", + "num-traits", + "serde_json", + "substrate-prometheus-endpoint", + "sysinfo", + "time 0.2.25", +] + +[[package]] +name = "relay-westend-client" +version = "0.1.0" +dependencies = [ + "bp-westend", + "frame-support", + "frame-system", + "headers-relay", + "pallet-transaction-payment", + "parity-scale-codec 2.0.1", + "relay-substrate-client", + "relay-utils", + "sp-core", + "sp-keyring", + "sp-runtime", +] + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi 0.3.9", +] + +[[package]] +name = "resolv-conf" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +dependencies = [ + "hostname", + "quick-error 1.2.3", +] + +[[package]] +name = "retain_mut" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53552c6c49e1e13f1a203ef0080ab3bbef0beb570a528993e83df057a9d9bba1" + +[[package]] +name = "rialto-bridge-node" +version = "0.1.0" +dependencies = [ + "bp-messages", + "bp-rialto", + "bp-runtime", + "frame-benchmarking", + "frame-benchmarking-cli", + "jsonrpc-core 15.1.0", + "node-inspect", + 
"pallet-bridge-messages", + "pallet-transaction-payment-rpc", + "rialto-runtime", + "sc-basic-authorship", + "sc-cli", + "sc-client-api", + "sc-consensus", + "sc-consensus-aura", + "sc-executor", + "sc-finality-grandpa", + "sc-finality-grandpa-rpc", + "sc-keystore", + "sc-rpc", + "sc-service", + "sc-telemetry", + "sc-transaction-pool", + "sp-consensus", + "sp-consensus-aura", + "sp-core", + "sp-finality-grandpa", + "sp-inherents", + "sp-runtime", + "structopt", + "substrate-build-script-utils", + "substrate-frame-rpc-system", +] + +[[package]] +name = "rialto-runtime" +version = "0.1.0" +dependencies = [ + "bp-currency-exchange", + "bp-eth-poa", + "bp-header-chain", + "bp-message-dispatch", + "bp-messages", + "bp-millau", + "bp-rialto", + "bp-runtime", + "bridge-runtime-common", + "frame-benchmarking", + "frame-executive", + "frame-support", + "frame-system", + "frame-system-rpc-runtime-api", + "hex-literal 0.3.1", + "libsecp256k1", + "log", + "pallet-aura", + "pallet-balances", + "pallet-bridge-currency-exchange", + "pallet-bridge-dispatch", + "pallet-bridge-eth-poa", + "pallet-bridge-grandpa", + "pallet-bridge-messages", + "pallet-grandpa", + "pallet-randomness-collective-flip", + "pallet-session", + "pallet-shift-session-manager", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "parity-scale-codec 2.0.1", + "serde", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-core", + "sp-finality-grandpa", + "sp-inherents", + "sp-io", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std", + "sp-transaction-pool", + "sp-trie", + "sp-version", + "substrate-wasm-builder-runner", +] + +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin", + "untrusted", + "web-sys", + "winapi 0.3.9", +] + 
+[[package]] +name = "rlp" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e54369147e3e7796c9b885c7304db87ca3d09a0a98f72843d532868675bbfba8" +dependencies = [ + "bytes 1.0.1", + "rustc-hex", +] + +[[package]] +name = "rocksdb" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23d83c02c429044d58474eaf5ae31e062d0de894e21125b47437ec0edc1397e6" +dependencies = [ + "libc", + "librocksdb-sys", +] + +[[package]] +name = "rpassword" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffc936cf8a7ea60c58f030fd36a612a48f440610214dc54bc36431f9ea0c3efb" +dependencies = [ + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "rust-argon2" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" +dependencies = [ + "base64 0.13.0", + "blake2b_simd", + "constant_time_eq", + "crossbeam-utils 0.8.3", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver", +] + +[[package]] +name = "rustls" +version = "0.18.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" +dependencies = [ + "base64 0.12.3", + "log", + "ring", + "sct", + "webpki 0.21.4", +] + +[[package]] +name = "rustls" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" +dependencies = [ + "base64 0.13.0", + "log", + "ring", + "sct", + "webpki 0.21.4", +] + +[[package]] +name = "rustls-native-certs" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "629d439a7672da82dd955498445e496ee2096fe2117b9f796558a43fdb9e59b8" +dependencies = [ + "openssl-probe", + "rustls 0.18.1", + "schannel", + "security-framework", +] + +[[package]] +name = "ruzstd" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d425143485a37727c7a46e689bbe3b883a00f42b4a52c4ac0f44855c1009b00" +dependencies = [ + "byteorder", + "twox-hash", +] + +[[package]] +name = "rw-stream-sink" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" +dependencies = [ + "futures 0.3.13", + "pin-project 0.4.27", + "static_assertions", +] + +[[package]] +name = "ryu" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" + +[[package]] +name = "safe-mix" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d3d055a2582e6b00ed7a31c1524040aa391092bf636328350813f3a0605215c" +dependencies = [ + "rustc_version", +] + +[[package]] +name = "salsa20" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "399f290ffc409596022fce5ea5d4138184be4784f2b28c62c59f0d8389059a15" +dependencies = [ + 
"cipher", +] + +[[package]] +name = "sc-basic-authorship" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "futures 0.3.13", + "futures-timer 3.0.2", + "log", + "parity-scale-codec 2.0.1", + "sc-block-builder", + "sc-client-api", + "sc-proposer-metrics", + "sc-telemetry", + "sp-api", + "sp-blockchain", + "sp-consensus", + "sp-core", + "sp-inherents", + "sp-runtime", + "sp-transaction-pool", + "substrate-prometheus-endpoint", +] + +[[package]] +name = "sc-block-builder" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "parity-scale-codec 2.0.1", + "sc-client-api", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-core", + "sp-inherents", + "sp-runtime", + "sp-state-machine", +] + +[[package]] +name = "sc-chain-spec" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec 2.0.1", + "sc-chain-spec-derive", + "sc-consensus-babe", + "sc-consensus-epochs", + "sc-finality-grandpa", + "sc-network", + "sc-telemetry", + "serde", + "serde_json", + "sp-chain-spec", + "sp-consensus-babe", + "sp-core", + "sp-runtime", +] + +[[package]] +name = "sc-chain-spec-derive" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "proc-macro-crate 1.0.0", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sc-cli" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "chrono", + "fdlimit", + "futures 0.3.13", + "hex", + "libp2p", + "log", + "names", + "parity-scale-codec 2.0.1", + "rand 0.7.3", + "regex", + "rpassword", + "sc-client-api", + 
"sc-keystore", + "sc-network", + "sc-service", + "sc-telemetry", + "sc-tracing", + "serde", + "serde_json", + "sp-blockchain", + "sp-core", + "sp-keyring", + "sp-keystore", + "sp-panic-handler", + "sp-runtime", + "sp-utils", + "sp-version", + "structopt", + "thiserror", + "tiny-bip39", + "tokio 0.2.25", +] + +[[package]] +name = "sc-client-api" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "derive_more", + "fnv", + "futures 0.3.13", + "hash-db", + "kvdb", + "lazy_static", + "log", + "parity-scale-codec 2.0.1", + "parking_lot 0.11.1", + "sc-executor", + "sp-api", + "sp-blockchain", + "sp-consensus", + "sp-core", + "sp-database", + "sp-externalities", + "sp-inherents", + "sp-keystore", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-storage", + "sp-transaction-pool", + "sp-trie", + "sp-utils", + "sp-version", + "substrate-prometheus-endpoint", +] + +[[package]] +name = "sc-client-db" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "blake2-rfc", + "hash-db", + "kvdb", + "kvdb-memorydb", + "kvdb-rocksdb", + "linked-hash-map", + "log", + "parity-db", + "parity-scale-codec 2.0.1", + "parity-util-mem", + "parking_lot 0.11.1", + "sc-client-api", + "sc-executor", + "sc-state-db", + "sp-arithmetic", + "sp-blockchain", + "sp-consensus", + "sp-core", + "sp-database", + "sp-runtime", + "sp-state-machine", + "sp-trie", + "substrate-prometheus-endpoint", +] + +[[package]] +name = "sc-consensus" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "parking_lot 0.11.1", + "sc-client-api", + "sp-blockchain", + "sp-consensus", + "sp-runtime", +] + +[[package]] +name = "sc-consensus-aura" +version = "0.9.0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "async-trait", + "derive_more", + "futures 0.3.13", + "futures-timer 3.0.2", + "log", + "parity-scale-codec 2.0.1", + "sc-block-builder", + "sc-client-api", + "sc-consensus-slots", + "sc-telemetry", + "sp-api", + "sp-application-crypto", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-aura", + "sp-consensus-slots", + "sp-core", + "sp-inherents", + "sp-io", + "sp-keystore", + "sp-runtime", + "sp-timestamp", + "sp-version", + "substrate-prometheus-endpoint", +] + +[[package]] +name = "sc-consensus-babe" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "async-trait", + "derive_more", + "fork-tree", + "futures 0.3.13", + "futures-timer 3.0.2", + "log", + "merlin", + "num-bigint", + "num-rational", + "num-traits", + "parity-scale-codec 2.0.1", + "parking_lot 0.11.1", + "pdqselect", + "rand 0.7.3", + "retain_mut", + "sc-client-api", + "sc-consensus-epochs", + "sc-consensus-slots", + "sc-consensus-uncles", + "sc-keystore", + "sc-telemetry", + "schnorrkel", + "serde", + "sp-api", + "sp-application-crypto", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-babe", + "sp-consensus-slots", + "sp-consensus-vrf", + "sp-core", + "sp-inherents", + "sp-io", + "sp-keystore", + "sp-runtime", + "sp-timestamp", + "sp-utils", + "sp-version", + "substrate-prometheus-endpoint", +] + +[[package]] +name = "sc-consensus-epochs" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "fork-tree", + "parity-scale-codec 2.0.1", + "sc-client-api", + "sc-consensus", + "sp-blockchain", + "sp-runtime", +] + +[[package]] +name = "sc-consensus-slots" +version = "0.9.0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "async-trait", + "futures 0.3.13", + "futures-timer 3.0.2", + "log", + "parity-scale-codec 2.0.1", + "sc-client-api", + "sc-telemetry", + "sp-api", + "sp-application-crypto", + "sp-arithmetic", + "sp-blockchain", + "sp-consensus", + "sp-consensus-slots", + "sp-core", + "sp-inherents", + "sp-runtime", + "sp-state-machine", + "sp-timestamp", + "sp-trie", + "thiserror", +] + +[[package]] +name = "sc-consensus-uncles" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "log", + "sc-client-api", + "sp-authorship", + "sp-consensus", + "sp-core", + "sp-inherents", + "sp-runtime", +] + +[[package]] +name = "sc-executor" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "derive_more", + "lazy_static", + "libsecp256k1", + "log", + "parity-scale-codec 2.0.1", + "parity-wasm", + "parking_lot 0.11.1", + "sc-executor-common", + "sc-executor-wasmi", + "sc-executor-wasmtime", + "sp-api", + "sp-core", + "sp-externalities", + "sp-io", + "sp-maybe-compressed-blob", + "sp-panic-handler", + "sp-runtime-interface", + "sp-serializer", + "sp-tasks", + "sp-trie", + "sp-version", + "sp-wasm-interface", + "wasmi", +] + +[[package]] +name = "sc-executor-common" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "derive_more", + "parity-scale-codec 2.0.1", + "parity-wasm", + "pwasm-utils", + "sp-allocator", + "sp-core", + "sp-serializer", + "sp-wasm-interface", + "thiserror", + "wasmi", +] + +[[package]] +name = "sc-executor-wasmi" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + 
"log", + "parity-scale-codec 2.0.1", + "sc-executor-common", + "sp-allocator", + "sp-core", + "sp-runtime-interface", + "sp-wasm-interface", + "wasmi", +] + +[[package]] +name = "sc-executor-wasmtime" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "log", + "parity-scale-codec 2.0.1", + "parity-wasm", + "pwasm-utils", + "sc-executor-common", + "scoped-tls", + "sp-allocator", + "sp-core", + "sp-runtime-interface", + "sp-wasm-interface", + "wasmtime", +] + +[[package]] +name = "sc-finality-grandpa" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "async-trait", + "derive_more", + "dyn-clone", + "finality-grandpa 0.14.0", + "fork-tree", + "futures 0.3.13", + "futures-timer 3.0.2", + "linked-hash-map", + "log", + "parity-scale-codec 2.0.1", + "parking_lot 0.11.1", + "pin-project 1.0.5", + "rand 0.7.3", + "sc-block-builder", + "sc-client-api", + "sc-consensus", + "sc-keystore", + "sc-network", + "sc-network-gossip", + "sc-telemetry", + "serde_json", + "sp-api", + "sp-application-crypto", + "sp-arithmetic", + "sp-blockchain", + "sp-consensus", + "sp-core", + "sp-finality-grandpa", + "sp-inherents", + "sp-keystore", + "sp-runtime", + "sp-utils", + "substrate-prometheus-endpoint", +] + +[[package]] +name = "sc-finality-grandpa-rpc" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "derive_more", + "finality-grandpa 0.14.0", + "futures 0.3.13", + "jsonrpc-core 15.1.0", + "jsonrpc-core-client", + "jsonrpc-derive", + "jsonrpc-pubsub", + "log", + "parity-scale-codec 2.0.1", + "sc-client-api", + "sc-finality-grandpa", + "sc-rpc", + "serde", + "serde_json", + "sp-blockchain", + "sp-core", + "sp-runtime", +] + +[[package]] +name = "sc-informant" +version = "0.9.0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "ansi_term 0.12.1", + "futures 0.3.13", + "log", + "parity-util-mem", + "sc-client-api", + "sc-network", + "sp-blockchain", + "sp-runtime", + "sp-transaction-pool", + "sp-utils", + "wasm-timer", +] + +[[package]] +name = "sc-keystore" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "async-trait", + "derive_more", + "futures 0.3.13", + "futures-util", + "hex", + "merlin", + "parking_lot 0.11.1", + "rand 0.7.3", + "serde_json", + "sp-application-crypto", + "sp-core", + "sp-keystore", + "subtle 2.4.0", +] + +[[package]] +name = "sc-light" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "hash-db", + "lazy_static", + "parity-scale-codec 2.0.1", + "parking_lot 0.11.1", + "sc-client-api", + "sc-executor", + "sp-api", + "sp-blockchain", + "sp-core", + "sp-externalities", + "sp-runtime", + "sp-state-machine", +] + +[[package]] +name = "sc-network" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "async-std", + "async-trait", + "asynchronous-codec 0.5.0", + "bitflags", + "bs58", + "bytes 1.0.1", + "cid", + "derive_more", + "either", + "erased-serde", + "fnv", + "fork-tree", + "futures 0.3.13", + "futures-timer 3.0.2", + "hex", + "ip_network", + "libp2p", + "linked-hash-map", + "linked_hash_set", + "log", + "lru", + "nohash-hasher", + "parity-scale-codec 2.0.1", + "parking_lot 0.11.1", + "pin-project 1.0.5", + "prost", + "prost-build", + "rand 0.7.3", + "sc-block-builder", + "sc-client-api", + "sc-peerset", + "serde", + "serde_json", + "smallvec 1.6.1", + "sp-arithmetic", + "sp-blockchain", + "sp-consensus", + "sp-core", + "sp-runtime", + "sp-utils", + 
"substrate-prometheus-endpoint", + "thiserror", + "unsigned-varint 0.6.0", + "void", + "wasm-timer", + "zeroize", +] + +[[package]] +name = "sc-network-gossip" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "futures 0.3.13", + "futures-timer 3.0.2", + "libp2p", + "log", + "lru", + "sc-network", + "sp-runtime", + "substrate-prometheus-endpoint", + "tracing", + "wasm-timer", +] + +[[package]] +name = "sc-offchain" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "bytes 0.5.6", + "fnv", + "futures 0.3.13", + "futures-timer 3.0.2", + "hex", + "hyper 0.13.10", + "hyper-rustls", + "log", + "num_cpus", + "parity-scale-codec 2.0.1", + "parking_lot 0.11.1", + "rand 0.7.3", + "sc-client-api", + "sc-keystore", + "sc-network", + "sp-api", + "sp-core", + "sp-offchain", + "sp-runtime", + "sp-utils", + "threadpool", +] + +[[package]] +name = "sc-peerset" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "futures 0.3.13", + "libp2p", + "log", + "serde_json", + "sp-utils", + "wasm-timer", +] + +[[package]] +name = "sc-proposer-metrics" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "log", + "substrate-prometheus-endpoint", +] + +[[package]] +name = "sc-rpc" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "futures 0.3.13", + "hash-db", + "jsonrpc-core 15.1.0", + "jsonrpc-pubsub", + "log", + "parity-scale-codec 2.0.1", + "parking_lot 0.11.1", + "sc-block-builder", + "sc-client-api", + "sc-executor", + "sc-keystore", + "sc-rpc-api", + "sc-tracing", + "serde_json", + "sp-api", + 
"sp-blockchain", + "sp-chain-spec", + "sp-core", + "sp-keystore", + "sp-offchain", + "sp-rpc", + "sp-runtime", + "sp-session", + "sp-state-machine", + "sp-transaction-pool", + "sp-utils", + "sp-version", +] + +[[package]] +name = "sc-rpc-api" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "derive_more", + "futures 0.3.13", + "jsonrpc-core 15.1.0", + "jsonrpc-core-client", + "jsonrpc-derive", + "jsonrpc-pubsub", + "log", + "parity-scale-codec 2.0.1", + "parking_lot 0.11.1", + "serde", + "serde_json", + "sp-chain-spec", + "sp-core", + "sp-rpc", + "sp-runtime", + "sp-transaction-pool", + "sp-version", +] + +[[package]] +name = "sc-rpc-server" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "futures 0.1.31", + "jsonrpc-core 15.1.0", + "jsonrpc-http-server", + "jsonrpc-ipc-server", + "jsonrpc-pubsub", + "jsonrpc-ws-server", + "log", + "serde", + "serde_json", + "sp-runtime", + "substrate-prometheus-endpoint", +] + +[[package]] +name = "sc-service" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "async-trait", + "directories", + "exit-future", + "futures 0.1.31", + "futures 0.3.13", + "futures-timer 3.0.2", + "hash-db", + "jsonrpc-core 15.1.0", + "jsonrpc-pubsub", + "lazy_static", + "log", + "parity-scale-codec 2.0.1", + "parity-util-mem", + "parking_lot 0.11.1", + "pin-project 1.0.5", + "rand 0.7.3", + "sc-block-builder", + "sc-chain-spec", + "sc-client-api", + "sc-client-db", + "sc-executor", + "sc-informant", + "sc-keystore", + "sc-light", + "sc-network", + "sc-offchain", + "sc-rpc", + "sc-rpc-server", + "sc-telemetry", + "sc-tracing", + "sc-transaction-pool", + "serde", + "serde_json", + "sp-api", + "sp-application-crypto", + "sp-block-builder", + "sp-blockchain", + 
"sp-consensus", + "sp-core", + "sp-externalities", + "sp-inherents", + "sp-io", + "sp-keystore", + "sp-runtime", + "sp-session", + "sp-state-machine", + "sp-tracing", + "sp-transaction-pool", + "sp-trie", + "sp-utils", + "sp-version", + "substrate-prometheus-endpoint", + "tempfile", + "thiserror", + "tracing", + "tracing-futures", + "wasm-timer", +] + +[[package]] +name = "sc-state-db" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "log", + "parity-scale-codec 2.0.1", + "parity-util-mem", + "parity-util-mem-derive", + "parking_lot 0.11.1", + "sc-client-api", + "sp-core", + "thiserror", +] + +[[package]] +name = "sc-telemetry" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "chrono", + "futures 0.3.13", + "libp2p", + "log", + "parking_lot 0.11.1", + "pin-project 1.0.5", + "rand 0.7.3", + "serde", + "serde_json", + "take_mut", + "thiserror", + "void", + "wasm-timer", +] + +[[package]] +name = "sc-tracing" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "ansi_term 0.12.1", + "atty", + "erased-serde", + "lazy_static", + "log", + "once_cell", + "parking_lot 0.11.1", + "regex", + "rustc-hash", + "sc-tracing-proc-macro", + "serde", + "serde_json", + "sp-tracing", + "thiserror", + "tracing", + "tracing-core", + "tracing-log", + "tracing-subscriber", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "sc-tracing-proc-macro" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "proc-macro-crate 1.0.0", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sc-transaction-graph" +version = "3.0.0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "derive_more", + "futures 0.3.13", + "linked-hash-map", + "log", + "parity-util-mem", + "parking_lot 0.11.1", + "retain_mut", + "serde", + "sp-blockchain", + "sp-core", + "sp-runtime", + "sp-transaction-pool", + "sp-utils", + "thiserror", + "wasm-timer", +] + +[[package]] +name = "sc-transaction-pool" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "futures 0.3.13", + "futures-diagnose", + "intervalier", + "log", + "parity-scale-codec 2.0.1", + "parity-util-mem", + "parking_lot 0.11.1", + "sc-client-api", + "sc-transaction-graph", + "sp-api", + "sp-blockchain", + "sp-core", + "sp-runtime", + "sp-tracing", + "sp-transaction-pool", + "sp-utils", + "substrate-prometheus-endpoint", + "thiserror", + "wasm-timer", +] + +[[package]] +name = "schannel" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +dependencies = [ + "lazy_static", + "winapi 0.3.9", +] + +[[package]] +name = "schnorrkel" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" +dependencies = [ + "arrayref", + "arrayvec 0.5.2", + "curve25519-dalek 2.1.2", + "getrandom 0.1.16", + "merlin", + "rand 0.7.3", + "rand_core 0.5.1", + "serde", + "sha2 0.8.2", + "subtle 2.4.0", + "zeroize", +] + +[[package]] +name = "scoped-tls" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "scroll" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fda28d4b4830b807a8b43f7b0e6b5df875311b3e7621d84577188c175b6ec1ec" +dependencies = [ + "scroll_derive", +] + +[[package]] +name = "scroll_derive" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aaaae8f38bb311444cfb7f1979af0bc9240d95795f75f9ceddf6a59b79ceffa0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sct" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "secrecy" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0673d6a6449f5e7d12a1caf424fd9363e2af3a4953023ed455e3c4beef4597c0" +dependencies = [ + "zeroize", +] + +[[package]] +name = "security-framework" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad502866817f0575705bd7be36e2b2535cc33262d493aa733a2ec862baa2bc2b" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys 0.7.0", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51ceb04988b17b6d1dcd555390fa822ca5637b4a14e1f5099f13d351bed4d6c7" +dependencies = [ + "core-foundation-sys 0.7.0", + "libc", +] + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + +[[package]] +name = "serde" +version = "1.0.124" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd761ff957cb2a45fbb9ab3da6512de9de55872866160b23c25f1a841e99d29f" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde-big-array" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "883eee5198ea51720eab8be52a36cf6c0164ac90eea0ed95b649d5e35382404e" +dependencies = [ + "serde", + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.124" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1800f7693e94e186f5e25a28291ae1570da908aff7d97a095dec1e56ff99069b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" +dependencies = [ + "indexmap", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha-1" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" +dependencies = [ + "block-buffer 0.7.3", + "digest 0.8.1", + "fake-simd", + "opaque-debug 0.2.3", +] + +[[package]] +name = "sha-1" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfebf75d25bd900fd1e7d11501efab59bc846dbc76196839663e6637bba9f25f" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpuid-bool 0.1.2", + "digest 0.9.0", + "opaque-debug 0.3.0", +] + +[[package]] +name = "sha1" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" + +[[package]] +name = "sha2" 
+version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" +dependencies = [ + "block-buffer 0.7.3", + "digest 0.8.1", + "fake-simd", + "opaque-debug 0.2.3", +] + +[[package]] +name = "sha2" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa827a14b29ab7f44778d14a88d3cb76e949c45083f7dbfa507d0cb699dc12de" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpuid-bool 0.1.2", + "digest 0.9.0", + "opaque-debug 0.3.0", +] + +[[package]] +name = "sha3" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" +dependencies = [ + "block-buffer 0.9.0", + "digest 0.9.0", + "keccak", + "opaque-debug 0.3.0", +] + +[[package]] +name = "sharded-slab" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" + +[[package]] +name = "signal-hook" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a7f3f92a1da3d6b1d32245d0cbcbbab0cfc45996d8df619c42bccfa6d2bbb5f" +dependencies = [ + "libc", + "signal-hook-registry", +] + +[[package]] +name = "signal-hook-registry" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" +dependencies = [ + "libc", +] + +[[package]] +name = "signature" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0f0242b8e50dd9accdd56170e94ca1ebd223b098eb9c83539a6e367d0f36ae68" + +[[package]] +name = "simba" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb931b1367faadea6b1ab1c306a860ec17aaa5fa39f367d0c744e69d971a1fb2" +dependencies = [ + "approx", + "num-complex", + "num-traits", + "paste 0.1.18", +] + +[[package]] +name = "slab" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" + +[[package]] +name = "sluice" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fa0333a60ff2e3474a6775cc611840c2a55610c831dd366503474c02f1a28f5" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", +] + +[[package]] +name = "smallvec" +version = "0.6.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" +dependencies = [ + "maybe-uninit", +] + +[[package]] +name = "smallvec" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" + +[[package]] +name = "snow" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "795dd7aeeee24468e5a32661f6d27f7b5cbed802031b2d7640c7b10f8fb2dd50" +dependencies = [ + "aes-gcm", + "blake2", + "chacha20poly1305", + "rand 0.7.3", + "rand_core 0.5.1", + "ring", + "rustc_version", + "sha2 0.9.3", + "subtle 2.4.0", + "x25519-dalek", +] + +[[package]] +name = "socket2" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "socket2" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2" +dependencies = [ + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "soketto" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5c71ed3d54db0a699f4948e1bb3e45b450fa31fe602621dee6680361d569c88" +dependencies = [ + "base64 0.12.3", + "bytes 0.5.6", + "flate2", + "futures 0.3.13", + "httparse", + "log", + "rand 0.7.3", + "sha-1 0.9.4", +] + +[[package]] +name = "sp-allocator" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "log", + "sp-core", + "sp-std", + "sp-wasm-interface", + "thiserror", +] + +[[package]] +name = "sp-api" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "hash-db", + "log", + "parity-scale-codec 2.0.1", + "sp-api-proc-macro", + "sp-core", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-version", + "thiserror", +] + +[[package]] +name = "sp-api-proc-macro" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "blake2-rfc", + "proc-macro-crate 1.0.0", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-application-crypto" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "parity-scale-codec 2.0.1", + "serde", + "sp-core", + "sp-io", + "sp-std", +] + +[[package]] +name = "sp-arithmetic" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "integer-sqrt", + "num-traits", + "parity-scale-codec 2.0.1", + "serde", + "sp-debug-derive", + "sp-std", + 
"static_assertions", +] + +[[package]] +name = "sp-authorship" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "parity-scale-codec 2.0.1", + "sp-inherents", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-block-builder" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "parity-scale-codec 2.0.1", + "sp-api", + "sp-inherents", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-blockchain" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "futures 0.3.13", + "log", + "lru", + "parity-scale-codec 2.0.1", + "parking_lot 0.11.1", + "sp-api", + "sp-consensus", + "sp-database", + "sp-runtime", + "sp-state-machine", + "thiserror", +] + +[[package]] +name = "sp-chain-spec" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "sp-consensus" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "async-trait", + "futures 0.3.13", + "futures-timer 3.0.2", + "libp2p", + "log", + "parity-scale-codec 2.0.1", + "parking_lot 0.11.1", + "serde", + "sp-api", + "sp-core", + "sp-inherents", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-trie", + "sp-utils", + "sp-version", + "substrate-prometheus-endpoint", + "thiserror", + "wasm-timer", +] + +[[package]] +name = "sp-consensus-aura" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "parity-scale-codec 2.0.1", + "sp-api", + "sp-application-crypto", + "sp-consensus", + 
"sp-consensus-slots", + "sp-inherents", + "sp-runtime", + "sp-std", + "sp-timestamp", +] + +[[package]] +name = "sp-consensus-babe" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "merlin", + "parity-scale-codec 2.0.1", + "serde", + "sp-api", + "sp-application-crypto", + "sp-consensus", + "sp-consensus-slots", + "sp-consensus-vrf", + "sp-core", + "sp-inherents", + "sp-keystore", + "sp-runtime", + "sp-std", + "sp-timestamp", +] + +[[package]] +name = "sp-consensus-slots" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "parity-scale-codec 2.0.1", + "sp-arithmetic", + "sp-runtime", +] + +[[package]] +name = "sp-consensus-vrf" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "parity-scale-codec 2.0.1", + "schnorrkel", + "sp-core", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-core" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "base58", + "blake2-rfc", + "byteorder", + "dyn-clonable", + "ed25519-dalek", + "futures 0.3.13", + "hash-db", + "hash256-std-hasher", + "hex", + "impl-serde", + "lazy_static", + "libsecp256k1", + "log", + "merlin", + "num-traits", + "parity-scale-codec 2.0.1", + "parity-util-mem", + "parking_lot 0.11.1", + "primitive-types", + "rand 0.7.3", + "regex", + "schnorrkel", + "secrecy", + "serde", + "sha2 0.9.3", + "sp-debug-derive", + "sp-externalities", + "sp-runtime-interface", + "sp-std", + "sp-storage", + "substrate-bip39", + "thiserror", + "tiny-bip39", + "tiny-keccak", + "twox-hash", + "wasmi", + "zeroize", +] + +[[package]] +name = "sp-database" +version = "3.0.0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "kvdb", + "parking_lot 0.11.1", +] + +[[package]] +name = "sp-debug-derive" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-externalities" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "environmental", + "parity-scale-codec 2.0.1", + "sp-std", + "sp-storage", +] + +[[package]] +name = "sp-finality-grandpa" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "finality-grandpa 0.14.0", + "log", + "parity-scale-codec 2.0.1", + "serde", + "sp-api", + "sp-application-crypto", + "sp-core", + "sp-keystore", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-inherents" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "parity-scale-codec 2.0.1", + "parking_lot 0.11.1", + "sp-core", + "sp-std", + "thiserror", +] + +[[package]] +name = "sp-io" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "futures 0.3.13", + "hash-db", + "libsecp256k1", + "log", + "parity-scale-codec 2.0.1", + "parking_lot 0.11.1", + "sp-core", + "sp-externalities", + "sp-keystore", + "sp-runtime-interface", + "sp-state-machine", + "sp-std", + "sp-tracing", + "sp-trie", + "sp-wasm-interface", + "tracing", + "tracing-core", +] + +[[package]] +name = "sp-keyring" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + 
"lazy_static", + "sp-core", + "sp-runtime", + "strum", +] + +[[package]] +name = "sp-keystore" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "async-trait", + "derive_more", + "futures 0.3.13", + "merlin", + "parity-scale-codec 2.0.1", + "parking_lot 0.11.1", + "schnorrkel", + "serde", + "sp-core", + "sp-externalities", +] + +[[package]] +name = "sp-maybe-compressed-blob" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "ruzstd", + "zstd", +] + +[[package]] +name = "sp-offchain" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "sp-api", + "sp-core", + "sp-runtime", +] + +[[package]] +name = "sp-panic-handler" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "backtrace", +] + +[[package]] +name = "sp-rpc" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "serde", + "sp-core", +] + +[[package]] +name = "sp-runtime" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "either", + "hash256-std-hasher", + "impl-trait-for-tuples", + "log", + "parity-scale-codec 2.0.1", + "parity-util-mem", + "paste 1.0.4", + "rand 0.7.3", + "serde", + "sp-application-crypto", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-std", +] + +[[package]] +name = "sp-runtime-interface" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec 2.0.1", + 
"primitive-types", + "sp-externalities", + "sp-runtime-interface-proc-macro", + "sp-std", + "sp-storage", + "sp-tracing", + "sp-wasm-interface", + "static_assertions", +] + +[[package]] +name = "sp-runtime-interface-proc-macro" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "Inflector", + "proc-macro-crate 1.0.0", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-serializer" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "sp-session" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "parity-scale-codec 2.0.1", + "sp-api", + "sp-core", + "sp-runtime", + "sp-staking", + "sp-std", +] + +[[package]] +name = "sp-staking" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "parity-scale-codec 2.0.1", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-state-machine" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "hash-db", + "log", + "num-traits", + "parity-scale-codec 2.0.1", + "parking_lot 0.11.1", + "rand 0.7.3", + "smallvec 1.6.1", + "sp-core", + "sp-externalities", + "sp-panic-handler", + "sp-std", + "sp-trie", + "thiserror", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-std" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" + +[[package]] +name = "sp-storage" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ 
+ "impl-serde", + "parity-scale-codec 2.0.1", + "ref-cast", + "serde", + "sp-debug-derive", + "sp-std", +] + +[[package]] +name = "sp-tasks" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "log", + "sp-core", + "sp-externalities", + "sp-io", + "sp-runtime-interface", + "sp-std", +] + +[[package]] +name = "sp-timestamp" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "parity-scale-codec 2.0.1", + "sp-api", + "sp-inherents", + "sp-runtime", + "sp-std", + "wasm-timer", +] + +[[package]] +name = "sp-tracing" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "log", + "parity-scale-codec 2.0.1", + "sp-std", + "tracing", + "tracing-core", + "tracing-subscriber", +] + +[[package]] +name = "sp-transaction-pool" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "derive_more", + "futures 0.3.13", + "log", + "parity-scale-codec 2.0.1", + "serde", + "sp-api", + "sp-blockchain", + "sp-runtime", + "thiserror", +] + +[[package]] +name = "sp-trie" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "hash-db", + "memory-db", + "parity-scale-codec 2.0.1", + "sp-core", + "sp-std", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-utils" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "futures 0.3.13", + "futures-core", + "futures-timer 3.0.2", + "lazy_static", + "prometheus", +] + +[[package]] +name = "sp-version" +version = "3.0.0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "impl-serde", + "parity-scale-codec 2.0.1", + "serde", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-wasm-interface" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec 2.0.1", + "sp-std", + "wasmi", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "spinning_top" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bd0ab6b8c375d2d963503b90d3770010d95bc3b5f98036f948dee24bf4e8879" +dependencies = [ + "lock_api 0.4.2", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "standback" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2beb4d1860a61f571530b3f855a1b538d0200f7871c63331ecd6f17b1f014f8" +dependencies = [ + "version_check", +] + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "statrs" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cce16f6de653e88beca7bd13780d08e09d4489dbca1f9210e041bc4852481382" +dependencies = [ + "rand 0.7.3", +] + +[[package]] +name = "stdweb" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" +dependencies = [ + "discard", + "rustc_version", + "stdweb-derive", + "stdweb-internal-macros", + "stdweb-internal-runtime", + "wasm-bindgen", +] + +[[package]] +name = "stdweb-derive" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" +dependencies = [ + "proc-macro2", + "quote", + "serde", + "serde_derive", + "syn", +] + +[[package]] +name = "stdweb-internal-macros" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" +dependencies = [ + "base-x", + "proc-macro2", + "quote", + "serde", + "serde_derive", + "serde_json", + "sha1", + "syn", +] + +[[package]] +name = "stdweb-internal-runtime" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" + +[[package]] +name = "storage-proof-fuzzer" +version = "0.1.0" +dependencies = [ + "bp-header-chain", + "bp-runtime", + "bp-test-utils", + "env_logger 0.8.3", + "finality-grandpa 0.12.3", + "frame-support", + "frame-system", + "hash-db", + "honggfuzz", + "log", + "parity-scale-codec 1.3.7", + "sp-core", + "sp-finality-grandpa", + "sp-io", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-trie", +] + +[[package]] +name = "stream-cipher" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c80e15f898d8d8f25db24c253ea615cc14acf418ff307822995814e7d42cfa89" +dependencies = [ + "block-cipher", + "generic-array 0.14.4", +] + +[[package]] +name = "string" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d24114bfcceb867ca7f71a0d3fe45d45619ec47a6fbfa98cb14e14250bfa5d6d" +dependencies = [ + "bytes 0.4.12", +] + +[[package]] +name = "strsim" 
+version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + +[[package]] +name = "structopt" +version = "0.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5277acd7ee46e63e5168a80734c9f6ee81b1367a7d8772a2d765df2a3705d28c" +dependencies = [ + "clap", + "lazy_static", + "structopt-derive", +] + +[[package]] +name = "structopt-derive" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ba9cdfda491b814720b6b06e0cac513d922fc407582032e8706e9f137976f90" +dependencies = [ + "heck", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "strum" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7318c509b5ba57f18533982607f24070a55d353e90d4cae30c467cdb2ad5ac5c" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee8bc6b87a5112aeeab1f4a9f7ab634fe6cbefc4850006df31267f4cfb9e3149" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "substrate-bip39" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bed6646a0159b9935b5d045611560eeef842b78d7adc3ba36f5ca325a13a0236" +dependencies = [ + "hmac 0.7.1", + "pbkdf2 0.3.0", + "schnorrkel", + "sha2 0.8.2", + "zeroize", +] + +[[package]] +name = "substrate-build-script-utils" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd540ba72520174c2c73ce96bf507eeba3cc8a481f58be92525b69110e1fa645" +dependencies = [ + "platforms", +] + +[[package]] +name = "substrate-frame-rpc-system" +version = "3.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + 
"frame-system-rpc-runtime-api", + "futures 0.3.13", + "jsonrpc-core 15.1.0", + "jsonrpc-core-client", + "jsonrpc-derive", + "log", + "parity-scale-codec 2.0.1", + "sc-client-api", + "sc-rpc-api", + "serde", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-core", + "sp-runtime", + "sp-transaction-pool", +] + +[[package]] +name = "substrate-prometheus-endpoint" +version = "0.9.0" +source = "git+https://github.com/paritytech/substrate?branch=master#46a64ac817ec909c66203a7e0715ee111762d3f7" +dependencies = [ + "async-std", + "derive_more", + "futures-util", + "hyper 0.13.10", + "log", + "prometheus", + "tokio 0.2.25", +] + +[[package]] +name = "substrate-relay" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-std", + "async-trait", + "bp-header-chain", + "bp-kusama", + "bp-messages", + "bp-millau", + "bp-polkadot", + "bp-rialto", + "bp-rococo", + "bp-runtime", + "bp-westend", + "bridge-runtime-common", + "finality-grandpa 0.14.0", + "finality-relay", + "frame-support", + "futures 0.3.13", + "headers-relay", + "hex", + "hex-literal 0.3.1", + "log", + "messages-relay", + "millau-runtime", + "num-format", + "num-traits", + "pallet-bridge-dispatch", + "pallet-bridge-messages", + "parity-scale-codec 2.0.1", + "paste 1.0.4", + "relay-kusama-client", + "relay-millau-client", + "relay-polkadot-client", + "relay-rialto-client", + "relay-rococo-client", + "relay-substrate-client", + "relay-utils", + "relay-westend-client", + "rialto-runtime", + "sp-core", + "sp-finality-grandpa", + "sp-keyring", + "sp-runtime", + "sp-trie", + "sp-version", + "structopt", +] + +[[package]] +name = "substrate-wasm-builder-runner" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54cab12167e32b38a62c5ea5825aa0874cde315f907a46aad2b05aa8ef3d862f" + +[[package]] +name = "subtle" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" + +[[package]] +name = "subtle" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" + +[[package]] +name = "syn" +version = "1.0.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ce15dd3ed8aa2f8eeac4716d6ef5ab58b6b9256db41d7e1a0224c2788e8fd87" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "synstructure" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "unicode-xid", +] + +[[package]] +name = "sysinfo" +version = "0.15.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de94457a09609f33fec5e7fceaf907488967c6c7c75d64da6a7ce6ffdb8b5abd" +dependencies = [ + "cc", + "cfg-if 1.0.0", + "core-foundation-sys 0.8.2", + "doc-comment", + "libc", + "ntapi", + "once_cell", + "rayon", + "winapi 0.3.9", +] + +[[package]] +name = "take_mut" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "target-lexicon" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "422045212ea98508ae3d28025bc5aaa2bd4a9cdaecd442a08da2ee620ee9ea95" + +[[package]] +name = "tempfile" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +dependencies = [ + "cfg-if 1.0.0", + "libc", 
+ "rand 0.8.3", + "redox_syscall 0.2.5", + "remove_dir_all", + "winapi 0.3.9", +] + +[[package]] +name = "termcolor" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "thiserror" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" +dependencies = [ + "once_cell", +] + +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + +[[package]] +name = "time" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +dependencies = [ + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi 0.3.9", +] + +[[package]] +name = "time" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1195b046942c221454c2539395f85413b33383a067449d78aab2b7b052a142f7" +dependencies = [ + "const_fn", + "libc", + "standback", + "stdweb", + "time-macros", + "version_check", + "winapi 0.3.9", +] + +[[package]] +name = "time-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" +dependencies = [ + "proc-macro-hack", + "time-macros-impl", +] + +[[package]] +name = "time-macros-impl" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" +dependencies = [ + "proc-macro-hack", + "proc-macro2", + "quote", + "standback", + "syn", +] + +[[package]] +name = "tiny-bip39" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8" +dependencies = [ + "anyhow", + "hmac 0.8.1", + "once_cell", + "pbkdf2 0.4.0", + "rand 0.7.3", + "rustc-hash", + "sha2 0.9.3", + "thiserror", + "unicode-normalization", + "zeroize", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinyvec" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" + +[[package]] +name = "tokio" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" +dependencies = [ + "bytes 0.4.12", + "futures 0.1.31", + "mio", + "num_cpus", + "tokio-codec", + "tokio-current-thread", + "tokio-executor", + "tokio-fs", + "tokio-io", + "tokio-reactor", + "tokio-sync", + "tokio-tcp", + "tokio-threadpool", + "tokio-timer", + "tokio-udp", + "tokio-uds", +] + +[[package]] +name = "tokio" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" +dependencies = [ + "bytes 0.5.6", + "fnv", + "futures-core", + "iovec", + "lazy_static", + "libc", + "memchr", + "mio", + "mio-uds", + "num_cpus", + "pin-project-lite 0.1.11", + "signal-hook-registry", + "slab", + "winapi 0.3.9", +] + +[[package]] +name = "tokio-buf" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" +dependencies = [ + "bytes 0.4.12", + "either", + "futures 0.1.31", +] + +[[package]] +name = "tokio-codec" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b" +dependencies = [ + "bytes 0.4.12", + "futures 0.1.31", + "tokio-io", +] + +[[package]] +name = "tokio-current-thread" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" +dependencies = [ + "futures 0.1.31", + "tokio-executor", +] + +[[package]] +name = "tokio-executor" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671" +dependencies = [ + "crossbeam-utils 0.7.2", + "futures 0.1.31", +] + +[[package]] +name = "tokio-fs" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4" +dependencies = [ + "futures 0.1.31", + "tokio-io", + "tokio-threadpool", +] + +[[package]] +name = "tokio-io" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" +dependencies = [ + "bytes 0.4.12", + "futures 0.1.31", + "log", +] + +[[package]] +name = "tokio-named-pipes" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d282d483052288b2308ba5ee795f5673b159c9bdf63c385a05609da782a5eae" +dependencies = [ + "bytes 0.4.12", + "futures 0.1.31", + "mio", + "mio-named-pipes", + "tokio 0.1.22", +] + +[[package]] +name = "tokio-reactor" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351" +dependencies = [ + "crossbeam-utils 0.7.2", + "futures 0.1.31", + "lazy_static", + "log", + "mio", + "num_cpus", + "parking_lot 0.9.0", + "slab", + "tokio-executor", + "tokio-io", + "tokio-sync", +] + +[[package]] +name = "tokio-rustls" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a" +dependencies = [ + "futures-core", + "rustls 0.18.1", + "tokio 0.2.25", + "webpki 0.21.4", +] + +[[package]] +name = "tokio-service" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" +dependencies = [ + "futures 0.1.31", +] + +[[package]] +name = "tokio-sync" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee" +dependencies = [ + "fnv", + "futures 0.1.31", +] + +[[package]] +name = "tokio-tcp" +version = "0.1.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72" +dependencies = [ + "bytes 0.4.12", + "futures 0.1.31", + "iovec", + "mio", + "tokio-io", + "tokio-reactor", +] + +[[package]] +name = "tokio-threadpool" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89" +dependencies = [ + "crossbeam-deque 0.7.3", + "crossbeam-queue", + "crossbeam-utils 0.7.2", + "futures 0.1.31", + "lazy_static", + "log", + "num_cpus", + "slab", + "tokio-executor", +] + +[[package]] +name = "tokio-timer" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296" +dependencies = [ + "crossbeam-utils 0.7.2", + "futures 0.1.31", + "slab", + "tokio-executor", +] + +[[package]] +name = "tokio-udp" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" +dependencies = [ + "bytes 0.4.12", + "futures 0.1.31", + "log", + "mio", + "tokio-codec", + "tokio-io", + "tokio-reactor", +] + +[[package]] +name = "tokio-uds" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0" +dependencies = [ + "bytes 0.4.12", + "futures 0.1.31", + "iovec", + "libc", + "log", + "mio", + "mio-uds", + "tokio-codec", + "tokio-io", + "tokio-reactor", +] + +[[package]] +name = "tokio-util" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" +dependencies = [ + "bytes 0.5.6", + "futures-core", + "futures-sink", + "log", + "pin-project-lite 0.1.11", + "tokio 0.2.25", +] + +[[package]] +name = "toml" +version = 
"0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +dependencies = [ + "serde", +] + +[[package]] +name = "tower-service" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" + +[[package]] +name = "tracing" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" +dependencies = [ + "cfg-if 1.0.0", + "log", + "pin-project-lite 0.2.4", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a9bd1db7706f2373a190b0d067146caa39350c486f3d455b0e33b431f94c07" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "pin-project 1.0.5", + "tracing", +] + +[[package]] +name = "tracing-log" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" +dependencies = [ + "serde", + "tracing-core", +] + 
+[[package]] +name = "tracing-subscriber" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ab8966ac3ca27126141f7999361cc97dd6fb4b71da04c02044fa9045d98bb96" +dependencies = [ + "ansi_term 0.12.1", + "chrono", + "lazy_static", + "matchers", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec 1.6.1", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] + +[[package]] +name = "trie-db" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec051edf7f0fc9499a2cb0947652cab2148b9d7f61cee7605e312e9f970dacaf" +dependencies = [ + "hash-db", + "hashbrown", + "log", + "rustc-hex", + "smallvec 1.6.1", +] + +[[package]] +name = "trie-root" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "652931506d2c1244d7217a70b99f56718a7b4161b37f04e7cd868072a99f68cd" +dependencies = [ + "hash-db", +] + +[[package]] +name = "triehash" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1631b201eb031b563d2e85ca18ec8092508e262a3196ce9bd10a67ec87b9f5c" +dependencies = [ + "hash-db", + "rlp", +] + +[[package]] +name = "trust-dns-proto" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d57e219ba600dd96c2f6d82eb79645068e14edbc5c7e27514af40436b88150c" +dependencies = [ + "async-trait", + "cfg-if 1.0.0", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.2.2", + "ipnet", + "lazy_static", + "log", + "rand 0.8.3", + "smallvec 1.6.1", + "thiserror", + "tinyvec", + "url 2.2.1", +] + +[[package]] +name = "trust-dns-resolver" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0437eea3a6da51acc1e946545ff53d5b8fb2611ff1c3bed58522dde100536ae" +dependencies = [ + "cfg-if 1.0.0", + "futures-util", + "ipconfig", + 
"lazy_static", + "log", + "lru-cache", + "parking_lot 0.11.1", + "resolv-conf", + "smallvec 1.6.1", + "thiserror", + "trust-dns-proto", +] + +[[package]] +name = "try-lock" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" + +[[package]] +name = "twox-hash" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59" +dependencies = [ + "cfg-if 0.1.10", + "rand 0.7.3", + "static_assertions", +] + +[[package]] +name = "typenum" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" + +[[package]] +name = "ucd-trie" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" + +[[package]] +name = "uint" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e11fe9a9348741cf134085ad57c249508345fe16411b3d7fb4ff2da2f1d6382e" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + +[[package]] +name = "unicase" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" +dependencies = [ + "matches", +] + +[[package]] +name = "unicode-normalization" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" +dependencies = [ + 
"tinyvec", +] + +[[package]] +name = "unicode-segmentation" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" + +[[package]] +name = "unicode-width" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" + +[[package]] +name = "unicode-xid" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" + +[[package]] +name = "universal-hash" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" +dependencies = [ + "generic-array 0.14.4", + "subtle 2.4.0", +] + +[[package]] +name = "unsigned-varint" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" + +[[package]] +name = "unsigned-varint" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35581ff83d4101e58b582e607120c7f5ffb17e632a980b1f38334d76b36908b2" +dependencies = [ + "asynchronous-codec 0.5.0", + "bytes 1.0.1", + "futures-io", + "futures-util", +] + +[[package]] +name = "unsigned-varint" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f8d425fafb8cd76bc3f22aace4af471d3156301d7508f2107e98fbeae10bc7f" +dependencies = [ + "asynchronous-codec 0.6.0", + "bytes 1.0.1", + "futures-io", + "futures-util", +] + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "url" +version = "1.7.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" +dependencies = [ + "idna 0.1.5", + "matches", + "percent-encoding 1.0.1", +] + +[[package]] +name = "url" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" +dependencies = [ + "form_urlencoded", + "idna 0.2.2", + "matches", + "percent-encoding 2.1.0", +] + +[[package]] +name = "value-bag" +version = "1.0.0-alpha.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b676010e055c99033117c2343b33a40a30b91fecd6c49055ac9cd2d6c305ab1" +dependencies = [ + "ctor", +] + +[[package]] +name = "vcpkg" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" + +[[package]] +name = "vec-arena" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eafc1b9b2dfc6f5529177b62cf806484db55b32dc7c9658a118e11bbeb33061d" + +[[package]] +name = "vec_map" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + +[[package]] +name = "version_check" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + +[[package]] +name = "waker-fn" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" + +[[package]] +name = "want" +version = "0.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" +dependencies = [ + "futures 0.1.31", + "log", + "try-lock", +] + +[[package]] +name = "want" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +dependencies = [ + "log", + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + +[[package]] +name = "wasm-bindgen" +version = "0.2.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" +dependencies = [ + "cfg-if 1.0.0", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" +dependencies = [ + "bumpalo", + "lazy_static", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94" +dependencies = [ + "cfg-if 1.0.0", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" +dependencies = [ + 
"quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" + +[[package]] +name = "wasm-timer" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" +dependencies = [ + "futures 0.3.13", + "js-sys", + "parking_lot 0.11.1", + "pin-utils", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "wasmi" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf617d864d25af3587aa745529f7aaa541066c876d57e050c0d0c85c61c92aff" +dependencies = [ + "libc", + "memory_units", + "num-rational", + "num-traits", + "parity-wasm", + "wasmi-validation", +] + +[[package]] +name = "wasmi-validation" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea78c597064ba73596099281e2f4cfc019075122a65cdda3205af94f0b264d93" +dependencies = [ + "parity-wasm", +] + +[[package]] +name = "wasmparser" +version = "0.76.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "755a9a4afe3f6cccbbe6d7e965eef44cf260b001f93e547eba84255c1d0187d8" + +[[package]] +name = "wasmtime" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "718cb52a9fdb7ab12471e9b9d051c9adfa6b5c504e0a1fea045e5eabc81eedd9" +dependencies = [ + "anyhow", + "backtrace", + "bincode", + "cfg-if 1.0.0", + "cpp_demangle", + "indexmap", + 
"libc", + "log", + "paste 1.0.4", + "region", + "rustc-demangle", + "serde", + "smallvec 1.6.1", + "target-lexicon", + "wasmparser", + "wasmtime-cache", + "wasmtime-environ", + "wasmtime-fiber", + "wasmtime-jit", + "wasmtime-profiling", + "wasmtime-runtime", + "wat", + "winapi 0.3.9", +] + +[[package]] +name = "wasmtime-cache" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f984df56c4adeba91540f9052db9f7a8b3b00cfaac1a023bee50a972f588b0c" +dependencies = [ + "anyhow", + "base64 0.13.0", + "bincode", + "directories-next", + "errno", + "file-per-thread-logger", + "libc", + "log", + "serde", + "sha2 0.9.3", + "toml", + "winapi 0.3.9", + "zstd", +] + +[[package]] +name = "wasmtime-cranelift" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a05abbf94e03c2c8ee02254b1949320c4d45093de5d9d6ed4d9351d536075c9" +dependencies = [ + "cranelift-codegen", + "cranelift-entity", + "cranelift-frontend", + "cranelift-wasm", + "wasmparser", + "wasmtime-environ", +] + +[[package]] +name = "wasmtime-debug" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "382eecd6281c6c1d1f3c904c3c143e671fc1a9573820cbfa777fba45ce2eda9c" +dependencies = [ + "anyhow", + "gimli", + "more-asserts", + "object", + "target-lexicon", + "thiserror", + "wasmparser", + "wasmtime-environ", +] + +[[package]] +name = "wasmtime-environ" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81011b2b833663d7e0ce34639459a0e301e000fc7331e0298b3a27c78d0cec60" +dependencies = [ + "anyhow", + "cfg-if 1.0.0", + "cranelift-codegen", + "cranelift-entity", + "cranelift-wasm", + "gimli", + "indexmap", + "log", + "more-asserts", + "serde", + "thiserror", + "wasmparser", +] + +[[package]] +name = "wasmtime-fiber" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d92da32e31af2e3d828f485f5f24651ed4d3b7f03a46ea6555eae6940d1402cd" +dependencies = [ + "cc", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "wasmtime-jit" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b5f649623859a12d361fe4cc4793de44f7c3ff34c322c5714289787e89650bb" +dependencies = [ + "addr2line", + "anyhow", + "cfg-if 1.0.0", + "cranelift-codegen", + "cranelift-entity", + "cranelift-frontend", + "cranelift-native", + "cranelift-wasm", + "gimli", + "log", + "more-asserts", + "object", + "rayon", + "region", + "serde", + "target-lexicon", + "thiserror", + "wasmparser", + "wasmtime-cranelift", + "wasmtime-debug", + "wasmtime-environ", + "wasmtime-obj", + "wasmtime-profiling", + "wasmtime-runtime", + "winapi 0.3.9", +] + +[[package]] +name = "wasmtime-obj" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2e99cd9858f57fd062e9351e07881cedfc8597928385e02a48d9333b9e15a1" +dependencies = [ + "anyhow", + "more-asserts", + "object", + "target-lexicon", + "wasmtime-debug", + "wasmtime-environ", +] + +[[package]] +name = "wasmtime-profiling" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e46c0a590e49278ba7f79ef217af9db4ecc671b50042c185093e22d73524abb2" +dependencies = [ + "anyhow", + "cfg-if 1.0.0", + "gimli", + "lazy_static", + "libc", + "object", + "scroll", + "serde", + "target-lexicon", + "wasmtime-environ", + "wasmtime-runtime", +] + +[[package]] +name = "wasmtime-runtime" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1438a09185fc7ca067caf1a80d7e5b398eefd4fb7630d94841448ade60feb3d0" +dependencies = [ + "backtrace", + "cc", + "cfg-if 1.0.0", + "indexmap", + "lazy_static", + "libc", + "log", + "memoffset 0.6.1", + "more-asserts", + "psm", + "region", + "thiserror", + "wasmtime-environ", + "winapi 0.3.9", +] + +[[package]] +name = "wast" +version = "35.0.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a5800e9f86a1eae935e38bea11e60fd253f6d514d153fb39b3e5535a7b37b56" +dependencies = [ + "leb128", +] + +[[package]] +name = "wat" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ec280a739b69173e0ffd12c1658507996836ba4e992ed9bc1e5385a0bd72a02" +dependencies = [ + "wast", +] + +[[package]] +name = "web-sys" +version = "0.3.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c40dc691fc48003eba817c38da7113c15698142da971298003cac3ef175680b3" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web3" +version = "0.15.0" +source = "git+https://github.com/tomusdrw/rust-web3.git?branch=td-ethabi#68dabc289bf9f5e59447d822c5da5b4c768175c6" +dependencies = [ + "arrayvec 0.5.2", + "derive_more", + "ethabi", + "ethereum-types", + "futures 0.3.13", + "futures-timer 3.0.2", + "hex", + "jsonrpc-core 17.0.0", + "log", + "parking_lot 0.11.1", + "pin-project 1.0.5", + "rlp", + "serde", + "serde_json", + "tiny-keccak", +] + +[[package]] +name = "webpki" +version = "0.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki-roots" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82015b7e0b8bad8185994674a13a93306bea76cf5a16c5a181382fd3a5ec2376" +dependencies = [ + "webpki 0.21.4", +] + +[[package]] +name = "wepoll-sys" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0fcb14dea929042224824779fbc82d9fab8d2e6d3cbc0ac404de8edf489e77ff" +dependencies = [ + "cc", +] + +[[package]] +name = "which" +version = "3.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" +dependencies = [ + "libc", +] + +[[package]] +name = "which" +version = "4.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87c14ef7e1b8b8ecfc75d5eca37949410046e66f15d185c01d70824f1f8111ef" +dependencies = [ + "libc", + "thiserror", +] + +[[package]] +name = "widestring" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" + +[[package]] +name = "winapi" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-build" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi 0.3.9", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "winreg" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" +dependencies = [ + "winapi 0.3.9", +] + +[[package]] +name = "ws2_32-sys" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" +dependencies = [ + "winapi 0.2.8", + "winapi-build", +] + +[[package]] +name = "wyz" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" + +[[package]] +name = "x25519-dalek" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc614d95359fd7afc321b66d2107ede58b246b844cf5d8a0adcca413e439f088" +dependencies = [ + "curve25519-dalek 3.0.2", + "rand_core 0.5.1", + "zeroize", +] + +[[package]] +name = "yaml-rust" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e66366e18dc58b46801afbf2ca7661a9f59cc8c5962c29892b6039b4f86fa992" + +[[package]] +name = "yamux" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cc7bd8c983209ed5d527f44b01c41b7dc146fd960c61cf9e1d25399841dc271" +dependencies = [ + "futures 0.3.13", + "log", + "nohash-hasher", + "parking_lot 0.11.1", + "rand 0.7.3", + "static_assertions", +] + +[[package]] +name = "zeroize" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81a974bcdd357f0dca4d41677db03436324d45a4c9ed2d0b873a5a360ce41c36" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "c3f369ddb18862aba61aa49bf31e74d29f0f162dec753063200e1dc084345d16" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zstd" +version = "0.6.1+zstd.1.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de55e77f798f205d8561b8fe2ef57abfb6e0ff2abe7fd3c089e119cdb5631a3" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "3.0.1+zstd.1.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1387cabcd938127b30ce78c4bf00b30387dddf704e3f0881dbc4ff62b5566f8c" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "1.4.20+zstd.1.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebd5b733d7cf2d9447e2c3e76a5589b4f5e5ae065c22a2bc0b023cbc331b6c8e" +dependencies = [ + "cc", + "libc", +] diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml new file mode 100644 index 00000000000..1090a0fe5ba --- /dev/null +++ b/polkadot/Cargo.toml @@ -0,0 +1,11 @@ +[workspace] +resolver = "2" + +members = [ + "bin/*/node", + "bin/*/runtime", + "fuzz/*", + "modules/*", + "primitives/*", + "relays/*", +] diff --git a/polkadot/Dockerfile b/polkadot/Dockerfile new file mode 100644 index 00000000000..b3c4a7b4ba7 --- /dev/null +++ b/polkadot/Dockerfile @@ -0,0 +1,71 @@ +# Builds images used by the bridge. +# +# In particular, it can be used to build Substrate nodes and bridge relayers. The binary that gets +# built can be specified with the `PROJECT` build-arg. For example, to build the `substrate-relay` +# you would do the following: +# +# `docker build . -t local/substrate-relay --build-arg=PROJECT=substrate-relay` +# +# See the `deployments/README.md` for all the available `PROJECT` values. + +FROM paritytech/bridge-dependencies as builder +WORKDIR /parity-bridges-common + +COPY . . 
+ +ARG PROJECT=ethereum-poa-relay +RUN cargo build --release --verbose -p ${PROJECT} +RUN strip ./target/release/${PROJECT} + +# In this final stage we copy over the final binary and do some checks +# to make sure that everything looks good. +FROM ubuntu:20.04 as runtime + +# show backtraces +ENV RUST_BACKTRACE 1 +ENV DEBIAN_FRONTEND=noninteractive + +RUN set -eux; \ + apt-get update && \ + apt-get install -y curl ca-certificates && \ + apt-get install -y --no-install-recommends libssl-dev && \ + update-ca-certificates && \ + groupadd -g 1000 user && \ + useradd -u 1000 -g user -s /bin/sh -m user && \ + # apt clean up + apt-get autoremove -y && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +# switch to non-root user +USER user + +WORKDIR /home/user + +ARG PROJECT=ethereum-poa-relay + +COPY --chown=user:user --from=builder /parity-bridges-common/target/release/${PROJECT} ./ +COPY --chown=user:user --from=builder /parity-bridges-common/deployments/local-scripts/bridge-entrypoint.sh ./ + +# check if executable works in this container +RUN ./${PROJECT} --version + +ENV PROJECT=$PROJECT +ENTRYPOINT ["/home/user/bridge-entrypoint.sh"] + +# metadata +ARG VCS_REF=master +ARG BUILD_DATE="" +ARG VERSION="" + +LABEL org.opencontainers.image.title="${PROJECT}" \ + org.opencontainers.image.description="${PROJECT} - component of Parity Bridges Common" \ + org.opencontainers.image.source="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/Dockerfile" \ + org.opencontainers.image.url="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/Dockerfile" \ + org.opencontainers.image.documentation="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/README.md" \ + org.opencontainers.image.created="${BUILD_DATE}" \ + org.opencontainers.image.version="${VERSION}" \ + org.opencontainers.image.revision="${VCS_REF}" \ + org.opencontainers.image.authors="devops-team@parity.io" \ + org.opencontainers.image.vendor="Parity Technologies" \ + 
org.opencontainers.image.licenses="GPL-3.0 License" diff --git a/polkadot/LICENSE b/polkadot/LICENSE new file mode 100644 index 00000000000..733c072369c --- /dev/null +++ b/polkadot/LICENSE @@ -0,0 +1,675 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. 
You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. 
+ + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + {one line to give the program's name and a brief idea of what it does.} + Copyright (C) {year} {name of author} + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + {project} Copyright (C) {year} {fullname} + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
+ diff --git a/polkadot/README.md b/polkadot/README.md new file mode 100644 index 00000000000..8f6446c8875 --- /dev/null +++ b/polkadot/README.md @@ -0,0 +1,215 @@ +# Parity Bridges Common + +This is a collection of components for building bridges. + +These components include Substrate pallets for syncing headers, passing arbitrary messages, as well +as libraries for building relayers to provide cross-chain communication capabilities. + +Three bridge nodes are also available. The nodes can be used to run test networks which bridge other +Substrate chains or Ethereum Proof-of-Authority chains. + +🚧 The bridges are currently under construction - a hardhat is recommended beyond this point 🚧 + +## Contents + +- [Installation](#installation) +- [High-Level Architecture](#high-level-architecture) +- [Project Layout](#project-layout) +- [Running the Bridge](#running-the-bridge) +- [How to send a message](#how-to-send-a-message) +- [Community](#community) + +## Installation + +To get up and running you need both stable and nightly Rust. Rust nightly is used to build the Web +Assembly (WASM) runtime for the node. You can configure the WASM support as so: + +``` +rustup install nightly +rustup target add wasm32-unknown-unknown --toolchain nightly +``` + +Once this is configured you can build and test the repo as follows: + +``` +git clone https://github.com/paritytech/parity-bridges-common.git +cd parity-bridges-common +cargo build --all +cargo test --all +``` + +If you need more information about setting up your development environment Substrate's +[Getting Started](https://substrate.dev/docs/en/knowledgebase/getting-started/) page is a good +resource. + +## High-Level Architecture + +This repo has support for bridging foreign chains together using a combination of Substrate pallets +and external processes called relayers. A bridge chain is one that is able to follow the consensus +of a foreign chain independently. 
For example, consider the case below where we want to bridge two +Substrate based chains. + +``` ++---------------+ +---------------+ +| | | | +| Rialto | | Millau | +| | | | ++-------+-------+ +-------+-------+ + ^ ^ + | +---------------+ | + | | | | + +-----> | Bridge Relay | <-------+ + | | + +---------------+ +``` + +The Millau chain must be able to accept Rialto headers and verify their integrity. It does this by +using a runtime module designed to track GRANDPA finality. Since two blockchains can't interact +directly they need an external service, called a relayer, to communicate. The relayer will subscribe +to new Rialto headers via RPC and submit them to the Millau chain for verification. + +Take a look at [Bridge High Level Documentation](./docs/high-level-overview.md) for more in-depth +description of the bridge interaction. + +## Project Layout + +Here's an overview of how the project is laid out. The main bits are the `node`, which is the actual +"blockchain", the `modules` which are used to build the blockchain's logic (a.k.a the runtime) and +the `relays` which are used to pass messages between chains. + +``` +├── bin // Node and Runtime for the various Substrate chains +│ └── ... +├── deployments // Useful tools for deploying test networks +│ └── ... +├── diagrams // Pretty pictures of the project architecture +│ └── ... +├── modules // Substrate Runtime Modules (a.k.a Pallets) +│ ├── ethereum // Ethereum PoA Header Sync Module +│ ├── grandpa // On-Chain GRANDPA Light Client +│ ├── messages // Cross Chain Message Passing +│ ├── dispatch // Target Chain Message Execution +│ └── ... +├── primitives // Code shared between modules, runtimes, and relays +│ └── ... +├── relays // Application for sending headers and messages between chains +│ └── ... 
+└── scripts // Useful development and maintenance scripts +``` + +## Running the Bridge + +To run the Bridge you need to be able to connect the bridge relay node to the RPC interface of nodes +on each side of the bridge (source and target chain). + +There are 3 ways to run the bridge, described below: +- building & running from source, +- building or using Docker images for each individual component, +- running a Docker Compose setup (recommended). + +### Using the Source + +First you'll need to build the bridge nodes and relay. This can be done as follows: + +```bash +# In `parity-bridges-common` folder +cargo build -p rialto-bridge-node +cargo build -p millau-bridge-node +cargo build -p substrate-relay +``` + +### Running + +To run a simple dev network you can use the scripts located in +[the `deployments/local-scripts` folder](./deployments/local-scripts). Since the relayer connects to +both Substrate chains it must be run last. + +```bash +# In `parity-bridges-common` folder +./deployments/local-scripts/run-rialto-bridge-node.sh +./deployments/local-scripts/run-millau-bridge-node.sh +./deployments/local-scripts/relay-millau-to-rialto.sh +``` + +At this point you should see the relayer submitting headers from the Millau Substrate chain to the +Rialto Substrate chain. + +### Local Docker Setup + +To get up and running quickly you can use published Docker images for the bridge nodes and relayer. +The images are published on [Docker Hub](https://hub.docker.com/u/paritytech). + +To run the dev network we first run the two bridge nodes: + +```bash +docker run -p 30333:30333 -p 9933:9933 -p 9944:9944 \ + -it paritytech/rialto-bridge-node --dev --tmp \ + --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external + +docker run -p 30334:30333 -p 9934:9933 -p 9945:9944 \ + -it paritytech/millau-bridge-node --dev --tmp \ + --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external +``` + +Notice that the `docker run` command will accept all the normal Substrate flags. 
For local +development you should at minimum run with the `--dev` flag or else no blocks will be produced. + +Then we need to initialize and run the relayer: + +```bash +docker run --network=host -it \ + paritytech/substrate-relay init-bridge RialtoToMillau \ + --target-host localhost \ + --target-port 9945 \ + --source-host localhost \ + --source-port 9944 \ + --target-signer //Alice + +docker run --network=host -it \ + paritytech/substrate-relay relay-headers RialtoToMillau \ + --target-host localhost \ + --target-port 9945 \ + --source-host localhost \ + --source-port 9944 \ + --target-signer //Bob +``` + +You should now see the relayer submitting headers from the Rialto chain to the Millau chain. + +If you don't want to use the published Docker images you can build images yourself. You can do this +by running the following commands at the top level of the repository. + +```bash +# In `parity-bridges-common` folder +docker build . -t local/rialto-bridge-node --build-arg PROJECT=rialto-bridge-node +docker build . -t local/millau-bridge-node --build-arg PROJECT=millau-bridge-node +docker build . -t local/substrate-relay --build-arg PROJECT=substrate-relay +``` + +_Note: Building the node images will take a long time, so make sure you have some coffee handy._ + +Once you have the images built you can use them in the previous commands by replacing +`paritytech/` with `local/` everywhere. + +### Full Network Docker Compose Setup + +For a more sophisticated deployment which includes bidirectional header sync, message passing, +monitoring dashboards, etc. see the [Deployments README](./deployments/README.md). + +### How to send a message + +A straightforward way to interact with and test the bridge is sending messages. This is explained +in the [send message](./docs/send-message.md) document. + +## Community + +Main hangout for the community is [Element](https://element.io/) (formerly Riot). Element is a chat +server like, for example, Discord. 
Most discussions around Polkadot and Substrate happen +in various Element "rooms" (channels). So, joining Element might be a good idea, anyway. + +If you are interested in information exchange and development of Polkadot related bridges please +feel free to join the [Polkadot Bridges](https://app.element.io/#/room/#bridges:web3.foundation) +Element channel. + +The [Substrate Technical](https://app.element.io/#/room/#substrate-technical:matrix.org) Element +channel is most suited for discussions regarding Substrate itself. diff --git a/polkadot/bin/.keep b/polkadot/bin/.keep new file mode 100644 index 00000000000..e69de29bb2d diff --git a/polkadot/bin/millau/node/Cargo.toml b/polkadot/bin/millau/node/Cargo.toml new file mode 100644 index 00000000000..e31e2c871a5 --- /dev/null +++ b/polkadot/bin/millau/node/Cargo.toml @@ -0,0 +1,62 @@ +[package] +name = "millau-bridge-node" +description = "Substrate node compatible with Millau runtime" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +build = "build.rs" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/parity-bridges-common/" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +jsonrpc-core = "15.1.0" +structopt = "0.3.21" + +# Bridge dependencies + +bp-messages = { path = "../../../primitives/messages" } +bp-millau= { path = "../../../primitives/chain-millau" } +bp-runtime = { path = "../../../primitives/runtime" } +millau-runtime = { path = "../runtime" } +pallet-bridge-messages = { path = "../../../modules/messages" } + +# Substrate Dependencies + +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } +node-inspect = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } 
+sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["wasmtime"] } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-finality-grandpa-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[build-dependencies] +substrate-build-script-utils = "3.0.0" +frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[features] +default = [] + +# 
TODO: https://github.com/paritytech/parity-bridges-common/issues/390 +# I've left the feature flag here to test our CI configuration +runtime-benchmarks = [ + # "millau-runtime/runtime-benchmarks", +] diff --git a/polkadot/bin/millau/node/build.rs b/polkadot/bin/millau/node/build.rs new file mode 100644 index 00000000000..d9b50049e26 --- /dev/null +++ b/polkadot/bin/millau/node/build.rs @@ -0,0 +1,23 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; + +fn main() { + generate_cargo_keys(); + + rerun_if_git_head_changed(); +} diff --git a/polkadot/bin/millau/node/src/chain_spec.rs b/polkadot/bin/millau/node/src/chain_spec.rs new file mode 100644 index 00000000000..f9e9502da72 --- /dev/null +++ b/polkadot/bin/millau/node/src/chain_spec.rs @@ -0,0 +1,195 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use bp_millau::derive_account_from_rialto_id; +use millau_runtime::{ + AccountId, AuraConfig, BalancesConfig, BridgeWestendGrandpaConfig, GenesisConfig, GrandpaConfig, SessionConfig, + SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY, +}; +use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{sr25519, Pair, Public}; +use sp_finality_grandpa::AuthorityId as GrandpaId; +use sp_runtime::traits::{IdentifyAccount, Verify}; + +/// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. +pub type ChainSpec = sc_service::GenericChainSpec; + +/// The chain specification option. This is expected to come in from the CLI and +/// is little more than one of a number of alternatives which can easily be converted +/// from a string (`--chain=...`) into a `ChainSpec`. +#[derive(Clone, Debug)] +pub enum Alternative { + /// Whatever the current runtime is, with just Alice as an auth. + Development, + /// Whatever the current runtime is, with simple Alice/Bob/Charlie/Dave/Eve auths. 
+ LocalTestnet, +} + +/// Helper function to generate a crypto pair from seed +pub fn get_from_seed(seed: &str) -> ::Public { + TPublic::Pair::from_string(&format!("//{}", seed), None) + .expect("static values are valid; qed") + .public() +} + +type AccountPublic = ::Signer; + +/// Helper function to generate an account ID from seed +pub fn get_account_id_from_seed(seed: &str) -> AccountId +where + AccountPublic: From<::Public>, +{ + AccountPublic::from(get_from_seed::(seed)).into_account() +} + +/// Helper function to generate an authority key for Aura +pub fn get_authority_keys_from_seed(s: &str) -> (AccountId, AuraId, GrandpaId) { + ( + get_account_id_from_seed::(s), + get_from_seed::(s), + get_from_seed::(s), + ) +} + +impl Alternative { + /// Get an actual chain config from one of the alternatives. + pub(crate) fn load(self) -> ChainSpec { + match self { + Alternative::Development => ChainSpec::from_genesis( + "Development", + "dev", + sc_service::ChainType::Development, + || { + testnet_genesis( + vec![get_authority_keys_from_seed("Alice")], + get_account_id_from_seed::("Alice"), + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + true, + ) + }, + vec![], + None, + None, + None, + None, + ), + Alternative::LocalTestnet => ChainSpec::from_genesis( + "Local Testnet", + "local_testnet", + sc_service::ChainType::Local, + || { + testnet_genesis( + vec![ + get_authority_keys_from_seed("Alice"), + get_authority_keys_from_seed("Bob"), + get_authority_keys_from_seed("Charlie"), + get_authority_keys_from_seed("Dave"), + get_authority_keys_from_seed("Eve"), + ], + get_account_id_from_seed::("Alice"), + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + 
get_account_id_from_seed::("George"), + get_account_id_from_seed::("Harry"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + get_account_id_from_seed::("George//stash"), + get_account_id_from_seed::("Harry//stash"), + pallet_bridge_messages::Pallet::< + millau_runtime::Runtime, + pallet_bridge_messages::DefaultInstance, + >::relayer_fund_account_id(), + derive_account_from_rialto_id(bp_runtime::SourceAccount::Account( + get_account_id_from_seed::("Dave"), + )), + ], + true, + ) + }, + vec![], + None, + None, + None, + None, + ), + } + } +} + +fn session_keys(aura: AuraId, grandpa: GrandpaId) -> SessionKeys { + SessionKeys { aura, grandpa } +} + +fn testnet_genesis( + initial_authorities: Vec<(AccountId, AuraId, GrandpaId)>, + root_key: AccountId, + endowed_accounts: Vec, + _enable_println: bool, +) -> GenesisConfig { + GenesisConfig { + frame_system: SystemConfig { + code: WASM_BINARY.to_vec(), + changes_trie_config: Default::default(), + }, + pallet_balances: BalancesConfig { + balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(), + }, + pallet_aura: AuraConfig { + authorities: Vec::new(), + }, + pallet_grandpa: GrandpaConfig { + authorities: Vec::new(), + }, + pallet_sudo: SudoConfig { key: root_key }, + pallet_session: SessionConfig { + keys: initial_authorities + .iter() + .map(|x| (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone()))) + .collect::>(), + }, + pallet_bridge_grandpa_Instance1: BridgeWestendGrandpaConfig { + // for our deployments to avoid multiple same-nonces transactions: + // //Alice is already used to initialize Rialto<->Millau bridge + // => let's use //George to initialize Westend->Millau bridge + owner: Some(get_account_id_from_seed::("George")), + ..Default::default() + }, + } +} + 
+#[test] +fn derived_dave_account_is_as_expected() { + let dave = get_account_id_from_seed::("Dave"); + let derived: AccountId = derive_account_from_rialto_id(bp_runtime::SourceAccount::Account(dave)); + assert_eq!( + derived.to_string(), + "5DNW6UVnb7TN6wX5KwXtDYR3Eccecbdzuw89HqjyNfkzce6J".to_string() + ); +} diff --git a/polkadot/bin/millau/node/src/cli.rs b/polkadot/bin/millau/node/src/cli.rs new file mode 100644 index 00000000000..46323ed25c9 --- /dev/null +++ b/polkadot/bin/millau/node/src/cli.rs @@ -0,0 +1,70 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use sc_cli::RunCmd; +use structopt::StructOpt; + +#[derive(Debug, StructOpt)] +pub struct Cli { + #[structopt(subcommand)] + pub subcommand: Option, + + #[structopt(flatten)] + pub run: RunCmd, +} + +/// Possible subcommands of the main binary. +#[derive(Debug, StructOpt)] +pub enum Subcommand { + /// Key management cli utilities + Key(sc_cli::KeySubcommand), + + /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. + Verify(sc_cli::VerifyCmd), + + /// Generate a seed that provides a vanity address. + Vanity(sc_cli::VanityCmd), + + /// Sign a message, with a given (secret) key. + Sign(sc_cli::SignCmd), + + /// Build a chain specification. 
+ BuildSpec(sc_cli::BuildSpecCmd), + + /// Validate blocks. + CheckBlock(sc_cli::CheckBlockCmd), + + /// Export blocks. + ExportBlocks(sc_cli::ExportBlocksCmd), + + /// Export the state of a given block into a chain spec. + ExportState(sc_cli::ExportStateCmd), + + /// Import blocks. + ImportBlocks(sc_cli::ImportBlocksCmd), + + /// Remove the whole chain. + PurgeChain(sc_cli::PurgeChainCmd), + + /// Revert the chain to a previous state. + Revert(sc_cli::RevertCmd), + + /// Inspect blocks or extrinsics. + Inspect(node_inspect::cli::InspectCmd), + + /// Benchmark runtime pallets. + Benchmark(frame_benchmarking_cli::BenchmarkCmd), +} diff --git a/polkadot/bin/millau/node/src/command.rs b/polkadot/bin/millau/node/src/command.rs new file mode 100644 index 00000000000..d73f9b1ac9b --- /dev/null +++ b/polkadot/bin/millau/node/src/command.rs @@ -0,0 +1,172 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +use crate::cli::{Cli, Subcommand}; +use crate::service; +use crate::service::new_partial; +use millau_runtime::{Block, RuntimeApi}; +use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli}; +use sc_service::PartialComponents; + +impl SubstrateCli for Cli { + fn impl_name() -> String { + "Millau Bridge Node".into() + } + + fn impl_version() -> String { + env!("CARGO_PKG_VERSION").into() + } + + fn description() -> String { + "Millau Bridge Node".into() + } + + fn author() -> String { + "Parity Technologies".into() + } + + fn support_url() -> String { + "https://github.com/paritytech/parity-bridges-common/".into() + } + + fn copyright_start_year() -> i32 { + 2019 + } + + fn executable_name() -> String { + "millau-bridge-node".into() + } + + fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { + &millau_runtime::VERSION + } + + fn load_spec(&self, id: &str) -> Result, String> { + Ok(Box::new( + match id { + "" | "dev" => crate::chain_spec::Alternative::Development, + "local" => crate::chain_spec::Alternative::LocalTestnet, + _ => return Err(format!("Unsupported chain specification: {}", id)), + } + .load(), + )) + } +} + +/// Parse and run command line arguments +pub fn run() -> sc_cli::Result<()> { + let cli = Cli::from_args(); + // make sure to set correct crypto version. + sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::Custom( + millau_runtime::SS58Prefix::get() as u16, + )); + + match &cli.subcommand { + Some(Subcommand::Benchmark(cmd)) => { + if cfg!(feature = "runtime-benchmarks") { + let runner = cli.create_runner(cmd)?; + + runner.sync_run(|config| cmd.run::(config)) + } else { + println!( + "Benchmarking wasn't enabled when building the node. \ + You can enable it with `--features runtime-benchmarks`." 
+ ); + Ok(()) + } + } + Some(Subcommand::Key(cmd)) => cmd.run(&cli), + Some(Subcommand::Sign(cmd)) => cmd.run(), + Some(Subcommand::Verify(cmd)) => cmd.run(), + Some(Subcommand::Vanity(cmd)) => cmd.run(), + Some(Subcommand::BuildSpec(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) + } + Some(Subcommand::CheckBlock(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { + client, + task_manager, + import_queue, + .. + } = new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) + }) + } + Some(Subcommand::ExportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { + client, task_manager, .. + } = new_partial(&config)?; + Ok((cmd.run(client, config.database), task_manager)) + }) + } + Some(Subcommand::ExportState(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { + client, task_manager, .. + } = new_partial(&config)?; + Ok((cmd.run(client, config.chain_spec), task_manager)) + }) + } + Some(Subcommand::ImportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { + client, + task_manager, + import_queue, + .. + } = new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) + }) + } + Some(Subcommand::PurgeChain(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.database)) + } + Some(Subcommand::Revert(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { + client, + task_manager, + backend, + .. 
+ } = new_partial(&config)?; + Ok((cmd.run(client, backend), task_manager)) + }) + } + Some(Subcommand::Inspect(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run::(config)) + } + None => { + let runner = cli.create_runner(&cli.run)?; + runner.run_node_until_exit(|config| async move { + match config.role { + Role::Light => service::new_light(config), + _ => service::new_full(config), + } + .map_err(sc_cli::Error::Service) + }) + } + } +} diff --git a/polkadot/bin/millau/node/src/lib.rs b/polkadot/bin/millau/node/src/lib.rs new file mode 100644 index 00000000000..382d1c2d7fb --- /dev/null +++ b/polkadot/bin/millau/node/src/lib.rs @@ -0,0 +1,32 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Substrate Node Template CLI library. +#![warn(missing_docs)] + +mod chain_spec; +#[macro_use] +mod service; +mod cli; +mod command; + +/// Node run result. +pub type Result = sc_cli::Result<()>; + +/// Run node. +pub fn run() -> Result { + command::run() +} diff --git a/polkadot/bin/millau/node/src/main.rs b/polkadot/bin/millau/node/src/main.rs new file mode 100644 index 00000000000..cf6dd9f733a --- /dev/null +++ b/polkadot/bin/millau/node/src/main.rs @@ -0,0 +1,30 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
+// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Millau bridge node. + +#![warn(missing_docs)] + +mod chain_spec; +#[macro_use] +mod service; +mod cli; +mod command; + +/// Run the Millau Node +fn main() -> sc_cli::Result<()> { + command::run() +} diff --git a/polkadot/bin/millau/node/src/service.rs b/polkadot/bin/millau/node/src/service.rs new file mode 100644 index 00000000000..8677ec2e70d --- /dev/null +++ b/polkadot/bin/millau/node/src/service.rs @@ -0,0 +1,444 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. 
+ +// ===================================================================================== +// ===================================================================================== +// ===================================================================================== +// UPDATE GUIDE: +// 1) replace everything with node-template/src/service.rs contents (found in main Substrate repo); +// 2) the only thing to keep from old code, is `rpc_extensions_builder` - we use our own custom RPCs; +// 3) fix compilation errors; +// 4) test :) +// ===================================================================================== +// ===================================================================================== +// ===================================================================================== + +use millau_runtime::{self, opaque::Block, RuntimeApi}; +use sc_client_api::{ExecutorProvider, RemoteBackend}; +use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; +use sc_executor::native_executor_instance; +pub use sc_executor::NativeExecutor; +use sc_finality_grandpa::SharedVoterState; +use sc_keystore::LocalKeystore; +use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; +use sc_telemetry::{Telemetry, TelemetryWorker}; +use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; +use sp_inherents::InherentDataProviders; +use std::sync::Arc; +use std::time::Duration; + +// Our native executor instance. 
+native_executor_instance!( + pub Executor, + millau_runtime::api::dispatch, + millau_runtime::native_version, + frame_benchmarking::benchmarking::HostFunctions, +); + +type FullClient = sc_service::TFullClient; +type FullBackend = sc_service::TFullBackend; +type FullSelectChain = sc_consensus::LongestChain; + +#[allow(clippy::type_complexity)] +pub fn new_partial( + config: &Configuration, +) -> Result< + sc_service::PartialComponents< + FullClient, + FullBackend, + FullSelectChain, + sp_consensus::DefaultImportQueue, + sc_transaction_pool::FullPool, + ( + sc_consensus_aura::AuraBlockImport< + Block, + FullClient, + sc_finality_grandpa::GrandpaBlockImport, + AuraPair, + >, + sc_finality_grandpa::LinkHalf, + Option, + ), + >, + ServiceError, +> { + if config.keystore_remote.is_some() { + return Err(ServiceError::Other("Remote Keystores are not supported.".to_string())); + } + let inherent_data_providers = InherentDataProviders::new(); + + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( + &config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + )?; + let client = Arc::new(client); + + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", worker.run()); + telemetry + }); + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + config.transaction_pool.clone(), + config.role.is_authority().into(), + config.prometheus_registry(), + task_manager.spawn_handle(), + client.clone(), + ); + + let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( + client.clone(), + 
&(client.clone() as Arc<_>), + select_chain.clone(), + telemetry.as_ref().map(|x| x.handle()), + )?; + + let aura_block_import = + sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone()); + + let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { + block_import: aura_block_import.clone(), + justification_import: Some(Box::new(grandpa_block_import)), + client: client.clone(), + inherent_data_providers: inherent_data_providers.clone(), + spawner: &task_manager.spawn_essential_handle(), + can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), + slot_duration: sc_consensus_aura::slot_duration(&*client)?, + registry: config.prometheus_registry(), + check_for_equivocation: Default::default(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + })?; + + Ok(sc_service::PartialComponents { + client, + backend, + task_manager, + import_queue, + keystore_container, + select_chain, + transaction_pool, + inherent_data_providers, + other: (aura_block_import, grandpa_link, telemetry), + }) +} + +fn remote_keystore(_url: &str) -> Result, &'static str> { + // FIXME: here would the concrete keystore be built, + // must return a concrete type (NOT `LocalKeystore`) that + // implements `CryptoStore` and `SyncCryptoStore` + Err("Remote Keystore not supported.") +} + +/// Builds a new service for a full client. 
+pub fn new_full(mut config: Configuration) -> Result { + let sc_service::PartialComponents { + client, + backend, + mut task_manager, + import_queue, + mut keystore_container, + select_chain, + transaction_pool, + inherent_data_providers, + other: (block_import, grandpa_link, mut telemetry), + } = new_partial(&config)?; + + if let Some(url) = &config.keystore_remote { + match remote_keystore(url) { + Ok(k) => keystore_container.set_remote_keystore(k), + Err(e) => { + return Err(ServiceError::Other(format!( + "Error hooking up remote keystore for {}: {}", + url, e + ))) + } + }; + } + + config + .network + .extra_sets + .push(sc_finality_grandpa::grandpa_peers_set_config()); + + let (network, network_status_sinks, system_rpc_tx, network_starter) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: None, + block_announce_validator_builder: None, + })?; + + if config.offchain_worker.enabled { + sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone()); + } + + let role = config.role.clone(); + let force_authoring = config.force_authoring; + let backoff_authoring_blocks: Option<()> = None; + let name = config.network.node_name.clone(); + let enable_grandpa = !config.disable_grandpa; + let prometheus_registry = config.prometheus_registry().cloned(); + + let rpc_extensions_builder = { + use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider; + + use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; + use sc_finality_grandpa_rpc::{GrandpaApi, GrandpaRpcHandler}; + use sc_rpc::DenyUnsafe; + use substrate_frame_rpc_system::{FullSystem, SystemApi}; + + let backend = backend.clone(); + let client = client.clone(); + let pool = transaction_pool.clone(); + + let justification_stream = 
grandpa_link.justification_stream(); + let shared_authority_set = grandpa_link.shared_authority_set().clone(); + let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty(); + + let finality_proof_provider = + GrandpaFinalityProofProvider::new_for_service(backend, Some(shared_authority_set.clone())); + + Box::new(move |_, subscription_executor| { + let mut io = jsonrpc_core::IoHandler::default(); + io.extend_with(SystemApi::to_delegate(FullSystem::new( + client.clone(), + pool.clone(), + DenyUnsafe::No, + ))); + io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new( + client.clone(), + ))); + io.extend_with(GrandpaApi::to_delegate(GrandpaRpcHandler::new( + shared_authority_set.clone(), + shared_voter_state.clone(), + justification_stream.clone(), + subscription_executor, + finality_proof_provider.clone(), + ))); + io + }) + }; + + let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { + network: network.clone(), + client: client.clone(), + keystore: keystore_container.sync_keystore(), + task_manager: &mut task_manager, + transaction_pool: transaction_pool.clone(), + rpc_extensions_builder, + on_demand: None, + remote_blockchain: None, + backend, + network_status_sinks, + system_rpc_tx, + config, + telemetry: telemetry.as_mut(), + })?; + + if role.is_authority() { + let proposer_factory = sc_basic_authorship::ProposerFactory::new( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry.as_ref(), + telemetry.as_ref().map(|x| x.handle()), + ); + + let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); + + let aura = sc_consensus_aura::start_aura::(StartAuraParams { + slot_duration: sc_consensus_aura::slot_duration(&*client)?, + client: client.clone(), + select_chain, + block_import, + proposer_factory, + inherent_data_providers, + force_authoring, + backoff_authoring_blocks, + keystore: keystore_container.sync_keystore(), + can_author_with, + 
sync_oracle: network.clone(), + block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), + telemetry: telemetry.as_ref().map(|x| x.handle()), + })?; + + // the AURA authoring task is considered essential, i.e. if it + // fails we take down the service with it. + task_manager.spawn_essential_handle().spawn_blocking("aura", aura); + } + + // if the node isn't actively participating in consensus then it doesn't + // need a keystore, regardless of which protocol we use below. + let keystore = if role.is_authority() { + Some(keystore_container.sync_keystore()) + } else { + None + }; + + let grandpa_config = sc_finality_grandpa::Config { + // FIXME #1578 make this available through chainspec + gossip_duration: Duration::from_millis(333), + justification_period: 512, + name: Some(name), + observer_enabled: false, + keystore, + is_authority: role.is_authority(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + }; + + if enable_grandpa { + // start the full GRANDPA voter + // NOTE: non-authorities could run the GRANDPA observer protocol, but at + // this point the full voter should provide better guarantees of block + // and vote data availability than the observer. The observer has not + // been tested extensively yet and having most nodes in a network run it + // could lead to finality stalls. + let grandpa_config = sc_finality_grandpa::GrandpaParams { + config: grandpa_config, + link: grandpa_link, + network, + voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), + prometheus_registry, + shared_voter_state: SharedVoterState::empty(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + }; + + // the GRANDPA voter task is considered infallible, i.e. + // if it fails we take down the service with it. 
+ task_manager + .spawn_essential_handle() + .spawn_blocking("grandpa-voter", sc_finality_grandpa::run_grandpa_voter(grandpa_config)?); + } + + network_starter.start_network(); + Ok(task_manager) +} + +/// Builds a new service for a light client. +pub fn new_light(mut config: Configuration) -> Result { + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let (client, backend, keystore_container, mut task_manager, on_demand) = + sc_service::new_light_parts::( + &config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + )?; + + let mut telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", worker.run()); + telemetry + }); + + config + .network + .extra_sets + .push(sc_finality_grandpa::grandpa_peers_set_config()); + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( + config.transaction_pool.clone(), + config.prometheus_registry(), + task_manager.spawn_handle(), + client.clone(), + on_demand.clone(), + )); + + let (grandpa_block_import, _) = sc_finality_grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain, + telemetry.as_ref().map(|x| x.handle()), + )?; + + let aura_block_import = + sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone()); + + let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { + block_import: aura_block_import, + justification_import: Some(Box::new(grandpa_block_import)), + client: client.clone(), + inherent_data_providers: InherentDataProviders::new(), + spawner: &task_manager.spawn_essential_handle(), + can_author_with: sp_consensus::NeverCanAuthor, 
+ slot_duration: sc_consensus_aura::slot_duration(&*client)?, + registry: config.prometheus_registry(), + check_for_equivocation: Default::default(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + })?; + + let (network, network_status_sinks, system_rpc_tx, network_starter) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: Some(on_demand.clone()), + block_announce_validator_builder: None, + })?; + + if config.offchain_worker.enabled { + sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone()); + } + + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + remote_blockchain: Some(backend.remote_blockchain()), + transaction_pool, + task_manager: &mut task_manager, + on_demand: Some(on_demand), + rpc_extensions_builder: Box::new(|_, _| ()), + config, + client, + keystore: keystore_container.sync_keystore(), + backend, + network, + network_status_sinks, + system_rpc_tx, + telemetry: telemetry.as_mut(), + })?; + + network_starter.start_network(); + + Ok(task_manager) +} diff --git a/polkadot/bin/millau/runtime/Cargo.toml b/polkadot/bin/millau/runtime/Cargo.toml new file mode 100644 index 00000000000..e1f7ed10c63 --- /dev/null +++ b/polkadot/bin/millau/runtime/Cargo.toml @@ -0,0 +1,106 @@ +[package] +name = "millau-runtime" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/parity-bridges-common/" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +hex-literal = "0.3" +serde = { version = "1.0.124", optional = true, features = ["derive"] } + +# Bridge dependencies + +bp-header-chain = { path = 
"../../../primitives/header-chain", default-features = false } +bp-messages = { path = "../../../primitives/messages", default-features = false } +bp-millau = { path = "../../../primitives/chain-millau", default-features = false } +bp-rialto = { path = "../../../primitives/chain-rialto", default-features = false } +bp-runtime = { path = "../../../primitives/runtime", default-features = false } +bp-westend = { path = "../../../primitives/chain-westend", default-features = false } +bridge-runtime-common = { path = "../../runtime-common", default-features = false } +pallet-bridge-dispatch = { path = "../../../modules/dispatch", default-features = false } +pallet-bridge-grandpa = { path = "../../../modules/grandpa", default-features = false } +pallet-bridge-messages = { path = "../../../modules/messages", default-features = false } +pallet-shift-session-manager = { path = "../../../modules/shift-session-manager", default-features = false } + +# Substrate Dependencies + +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-session = { git = 
"https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-version = { git = 
"https://github.com/paritytech/substrate", branch = "master" , default-features = false } + +[build-dependencies] +wasm-builder-runner = { package = "substrate-wasm-builder-runner", version = "2.0.0" } + +[features] +default = ["std"] +std = [ + "bp-header-chain/std", + "bp-messages/std", + "bp-millau/std", + "bp-rialto/std", + "bp-runtime/std", + "bp-westend/std", + "bridge-runtime-common/std", + "codec/std", + "frame-executive/std", + "frame-support/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + "pallet-aura/std", + "pallet-balances/std", + "pallet-bridge-dispatch/std", + "pallet-bridge-grandpa/std", + "pallet-bridge-messages/std", + "pallet-grandpa/std", + "pallet-randomness-collective-flip/std", + "pallet-session/std", + "pallet-shift-session-manager/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + "serde", + "sp-api/std", + "sp-block-builder/std", + "sp-consensus-aura/std", + "sp-core/std", + "sp-finality-grandpa/std", + "sp-inherents/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + "sp-std/std", + "sp-transaction-pool/std", + "sp-trie/std", + "sp-version/std", +] +# TODO: https://github.com/paritytech/parity-bridges-common/issues/390 +# I've left the feature flag here to test our CI configuration +runtime-benchmarks = [] diff --git a/polkadot/bin/millau/runtime/build.rs b/polkadot/bin/millau/runtime/build.rs new file mode 100644 index 00000000000..dcb5cb06218 --- /dev/null +++ b/polkadot/bin/millau/runtime/build.rs @@ -0,0 +1,26 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use wasm_builder_runner::WasmBuilder; + +fn main() { + WasmBuilder::new() + .with_current_project() + .with_wasm_builder_from_crates("1.0.11") + .export_heap_base() + .import_memory() + .build() +} diff --git a/polkadot/bin/millau/runtime/src/lib.rs b/polkadot/bin/millau/runtime/src/lib.rs new file mode 100644 index 00000000000..30cf1bd87cd --- /dev/null +++ b/polkadot/bin/millau/runtime/src/lib.rs @@ -0,0 +1,707 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! The Millau runtime. This can be compiled with `#[no_std]`, ready for Wasm. + +#![cfg_attr(not(feature = "std"), no_std)] +// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. 
+#![recursion_limit = "256"] +// Runtime-generated enums +#![allow(clippy::large_enum_variant)] +// Runtime-generated DecodeLimit::decode_all_With_depth_limit +#![allow(clippy::unnecessary_mut_passed)] +// From construct_runtime macro +#![allow(clippy::from_over_into)] + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +pub mod rialto_messages; + +use crate::rialto_messages::{ToRialtoMessagePayload, WithRialtoMessageBridge}; + +use bridge_runtime_common::messages::{source::estimate_message_dispatch_and_delivery_fee, MessageBridge}; +use codec::Decode; +use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList}; +use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; +use sp_api::impl_runtime_apis; +use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::traits::{Block as BlockT, IdentityLookup, NumberFor, OpaqueKeys}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, MultiSignature, MultiSigner, +}; +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; + +// A few exports that help ease life for downstream crates. 
+pub use frame_support::{ + construct_runtime, parameter_types, + traits::{Currency, ExistenceRequirement, Imbalance, KeyOwnerProofSystem, Randomness}, + weights::{constants::WEIGHT_PER_SECOND, DispatchClass, IdentityFee, RuntimeDbWeight, Weight}, + StorageValue, +}; + +pub use frame_system::Call as SystemCall; +pub use pallet_balances::Call as BalancesCall; +pub use pallet_bridge_grandpa::Call as BridgeGrandpaRialtoCall; +pub use pallet_bridge_grandpa::Call as BridgeGrandpaWestendCall; +pub use pallet_bridge_messages::Call as MessagesCall; +pub use pallet_sudo::Call as SudoCall; +pub use pallet_timestamp::Call as TimestampCall; + +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; +pub use sp_runtime::{Perbill, Permill}; + +/// An index to a block. +pub type BlockNumber = bp_millau::BlockNumber; + +/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. +pub type Signature = bp_millau::Signature; + +/// Some way of identifying an account on the chain. We intentionally make it equivalent +/// to the public key of our transaction signing scheme. +pub type AccountId = bp_millau::AccountId; + +/// The type for looking up accounts. We don't expect more than 4 billion of them, but you +/// never know... +pub type AccountIndex = u32; + +/// Balance of an account. +pub type Balance = bp_millau::Balance; + +/// Index of a transaction in the chain. +pub type Index = u32; + +/// A hash of some data used by the chain. +pub type Hash = bp_millau::Hash; + +/// Hashing algorithm used by the chain. +pub type Hashing = bp_millau::Hasher; + +/// Digest item type. +pub type DigestItem = generic::DigestItem; + +/// Opaque types. These are used by the CLI to instantiate machinery that don't need to know +/// the specifics of the runtime. They can then be made to be agnostic over specific formats +/// of data like extrinsics, allowing for them to continue syncing the network through upgrades +/// to even the core data structures. 
+pub mod opaque { + use super::*; + + pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; + + /// Opaque block header type. + pub type Header = generic::Header; + /// Opaque block type. + pub type Block = generic::Block; + /// Opaque block identifier type. + pub type BlockId = generic::BlockId; +} + +impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + pub grandpa: Grandpa, + } +} + +/// This runtime version. +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("millau-runtime"), + impl_name: create_runtime_str!("millau-runtime"), + authoring_version: 1, + spec_version: 1, + impl_version: 1, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, +}; + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { + runtime_version: VERSION, + can_author_with: Default::default(), + } +} + +parameter_types! { + pub const BlockHashCount: BlockNumber = 250; + pub const Version: RuntimeVersion = VERSION; + pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 60_000_000, // ~0.06 ms = ~60 µs + write: 200_000_000, // ~0.2 ms = 200 µs + }; + pub const SS58Prefix: u8 = 60; +} + +impl frame_system::Config for Runtime { + /// The basic call filter to use in dispatchable. + type BaseCallFilter = (); + /// The identifier used to distinguish between accounts. + type AccountId = AccountId; + /// The aggregated dispatch type that is available for extrinsics. + type Call = Call; + /// The lookup mechanism to get account ID from whatever is passed in dispatchers. + type Lookup = IdentityLookup; + /// The index type for storing how many extrinsics an account has signed. + type Index = Index; + /// The index type for blocks. + type BlockNumber = BlockNumber; + /// The type for hashing blocks and tries. + type Hash = Hash; + /// The hashing algorithm used. + type Hashing = Hashing; + /// The header type. 
+ type Header = generic::Header; + /// The ubiquitous event type. + type Event = Event; + /// The ubiquitous origin type. + type Origin = Origin; + /// Maximum number of block number to block hash mappings to keep (oldest pruned first). + type BlockHashCount = BlockHashCount; + /// Version of the runtime. + type Version = Version; + /// Provides information about the pallet setup in the runtime. + type PalletInfo = PalletInfo; + /// What to do if a new account is created. + type OnNewAccount = (); + /// What to do if an account is fully reaped from the system. + type OnKilledAccount = (); + /// The data to be stored in an account. + type AccountData = pallet_balances::AccountData; + // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) + /// Weight information for the extrinsics of this pallet. + type SystemWeightInfo = (); + /// Block and extrinsics weights: base values and limits. + type BlockWeights = bp_millau::BlockWeights; + /// The maximum length of a block (in bytes). + type BlockLength = bp_millau::BlockLength; + /// The weight of database operations that the runtime can invoke. + type DbWeight = DbWeight; + /// The designated SS58 prefix of this chain. + type SS58Prefix = SS58Prefix; + /// The set code logic, just the default since we're not a parachain. 
+ type OnSetCode = (); +} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; +} +impl pallet_bridge_dispatch::Config for Runtime { + type Event = Event; + type MessageId = (bp_messages::LaneId, bp_messages::MessageNonce); + type Call = Call; + type CallFilter = (); + type EncodedCall = crate::rialto_messages::FromRialtoEncodedCall; + type SourceChainAccountId = bp_rialto::AccountId; + type TargetChainAccountPublic = MultiSigner; + type TargetChainSignature = MultiSignature; + type AccountIdConverter = bp_millau::AccountIdConverter; +} + +impl pallet_grandpa::Config for Runtime { + type Event = Event; + type Call = Call; + type KeyOwnerProofSystem = (); + type KeyOwnerProof = >::Proof; + type KeyOwnerIdentification = + >::IdentificationTuple; + type HandleEquivocation = (); + // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) + type WeightInfo = (); +} + +parameter_types! { + pub const MinimumPeriod: u64 = bp_millau::SLOT_DURATION / 2; +} + +impl pallet_timestamp::Config for Runtime { + /// A timestamp: milliseconds since the unix epoch. + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = MinimumPeriod; + // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) + type WeightInfo = (); +} + +parameter_types! { + pub const ExistentialDeposit: bp_millau::Balance = 500; + // For weight estimation, we assume that the most locks on an individual account will be 50. + // This number may need to be adjusted in the future if this assumption no longer holds true. + pub const MaxLocks: u32 = 50; +} + +impl pallet_balances::Config for Runtime { + /// The type for recording an account's balance. + type Balance = Balance; + /// The ubiquitous event type. 
+ type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) + type WeightInfo = (); + type MaxLocks = MaxLocks; +} + +parameter_types! { + pub const TransactionBaseFee: Balance = 0; + pub const TransactionByteFee: Balance = 1; +} + +impl pallet_transaction_payment::Config for Runtime { + type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; + type TransactionByteFee = TransactionByteFee; + type WeightToFee = IdentityFee; + type FeeMultiplierUpdate = (); +} + +impl pallet_sudo::Config for Runtime { + type Event = Event; + type Call = Call; +} + +parameter_types! { + /// Authorities are changing every 5 minutes. + pub const Period: BlockNumber = bp_millau::SESSION_LENGTH; + pub const Offset: BlockNumber = 0; +} + +impl pallet_session::Config for Runtime { + type Event = Event; + type ValidatorId = ::AccountId; + type ValidatorIdOf = (); + type ShouldEndSession = pallet_session::PeriodicSessions; + type NextSessionRotation = pallet_session::PeriodicSessions; + type SessionManager = pallet_shift_session_manager::Pallet; + type SessionHandler = ::KeyTypeIdProviders; + type Keys = SessionKeys; + type DisabledValidatorsThreshold = (); + // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) + type WeightInfo = (); +} + +parameter_types! { + // This is a pretty unscientific cap. + // + // Note that once this is hit the pallet will essentially throttle incoming requests down to one + // call per block. + pub const MaxRequests: u32 = 50; + pub const WestendValidatorCount: u32 = 255; + + // Number of headers to keep. + // + // Assuming the worst case of every header being finalized, we will keep headers for at least a + // week. 
+ pub const HeadersToKeep: u32 = 7 * bp_millau::DAYS as u32; +} + +pub type RialtoGrandpaInstance = (); +impl pallet_bridge_grandpa::Config for Runtime { + type BridgedChain = bp_rialto::Rialto; + type MaxRequests = MaxRequests; + type HeadersToKeep = HeadersToKeep; + + // TODO [#391]: Use weights generated for the Millau runtime instead of Rialto ones. + type WeightInfo = pallet_bridge_grandpa::weights::RialtoWeight; +} + +pub type WestendGrandpaInstance = pallet_bridge_grandpa::Instance1; +impl pallet_bridge_grandpa::Config for Runtime { + type BridgedChain = bp_westend::Westend; + type MaxRequests = MaxRequests; + type HeadersToKeep = HeadersToKeep; + + // TODO [#391]: Use weights generated for the Millau runtime instead of Rialto ones. + type WeightInfo = pallet_bridge_grandpa::weights::RialtoWeight; +} + +impl pallet_shift_session_manager::Config for Runtime {} + +parameter_types! { + pub const MaxMessagesToPruneAtOnce: bp_messages::MessageNonce = 8; + pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = + bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE; + pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = + bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE; + // `IdentityFee` is used by Millau => we may use weight directly + pub const GetDeliveryConfirmationTransactionFee: Balance = + bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT as _; + pub const RootAccountForPayments: Option = None; +} + +/// Instance of the messages pallet used to relay messages to/from Rialto chain. 
+pub type WithRialtoMessagesInstance = pallet_bridge_messages::DefaultInstance; + +impl pallet_bridge_messages::Config for Runtime { + type Event = Event; + // TODO: https://github.com/paritytech/parity-bridges-common/issues/390 + type WeightInfo = pallet_bridge_messages::weights::RialtoWeight; + type Parameter = rialto_messages::MillauToRialtoMessagesParameter; + type MaxMessagesToPruneAtOnce = MaxMessagesToPruneAtOnce; + type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; + type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; + + type OutboundPayload = crate::rialto_messages::ToRialtoMessagePayload; + type OutboundMessageFee = Balance; + + type InboundPayload = crate::rialto_messages::FromRialtoMessagePayload; + type InboundMessageFee = bp_rialto::Balance; + type InboundRelayer = bp_rialto::AccountId; + + type AccountIdConverter = bp_millau::AccountIdConverter; + + type TargetHeaderChain = crate::rialto_messages::Rialto; + type LaneMessageVerifier = crate::rialto_messages::ToRialtoMessageVerifier; + type MessageDeliveryAndDispatchPayment = pallet_bridge_messages::instant_payments::InstantCurrencyPayments< + Runtime, + pallet_balances::Pallet, + GetDeliveryConfirmationTransactionFee, + RootAccountForPayments, + >; + + type SourceHeaderChain = crate::rialto_messages::Rialto; + type MessageDispatch = crate::rialto_messages::FromRialtoMessageDispatch; +} + +construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = opaque::Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + BridgeRialtoMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event}, + BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event}, + BridgeRialtoGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage}, + BridgeWestendGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Config, Storage}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + RandomnessCollectiveFlip: 
pallet_randomness_collective_flip::{Pallet, Call, Storage}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Aura: pallet_aura::{Pallet, Config}, + Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, + Sudo: pallet_sudo::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + ShiftSessionManager: pallet_shift_session_manager::{Pallet}, + } +); + +/// The address format for describing accounts. +pub type Address = AccountId; +/// Block header type as expected by this runtime. +pub type Header = generic::Header; +/// Block type as expected by this runtime. +pub type Block = generic::Block; +/// A Block signed with a Justification +pub type SignedBlock = generic::SignedBlock; +/// BlockId type as expected by this runtime. +pub type BlockId = generic::BlockId; +/// The SignedExtension to the basic transaction logic. +pub type SignedExtra = ( + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, +); +/// The payload being signed in transactions. +pub type SignedPayload = generic::SignedPayload; +/// Unchecked extrinsic type as expected by this runtime. +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +/// Extrinsic type that has already been checked. +pub type CheckedExtrinsic = generic::CheckedExtrinsic; +/// Executive: handles dispatch to the various modules. +pub type Executive = + frame_executive::Executive, Runtime, AllPallets>; + +impl_runtime_apis! 
{ + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block); + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + Runtime::metadata().into() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + + fn random_seed() -> ::Hash { + RandomnessCollectiveFlip::random_seed().0 + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Index { + System::account_nonce(account) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities() + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< + Block, + Balance, + > for Runtime { + fn query_info(uxt: ::Extrinsic, len: u32) -> RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details(uxt: 
::Extrinsic, len: u32) -> FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, sp_core::crypto::KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl fg_primitives::GrandpaApi for Runtime { + fn grandpa_authorities() -> GrandpaAuthorityList { + Grandpa::grandpa_authorities() + } + + fn submit_report_equivocation_unsigned_extrinsic( + equivocation_proof: fg_primitives::EquivocationProof< + ::Hash, + NumberFor, + >, + key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, + ) -> Option<()> { + let key_owner_proof = key_owner_proof.decode()?; + + Grandpa::submit_unsigned_equivocation_report( + equivocation_proof, + key_owner_proof, + ) + } + + fn generate_key_ownership_proof( + _set_id: fg_primitives::SetId, + _authority_id: GrandpaId, + ) -> Option { + // NOTE: this is the only implementation possible since we've + // defined our key owner proof type as a bottom type (i.e. a type + // with no values). 
+ None + } + } + + impl bp_rialto::RialtoFinalityApi for Runtime { + fn best_finalized() -> (bp_rialto::BlockNumber, bp_rialto::Hash) { + let header = BridgeRialtoGrandpa::best_finalized(); + (header.number, header.hash()) + } + + fn is_known_header(hash: bp_rialto::Hash) -> bool { + BridgeRialtoGrandpa::is_known_header(hash) + } + } + + impl bp_westend::WestendFinalityApi for Runtime { + fn best_finalized() -> (bp_westend::BlockNumber, bp_westend::Hash) { + let header = BridgeWestendGrandpa::best_finalized(); + (header.number, header.hash()) + } + + fn is_known_header(hash: bp_westend::Hash) -> bool { + BridgeWestendGrandpa::is_known_header(hash) + } + } + + impl bp_rialto::ToRialtoOutboundLaneApi for Runtime { + fn estimate_message_delivery_and_dispatch_fee( + _lane_id: bp_messages::LaneId, + payload: ToRialtoMessagePayload, + ) -> Option { + estimate_message_dispatch_and_delivery_fee::( + &payload, + WithRialtoMessageBridge::RELAYER_FEE_PERCENT, + ).ok() + } + + fn messages_dispatch_weight( + lane: bp_messages::LaneId, + begin: bp_messages::MessageNonce, + end: bp_messages::MessageNonce, + ) -> Vec<(bp_messages::MessageNonce, Weight, u32)> { + (begin..=end).filter_map(|nonce| { + let encoded_payload = BridgeRialtoMessages::outbound_message_payload(lane, nonce)?; + let decoded_payload = rialto_messages::ToRialtoMessagePayload::decode( + &mut &encoded_payload[..] 
+ ).ok()?; + Some((nonce, decoded_payload.weight, encoded_payload.len() as _)) + }) + .collect() + } + + fn latest_received_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { + BridgeRialtoMessages::outbound_latest_received_nonce(lane) + } + + fn latest_generated_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { + BridgeRialtoMessages::outbound_latest_generated_nonce(lane) + } + } + + impl bp_rialto::FromRialtoInboundLaneApi for Runtime { + fn latest_received_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { + BridgeRialtoMessages::inbound_latest_received_nonce(lane) + } + + fn latest_confirmed_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { + BridgeRialtoMessages::inbound_latest_confirmed_nonce(lane) + } + + fn unrewarded_relayers_state(lane: bp_messages::LaneId) -> bp_messages::UnrewardedRelayersState { + BridgeRialtoMessages::inbound_unrewarded_relayers_state(lane) + } + } +} + +/// Rialto account ownership digest from Millau. +/// +/// The byte vector returned by this function should be signed with a Rialto account private key. +/// This way, the owner of `millau_account_id` on Millau proves that the Rialto account private key +/// is also under his control. 
+pub fn rialto_account_ownership_digest( + rialto_call: &Call, + millau_account_id: AccountId, + rialto_spec_version: SpecVersion, +) -> sp_std::vec::Vec +where + Call: codec::Encode, + AccountId: codec::Encode, + SpecVersion: codec::Encode, +{ + pallet_bridge_dispatch::account_ownership_digest( + rialto_call, + millau_account_id, + rialto_spec_version, + bp_runtime::MILLAU_BRIDGE_INSTANCE, + ) +} + +#[cfg(test)] +mod tests { + use super::*; + use bridge_runtime_common::messages; + + #[test] + fn ensure_millau_message_lane_weights_are_correct() { + // TODO: https://github.com/paritytech/parity-bridges-common/issues/390 + type Weights = pallet_bridge_messages::weights::RialtoWeight; + + pallet_bridge_messages::ensure_weights_are_correct::( + bp_millau::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT, + bp_millau::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT, + bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, + ); + + let max_incoming_message_proof_size = bp_rialto::EXTRA_STORAGE_PROOF_SIZE.saturating_add( + messages::target::maximal_incoming_message_size(bp_millau::max_extrinsic_size()), + ); + pallet_bridge_messages::ensure_able_to_receive_message::( + bp_millau::max_extrinsic_size(), + bp_millau::max_extrinsic_weight(), + max_incoming_message_proof_size, + messages::target::maximal_incoming_message_dispatch_weight(bp_millau::max_extrinsic_weight()), + ); + + let max_incoming_inbound_lane_data_proof_size = bp_messages::InboundLaneData::<()>::encoded_size_hint( + bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, + bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _, + ) + .unwrap_or(u32::MAX); + pallet_bridge_messages::ensure_able_to_receive_confirmation::( + bp_millau::max_extrinsic_size(), + bp_millau::max_extrinsic_weight(), + max_incoming_inbound_lane_data_proof_size, + bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, + ); + } +} diff --git a/polkadot/bin/millau/runtime/src/rialto_messages.rs 
b/polkadot/bin/millau/runtime/src/rialto_messages.rs new file mode 100644 index 00000000000..a800117dc55 --- /dev/null +++ b/polkadot/bin/millau/runtime/src/rialto_messages.rs @@ -0,0 +1,253 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Everything required to serve Millau <-> Rialto messages. + +use crate::Runtime; + +use bp_messages::{ + source_chain::TargetHeaderChain, + target_chain::{ProvedMessages, SourceHeaderChain}, + InboundLaneData, LaneId, Message, MessageNonce, Parameter as MessagesParameter, +}; +use bp_runtime::{InstanceId, RIALTO_BRIDGE_INSTANCE}; +use bridge_runtime_common::messages::{self, MessageBridge, MessageTransaction}; +use codec::{Decode, Encode}; +use frame_support::{ + parameter_types, + weights::{DispatchClass, Weight}, + RuntimeDebug, +}; +use sp_runtime::{FixedPointNumber, FixedU128}; +use sp_std::{convert::TryFrom, ops::RangeInclusive}; + +/// Initial value of `RialtoToMillauConversionRate` parameter. +pub const INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE: FixedU128 = FixedU128::from_inner(FixedU128::DIV); + +parameter_types! { + /// Rialto to Millau conversion rate. Initially we treat both tokens as equal. 
+ pub storage RialtoToMillauConversionRate: FixedU128 = INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE; +} + +/// Message payload for Millau -> Rialto messages. +pub type ToRialtoMessagePayload = messages::source::FromThisChainMessagePayload; + +/// Message verifier for Millau -> Rialto messages. +pub type ToRialtoMessageVerifier = messages::source::FromThisChainMessageVerifier; + +/// Message payload for Rialto -> Millau messages. +pub type FromRialtoMessagePayload = messages::target::FromBridgedChainMessagePayload; + +/// Encoded Millau Call as it comes from Rialto. +pub type FromRialtoEncodedCall = messages::target::FromBridgedChainEncodedMessageCall; + +/// Messages proof for Rialto -> Millau messages. +type FromRialtoMessagesProof = messages::target::FromBridgedChainMessagesProof; + +/// Messages delivery proof for Millau -> Rialto messages. +type ToRialtoMessagesDeliveryProof = messages::source::FromBridgedChainMessagesDeliveryProof; + +/// Call-dispatch based message dispatch for Rialto -> Millau messages. +pub type FromRialtoMessageDispatch = messages::target::FromBridgedChainMessageDispatch< + WithRialtoMessageBridge, + crate::Runtime, + pallet_bridge_dispatch::DefaultInstance, +>; + +/// Millau <-> Rialto message bridge. +#[derive(RuntimeDebug, Clone, Copy)] +pub struct WithRialtoMessageBridge; + +impl MessageBridge for WithRialtoMessageBridge { + const INSTANCE: InstanceId = RIALTO_BRIDGE_INSTANCE; + + const RELAYER_FEE_PERCENT: u32 = 10; + + type ThisChain = Millau; + type BridgedChain = Rialto; + + fn bridged_balance_to_this_balance(bridged_balance: bp_rialto::Balance) -> bp_millau::Balance { + bp_millau::Balance::try_from(RialtoToMillauConversionRate::get().saturating_mul_int(bridged_balance)) + .unwrap_or(bp_millau::Balance::MAX) + } +} + +/// Millau chain from message lane point of view. 
+#[derive(RuntimeDebug, Clone, Copy)] +pub struct Millau; + +impl messages::ChainWithMessages for Millau { + type Hash = bp_millau::Hash; + type AccountId = bp_millau::AccountId; + type Signer = bp_millau::AccountSigner; + type Signature = bp_millau::Signature; + type Weight = Weight; + type Balance = bp_millau::Balance; + + type MessagesInstance = crate::WithRialtoMessagesInstance; +} + +impl messages::ThisChainWithMessages for Millau { + type Call = crate::Call; + + fn is_outbound_lane_enabled(lane: &LaneId) -> bool { + *lane == [0, 0, 0, 0] || *lane == [0, 0, 0, 1] + } + + fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { + MessageNonce::MAX + } + + fn estimate_delivery_confirmation_transaction() -> MessageTransaction { + let inbound_data_size = + InboundLaneData::::encoded_size_hint(bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, 1) + .unwrap_or(u32::MAX); + + MessageTransaction { + dispatch_weight: bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, + size: inbound_data_size + .saturating_add(bp_rialto::EXTRA_STORAGE_PROOF_SIZE) + .saturating_add(bp_millau::TX_EXTRA_BYTES), + } + } + + fn transaction_payment(transaction: MessageTransaction) -> bp_millau::Balance { + // in our testnets, both per-byte fee and weight-to-fee are 1:1 + messages::transaction_payment( + bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, + 1, + FixedU128::zero(), + |weight| weight as _, + transaction, + ) + } +} + +/// Rialto chain from message lane point of view. 
+#[derive(RuntimeDebug, Clone, Copy)] +pub struct Rialto; + +impl messages::ChainWithMessages for Rialto { + type Hash = bp_rialto::Hash; + type AccountId = bp_rialto::AccountId; + type Signer = bp_rialto::AccountSigner; + type Signature = bp_rialto::Signature; + type Weight = Weight; + type Balance = bp_rialto::Balance; + + type MessagesInstance = pallet_bridge_messages::DefaultInstance; +} + +impl messages::BridgedChainWithMessages for Rialto { + fn maximal_extrinsic_size() -> u32 { + bp_rialto::max_extrinsic_size() + } + + fn message_weight_limits(_message_payload: &[u8]) -> RangeInclusive { + // we don't want to relay too large messages + keep reserve for future upgrades + let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(bp_rialto::max_extrinsic_weight()); + + // we're charging for payload bytes in `WithRialtoMessageBridge::transaction_payment` function + // + // this bridge may be used to deliver all kind of messages, so we're not making any assumptions about + // minimal dispatch weight here + + 0..=upper_limit + } + + fn estimate_delivery_transaction( + message_payload: &[u8], + message_dispatch_weight: Weight, + ) -> MessageTransaction { + let message_payload_len = u32::try_from(message_payload.len()).unwrap_or(u32::MAX); + let extra_bytes_in_payload = Weight::from(message_payload_len) + .saturating_sub(pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH.into()); + + MessageTransaction { + dispatch_weight: extra_bytes_in_payload + .saturating_mul(bp_rialto::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT) + .saturating_add(bp_rialto::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT) + .saturating_add(message_dispatch_weight), + size: message_payload_len + .saturating_add(bp_millau::EXTRA_STORAGE_PROOF_SIZE) + .saturating_add(bp_rialto::TX_EXTRA_BYTES), + } + } + + fn transaction_payment(transaction: MessageTransaction) -> bp_rialto::Balance { + // in our testnets, both per-byte fee and weight-to-fee are 1:1 + messages::transaction_payment( + 
bp_rialto::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, + 1, + FixedU128::zero(), + |weight| weight as _, + transaction, + ) + } +} + +impl TargetHeaderChain for Rialto { + type Error = &'static str; + // The proof is: + // - hash of the header this proof has been created with; + // - the storage proof or one or several keys; + // - id of the lane we prove state of. + type MessagesDeliveryProof = ToRialtoMessagesDeliveryProof; + + fn verify_message(payload: &ToRialtoMessagePayload) -> Result<(), Self::Error> { + messages::source::verify_chain_message::(payload) + } + + fn verify_messages_delivery_proof( + proof: Self::MessagesDeliveryProof, + ) -> Result<(LaneId, InboundLaneData), Self::Error> { + messages::source::verify_messages_delivery_proof::(proof) + } +} + +impl SourceHeaderChain for Rialto { + type Error = &'static str; + // The proof is: + // - hash of the header this proof has been created with; + // - the storage proof or one or several keys; + // - id of the lane we prove messages for; + // - inclusive range of messages nonces that are proved. + type MessagesProof = FromRialtoMessagesProof; + + fn verify_messages_proof( + proof: Self::MessagesProof, + messages_count: u32, + ) -> Result>, Self::Error> { + messages::target::verify_messages_proof::(proof, messages_count) + } +} + +/// Millau -> Rialto message lane pallet parameters. +#[derive(RuntimeDebug, Clone, Encode, Decode, PartialEq, Eq)] +pub enum MillauToRialtoMessagesParameter { + /// The conversion formula we use is: `MillauTokens = RialtoTokens * conversion_rate`. 
+ RialtoToMillauConversionRate(FixedU128), +} + +impl MessagesParameter for MillauToRialtoMessagesParameter { + fn save(&self) { + match *self { + MillauToRialtoMessagesParameter::RialtoToMillauConversionRate(ref conversion_rate) => { + RialtoToMillauConversionRate::set(conversion_rate) + } + } + } +} diff --git a/polkadot/bin/rialto/node/Cargo.toml b/polkadot/bin/rialto/node/Cargo.toml new file mode 100644 index 00000000000..a51ee7a5ab5 --- /dev/null +++ b/polkadot/bin/rialto/node/Cargo.toml @@ -0,0 +1,60 @@ +[package] +name = "rialto-bridge-node" +description = "Substrate node compatible with Rialto runtime" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +build = "build.rs" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/parity-bridges-common/" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +jsonrpc-core = "15.1.0" +structopt = "0.3.21" + +# Bridge dependencies + +bp-messages = { path = "../../../primitives/messages" } +bp-runtime = { path = "../../../primitives/runtime" } +bp-rialto = { path = "../../../primitives/chain-rialto" } +pallet-bridge-messages = { path = "../../../modules/messages" } +rialto-runtime = { path = "../runtime" } + +# Substrate Dependencies + + +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } +node-inspect = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["wasmtime"] } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-consensus = { git = 
"https://github.com/paritytech/substrate", branch = "master" } +sc-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-finality-grandpa-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[build-dependencies] +substrate-build-script-utils = "3.0.0" +frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[features] +default = [] +runtime-benchmarks = [ + "rialto-runtime/runtime-benchmarks", +] diff --git a/polkadot/bin/rialto/node/build.rs b/polkadot/bin/rialto/node/build.rs new file mode 100644 index 00000000000..d9b50049e26 --- /dev/null +++ b/polkadot/bin/rialto/node/build.rs @@ -0,0 +1,23 @@ +// Copyright 2019-2021 Parity Technologies 
(UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; + +fn main() { + generate_cargo_keys(); + + rerun_if_git_head_changed(); +} diff --git a/polkadot/bin/rialto/node/src/chain_spec.rs b/polkadot/bin/rialto/node/src/chain_spec.rs new file mode 100644 index 00000000000..732cf1a4b13 --- /dev/null +++ b/polkadot/bin/rialto/node/src/chain_spec.rs @@ -0,0 +1,206 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +use bp_rialto::derive_account_from_millau_id; +use rialto_runtime::{ + AccountId, AuraConfig, BalancesConfig, BridgeKovanConfig, BridgeRialtoPoAConfig, GenesisConfig, GrandpaConfig, + SessionConfig, SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY, +}; +use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{sr25519, Pair, Public}; +use sp_finality_grandpa::AuthorityId as GrandpaId; +use sp_runtime::traits::{IdentifyAccount, Verify}; + +/// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. +pub type ChainSpec = sc_service::GenericChainSpec; + +/// The chain specification option. This is expected to come in from the CLI and +/// is little more than one of a number of alternatives which can easily be converted +/// from a string (`--chain=...`) into a `ChainSpec`. +#[derive(Clone, Debug)] +pub enum Alternative { + /// Whatever the current runtime is, with just Alice as an auth. + Development, + /// Whatever the current runtime is, with simple Alice/Bob/Charlie/Dave/Eve auths. + LocalTestnet, +} + +/// Helper function to generate a crypto pair from seed +pub fn get_from_seed(seed: &str) -> ::Public { + TPublic::Pair::from_string(&format!("//{}", seed), None) + .expect("static values are valid; qed") + .public() +} + +type AccountPublic = ::Signer; + +/// Helper function to generate an account ID from seed +pub fn get_account_id_from_seed(seed: &str) -> AccountId +where + AccountPublic: From<::Public>, +{ + AccountPublic::from(get_from_seed::(seed)).into_account() +} + +/// Helper function to generate an authority key for Aura +pub fn get_authority_keys_from_seed(s: &str) -> (AccountId, AuraId, GrandpaId) { + ( + get_account_id_from_seed::(s), + get_from_seed::(s), + get_from_seed::(s), + ) +} + +impl Alternative { + /// Get an actual chain config from one of the alternatives. 
+ pub(crate) fn load(self) -> ChainSpec { + match self { + Alternative::Development => ChainSpec::from_genesis( + "Development", + "dev", + sc_service::ChainType::Development, + || { + testnet_genesis( + vec![get_authority_keys_from_seed("Alice")], + get_account_id_from_seed::("Alice"), + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + true, + ) + }, + vec![], + None, + None, + None, + None, + ), + Alternative::LocalTestnet => ChainSpec::from_genesis( + "Local Testnet", + "local_testnet", + sc_service::ChainType::Local, + || { + testnet_genesis( + vec![ + get_authority_keys_from_seed("Alice"), + get_authority_keys_from_seed("Bob"), + get_authority_keys_from_seed("Charlie"), + get_authority_keys_from_seed("Dave"), + get_authority_keys_from_seed("Eve"), + ], + get_account_id_from_seed::("Alice"), + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("George"), + get_account_id_from_seed::("Harry"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + get_account_id_from_seed::("George//stash"), + get_account_id_from_seed::("Harry//stash"), + pallet_bridge_messages::Pallet::< + rialto_runtime::Runtime, + pallet_bridge_messages::DefaultInstance, + >::relayer_fund_account_id(), + derive_account_from_millau_id(bp_runtime::SourceAccount::Account( + get_account_id_from_seed::("Dave"), + )), + ], + true, + ) + }, + vec![], + None, + None, + None, + None, + ), + } + } +} + +fn session_keys(aura: AuraId, grandpa: 
GrandpaId) -> SessionKeys { + SessionKeys { aura, grandpa } +} + +fn testnet_genesis( + initial_authorities: Vec<(AccountId, AuraId, GrandpaId)>, + root_key: AccountId, + endowed_accounts: Vec, + _enable_println: bool, +) -> GenesisConfig { + GenesisConfig { + frame_system: SystemConfig { + code: WASM_BINARY.to_vec(), + changes_trie_config: Default::default(), + }, + pallet_balances: BalancesConfig { + balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(), + }, + pallet_aura: AuraConfig { + authorities: Vec::new(), + }, + pallet_bridge_eth_poa_Instance1: load_rialto_poa_bridge_config(), + pallet_bridge_eth_poa_Instance2: load_kovan_bridge_config(), + pallet_grandpa: GrandpaConfig { + authorities: Vec::new(), + }, + pallet_sudo: SudoConfig { key: root_key }, + pallet_session: SessionConfig { + keys: initial_authorities + .iter() + .map(|x| (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone()))) + .collect::>(), + }, + } +} + +fn load_rialto_poa_bridge_config() -> BridgeRialtoPoAConfig { + BridgeRialtoPoAConfig { + initial_header: rialto_runtime::rialto_poa::genesis_header(), + initial_difficulty: 0.into(), + initial_validators: rialto_runtime::rialto_poa::genesis_validators(), + } +} + +fn load_kovan_bridge_config() -> BridgeKovanConfig { + BridgeKovanConfig { + initial_header: rialto_runtime::kovan::genesis_header(), + initial_difficulty: 0.into(), + initial_validators: rialto_runtime::kovan::genesis_validators(), + } +} + +#[test] +fn derived_dave_account_is_as_expected() { + let dave = get_account_id_from_seed::("Dave"); + let derived: AccountId = derive_account_from_millau_id(bp_runtime::SourceAccount::Account(dave)); + assert_eq!( + derived.to_string(), + "5HZhdv53gSJmWWtD8XR5Ypu4PgbT5JNWwGw2mkE75cN61w9t".to_string() + ); +} diff --git a/polkadot/bin/rialto/node/src/cli.rs b/polkadot/bin/rialto/node/src/cli.rs new file mode 100644 index 00000000000..46323ed25c9 --- /dev/null +++ b/polkadot/bin/rialto/node/src/cli.rs @@ -0,0 
+1,70 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use sc_cli::RunCmd; +use structopt::StructOpt; + +#[derive(Debug, StructOpt)] +pub struct Cli { + #[structopt(subcommand)] + pub subcommand: Option, + + #[structopt(flatten)] + pub run: RunCmd, +} + +/// Possible subcommands of the main binary. +#[derive(Debug, StructOpt)] +pub enum Subcommand { + /// Key management cli utilities + Key(sc_cli::KeySubcommand), + + /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. + Verify(sc_cli::VerifyCmd), + + /// Generate a seed that provides a vanity address. + Vanity(sc_cli::VanityCmd), + + /// Sign a message, with a given (secret) key. + Sign(sc_cli::SignCmd), + + /// Build a chain specification. + BuildSpec(sc_cli::BuildSpecCmd), + + /// Validate blocks. + CheckBlock(sc_cli::CheckBlockCmd), + + /// Export blocks. + ExportBlocks(sc_cli::ExportBlocksCmd), + + /// Export the state of a given block into a chain spec. + ExportState(sc_cli::ExportStateCmd), + + /// Import blocks. + ImportBlocks(sc_cli::ImportBlocksCmd), + + /// Remove the whole chain. + PurgeChain(sc_cli::PurgeChainCmd), + + /// Revert the chain to a previous state. + Revert(sc_cli::RevertCmd), + + /// Inspect blocks or extrinsics. 
+ Inspect(node_inspect::cli::InspectCmd), + + /// Benchmark runtime pallets. + Benchmark(frame_benchmarking_cli::BenchmarkCmd), +} diff --git a/polkadot/bin/rialto/node/src/command.rs b/polkadot/bin/rialto/node/src/command.rs new file mode 100644 index 00000000000..a9930c57417 --- /dev/null +++ b/polkadot/bin/rialto/node/src/command.rs @@ -0,0 +1,172 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +use crate::cli::{Cli, Subcommand}; +use crate::service; +use crate::service::new_partial; +use rialto_runtime::{Block, RuntimeApi}; +use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli}; +use sc_service::PartialComponents; + +impl SubstrateCli for Cli { + fn impl_name() -> String { + "Rialto Bridge Node".into() + } + + fn impl_version() -> String { + env!("CARGO_PKG_VERSION").into() + } + + fn description() -> String { + "Rialto Bridge Node".into() + } + + fn author() -> String { + "Parity Technologies".into() + } + + fn support_url() -> String { + "https://github.com/paritytech/parity-bridges-common/".into() + } + + fn copyright_start_year() -> i32 { + 2019 + } + + fn executable_name() -> String { + "rialto-bridge-node".into() + } + + fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { + &rialto_runtime::VERSION + } + + fn load_spec(&self, id: &str) -> Result, String> { + Ok(Box::new( + match id { + "" | "dev" => crate::chain_spec::Alternative::Development, + "local" => crate::chain_spec::Alternative::LocalTestnet, + _ => return Err(format!("Unsupported chain specification: {}", id)), + } + .load(), + )) + } +} + +/// Parse and run command line arguments +pub fn run() -> sc_cli::Result<()> { + let cli = Cli::from_args(); + sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::Custom( + rialto_runtime::SS58Prefix::get() as u16, + )); + + match &cli.subcommand { + Some(Subcommand::Benchmark(cmd)) => { + if cfg!(feature = "runtime-benchmarks") { + let runner = cli.create_runner(cmd)?; + + runner.sync_run(|config| cmd.run::(config)) + } else { + println!( + "Benchmarking wasn't enabled when building the node. \ + You can enable it with `--features runtime-benchmarks`." 
+ ); + Ok(()) + } + } + Some(Subcommand::Key(cmd)) => cmd.run(&cli), + Some(Subcommand::Sign(cmd)) => cmd.run(), + Some(Subcommand::Verify(cmd)) => cmd.run(), + Some(Subcommand::Vanity(cmd)) => cmd.run(), + Some(Subcommand::BuildSpec(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) + } + Some(Subcommand::CheckBlock(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { + client, + task_manager, + import_queue, + .. + } = new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) + }) + } + Some(Subcommand::ExportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { + client, task_manager, .. + } = new_partial(&config)?; + Ok((cmd.run(client, config.database), task_manager)) + }) + } + Some(Subcommand::ExportState(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { + client, task_manager, .. + } = new_partial(&config)?; + Ok((cmd.run(client, config.chain_spec), task_manager)) + }) + } + Some(Subcommand::ImportBlocks(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { + client, + task_manager, + import_queue, + .. + } = new_partial(&config)?; + Ok((cmd.run(client, import_queue), task_manager)) + }) + } + Some(Subcommand::PurgeChain(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run(config.database)) + } + Some(Subcommand::Revert(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + let PartialComponents { + client, + task_manager, + backend, + .. 
+ } = new_partial(&config)?; + Ok((cmd.run(client, backend), task_manager)) + }) + } + Some(Subcommand::Inspect(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.sync_run(|config| cmd.run::(config)) + } + None => { + let runner = cli.create_runner(&cli.run)?; + runner + .run_node_until_exit(|config| async move { + match config.role { + Role::Light => service::new_light(config), + _ => service::new_full(config), + } + }) + .map_err(sc_cli::Error::Service) + } + } +} diff --git a/polkadot/bin/rialto/node/src/main.rs b/polkadot/bin/rialto/node/src/main.rs new file mode 100644 index 00000000000..f319d1437a9 --- /dev/null +++ b/polkadot/bin/rialto/node/src/main.rs @@ -0,0 +1,30 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Rialto bridge node. + +#![warn(missing_docs)] + +mod chain_spec; +#[macro_use] +mod service; +mod cli; +mod command; + +/// Run the Rialto Node +fn main() -> sc_cli::Result<()> { + command::run() +} diff --git a/polkadot/bin/rialto/node/src/service.rs b/polkadot/bin/rialto/node/src/service.rs new file mode 100644 index 00000000000..841202ac7bf --- /dev/null +++ b/polkadot/bin/rialto/node/src/service.rs @@ -0,0 +1,445 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. + +// ===================================================================================== +// ===================================================================================== +// ===================================================================================== +// UPDATE GUIDE: +// 1) replace everything with node-template/src/service.rs contents (found in main Substrate repo); +// 2) the only thing to keep from old code, is `rpc_extensions_builder` - we use our own custom RPCs; +// 3) fix compilation errors; +// 4) test :) +// ===================================================================================== +// ===================================================================================== +// ===================================================================================== + +use rialto_runtime::{self, opaque::Block, RuntimeApi}; +use sc_client_api::{ExecutorProvider, RemoteBackend}; +use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; +use sc_executor::native_executor_instance; +pub use sc_executor::NativeExecutor; +use sc_finality_grandpa::SharedVoterState; +use sc_keystore::LocalKeystore; +use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; +use 
sc_telemetry::{Telemetry, TelemetryWorker}; +use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; +use sp_inherents::InherentDataProviders; +use std::sync::Arc; +use std::time::Duration; + +// Our native executor instance. +native_executor_instance!( + pub Executor, + rialto_runtime::api::dispatch, + rialto_runtime::native_version, + frame_benchmarking::benchmarking::HostFunctions, +); + +type FullClient = sc_service::TFullClient; +type FullBackend = sc_service::TFullBackend; +type FullSelectChain = sc_consensus::LongestChain; + +#[allow(clippy::type_complexity)] +pub fn new_partial( + config: &Configuration, +) -> Result< + sc_service::PartialComponents< + FullClient, + FullBackend, + FullSelectChain, + sp_consensus::DefaultImportQueue, + sc_transaction_pool::FullPool, + ( + sc_consensus_aura::AuraBlockImport< + Block, + FullClient, + sc_finality_grandpa::GrandpaBlockImport, + AuraPair, + >, + sc_finality_grandpa::LinkHalf, + Option, + ), + >, + ServiceError, +> { + if config.keystore_remote.is_some() { + return Err(ServiceError::Other("Remote Keystores are not supported.".to_string())); + } + let inherent_data_providers = InherentDataProviders::new(); + + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( + &config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + )?; + let client = Arc::new(client); + + let telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", worker.run()); + telemetry + }); + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let transaction_pool = sc_transaction_pool::BasicPool::new_full( + config.transaction_pool.clone(), + 
config.role.is_authority().into(), + config.prometheus_registry(), + task_manager.spawn_handle(), + client.clone(), + ); + + let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain.clone(), + telemetry.as_ref().map(|x| x.handle()), + )?; + + let aura_block_import = + sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone()); + + let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { + block_import: aura_block_import.clone(), + justification_import: Some(Box::new(grandpa_block_import)), + client: client.clone(), + inherent_data_providers: inherent_data_providers.clone(), + spawner: &task_manager.spawn_essential_handle(), + can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), + slot_duration: sc_consensus_aura::slot_duration(&*client)?, + registry: config.prometheus_registry(), + check_for_equivocation: Default::default(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + })?; + + Ok(sc_service::PartialComponents { + client, + backend, + task_manager, + import_queue, + keystore_container, + select_chain, + transaction_pool, + inherent_data_providers, + other: (aura_block_import, grandpa_link, telemetry), + }) +} + +fn remote_keystore(_url: &str) -> Result, &'static str> { + // FIXME: here would the concrete keystore be built, + // must return a concrete type (NOT `LocalKeystore`) that + // implements `CryptoStore` and `SyncCryptoStore` + Err("Remote Keystore not supported.") +} + +/// Builds a new service for a full client. 
+pub fn new_full(mut config: Configuration) -> Result { + let sc_service::PartialComponents { + client, + backend, + mut task_manager, + import_queue, + mut keystore_container, + select_chain, + transaction_pool, + inherent_data_providers, + other: (block_import, grandpa_link, mut telemetry), + } = new_partial(&config)?; + + if let Some(url) = &config.keystore_remote { + match remote_keystore(url) { + Ok(k) => keystore_container.set_remote_keystore(k), + Err(e) => { + return Err(ServiceError::Other(format!( + "Error hooking up remote keystore for {}: {}", + url, e + ))) + } + }; + } + + config + .network + .extra_sets + .push(sc_finality_grandpa::grandpa_peers_set_config()); + + let (network, network_status_sinks, system_rpc_tx, network_starter) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: None, + block_announce_validator_builder: None, + })?; + + if config.offchain_worker.enabled { + sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone()); + } + + let role = config.role.clone(); + let force_authoring = config.force_authoring; + let backoff_authoring_blocks: Option<()> = None; + let name = config.network.node_name.clone(); + let enable_grandpa = !config.disable_grandpa; + let prometheus_registry = config.prometheus_registry().cloned(); + + let rpc_extensions_builder = { + use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider; + + use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; + use sc_finality_grandpa_rpc::{GrandpaApi, GrandpaRpcHandler}; + use sc_rpc::DenyUnsafe; + use substrate_frame_rpc_system::{FullSystem, SystemApi}; + + let backend = backend.clone(); + let client = client.clone(); + let pool = transaction_pool.clone(); + + let justification_stream = 
grandpa_link.justification_stream(); + let shared_authority_set = grandpa_link.shared_authority_set().clone(); + let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty(); + + let finality_proof_provider = + GrandpaFinalityProofProvider::new_for_service(backend, Some(shared_authority_set.clone())); + + Box::new(move |_, subscription_executor| { + let mut io = jsonrpc_core::IoHandler::default(); + io.extend_with(SystemApi::to_delegate(FullSystem::new( + client.clone(), + pool.clone(), + DenyUnsafe::No, + ))); + io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new( + client.clone(), + ))); + io.extend_with(GrandpaApi::to_delegate(GrandpaRpcHandler::new( + shared_authority_set.clone(), + shared_voter_state.clone(), + justification_stream.clone(), + subscription_executor, + finality_proof_provider.clone(), + ))); + + io + }) + }; + + let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { + network: network.clone(), + client: client.clone(), + keystore: keystore_container.sync_keystore(), + task_manager: &mut task_manager, + transaction_pool: transaction_pool.clone(), + rpc_extensions_builder, + on_demand: None, + remote_blockchain: None, + backend, + network_status_sinks, + system_rpc_tx, + config, + telemetry: telemetry.as_mut(), + })?; + + if role.is_authority() { + let proposer_factory = sc_basic_authorship::ProposerFactory::new( + task_manager.spawn_handle(), + client.clone(), + transaction_pool, + prometheus_registry.as_ref(), + telemetry.as_ref().map(|x| x.handle()), + ); + + let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); + + let aura = sc_consensus_aura::start_aura::(StartAuraParams { + slot_duration: sc_consensus_aura::slot_duration(&*client)?, + client: client.clone(), + select_chain, + block_import, + proposer_factory, + inherent_data_providers, + force_authoring, + backoff_authoring_blocks, + keystore: keystore_container.sync_keystore(), + can_author_with, 
+ sync_oracle: network.clone(), + block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), + telemetry: telemetry.as_ref().map(|x| x.handle()), + })?; + + // the AURA authoring task is considered essential, i.e. if it + // fails we take down the service with it. + task_manager.spawn_essential_handle().spawn_blocking("aura", aura); + } + + // if the node isn't actively participating in consensus then it doesn't + // need a keystore, regardless of which protocol we use below. + let keystore = if role.is_authority() { + Some(keystore_container.sync_keystore()) + } else { + None + }; + + let grandpa_config = sc_finality_grandpa::Config { + // FIXME #1578 make this available through chainspec + gossip_duration: Duration::from_millis(333), + justification_period: 512, + name: Some(name), + observer_enabled: false, + keystore, + is_authority: role.is_authority(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + }; + + if enable_grandpa { + // start the full GRANDPA voter + // NOTE: non-authorities could run the GRANDPA observer protocol, but at + // this point the full voter should provide better guarantees of block + // and vote data availability than the observer. The observer has not + // been tested extensively yet and having most nodes in a network run it + // could lead to finality stalls. + let grandpa_config = sc_finality_grandpa::GrandpaParams { + config: grandpa_config, + link: grandpa_link, + network, + voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), + prometheus_registry, + shared_voter_state: SharedVoterState::empty(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + }; + + // the GRANDPA voter task is considered infallible, i.e. + // if it fails we take down the service with it. 
+ task_manager + .spawn_essential_handle() + .spawn_blocking("grandpa-voter", sc_finality_grandpa::run_grandpa_voter(grandpa_config)?); + } + + network_starter.start_network(); + Ok(task_manager) +} + +/// Builds a new service for a light client. +pub fn new_light(mut config: Configuration) -> Result { + let telemetry = config + .telemetry_endpoints + .clone() + .filter(|x| !x.is_empty()) + .map(|endpoints| -> Result<_, sc_telemetry::Error> { + let worker = TelemetryWorker::new(16)?; + let telemetry = worker.handle().new_telemetry(endpoints); + Ok((worker, telemetry)) + }) + .transpose()?; + + let (client, backend, keystore_container, mut task_manager, on_demand) = + sc_service::new_light_parts::( + &config, + telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), + )?; + + let mut telemetry = telemetry.map(|(worker, telemetry)| { + task_manager.spawn_handle().spawn("telemetry", worker.run()); + telemetry + }); + + config + .network + .extra_sets + .push(sc_finality_grandpa::grandpa_peers_set_config()); + + let select_chain = sc_consensus::LongestChain::new(backend.clone()); + + let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( + config.transaction_pool.clone(), + config.prometheus_registry(), + task_manager.spawn_handle(), + client.clone(), + on_demand.clone(), + )); + + let (grandpa_block_import, _) = sc_finality_grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain, + telemetry.as_ref().map(|x| x.handle()), + )?; + + let aura_block_import = + sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(grandpa_block_import.clone(), client.clone()); + + let import_queue = sc_consensus_aura::import_queue::(ImportQueueParams { + block_import: aura_block_import, + justification_import: Some(Box::new(grandpa_block_import)), + client: client.clone(), + inherent_data_providers: InherentDataProviders::new(), + spawner: &task_manager.spawn_essential_handle(), + can_author_with: sp_consensus::NeverCanAuthor, 
+ slot_duration: sc_consensus_aura::slot_duration(&*client)?, + registry: config.prometheus_registry(), + check_for_equivocation: Default::default(), + telemetry: telemetry.as_ref().map(|x| x.handle()), + })?; + + let (network, network_status_sinks, system_rpc_tx, network_starter) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + on_demand: Some(on_demand.clone()), + block_announce_validator_builder: None, + })?; + + if config.offchain_worker.enabled { + sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone()); + } + + sc_service::spawn_tasks(sc_service::SpawnTasksParams { + remote_blockchain: Some(backend.remote_blockchain()), + transaction_pool, + task_manager: &mut task_manager, + on_demand: Some(on_demand), + rpc_extensions_builder: Box::new(|_, _| ()), + config, + client, + keystore: keystore_container.sync_keystore(), + backend, + network, + network_status_sinks, + system_rpc_tx, + telemetry: telemetry.as_mut(), + })?; + + network_starter.start_network(); + + Ok(task_manager) +} diff --git a/polkadot/bin/rialto/runtime/Cargo.toml b/polkadot/bin/rialto/runtime/Cargo.toml new file mode 100644 index 00000000000..ea8c51d0e8c --- /dev/null +++ b/polkadot/bin/rialto/runtime/Cargo.toml @@ -0,0 +1,132 @@ +[package] +name = "rialto-runtime" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/parity-bridges-common/" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +hex-literal = "0.3" +libsecp256k1 = { version = "0.3.4", optional = true, default-features = false, features = ["hmac"] } +log = { version = "0.4.14", 
default-features = false } +serde = { version = "1.0.124", optional = true, features = ["derive"] } + +# Bridge dependencies + +bp-currency-exchange = { path = "../../../primitives/currency-exchange", default-features = false } +bp-eth-poa = { path = "../../../primitives/ethereum-poa", default-features = false } +bp-header-chain = { path = "../../../primitives/header-chain", default-features = false } +bp-message-dispatch = { path = "../../../primitives/message-dispatch", default-features = false } +bp-messages = { path = "../../../primitives/messages", default-features = false } +bp-millau = { path = "../../../primitives/chain-millau", default-features = false } +bp-rialto = { path = "../../../primitives/chain-rialto", default-features = false } +bp-runtime = { path = "../../../primitives/runtime", default-features = false } +bridge-runtime-common = { path = "../../runtime-common", default-features = false } +pallet-bridge-currency-exchange = { path = "../../../modules/currency-exchange", default-features = false } +pallet-bridge-dispatch = { path = "../../../modules/dispatch", default-features = false } +pallet-bridge-eth-poa = { path = "../../../modules/ethereum", default-features = false } +pallet-bridge-grandpa = { path = "../../../modules/grandpa", default-features = false } +pallet-bridge-messages = { path = "../../../modules/messages", default-features = false } +pallet-shift-session-manager = { path = "../../../modules/shift-session-manager", default-features = false } + +# Substrate Dependencies + +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , 
default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-io = { git = 
"https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + + +[dev-dependencies] +libsecp256k1 = { version = "0.3.4", features = ["hmac"] } + +[build-dependencies] +wasm-builder-runner = { package = "substrate-wasm-builder-runner", version = "2.0.0" } + +[features] +default = ["std"] +std = [ + "bp-currency-exchange/std", + "bp-eth-poa/std", + "bp-header-chain/std", + "bp-message-dispatch/std", + "bp-messages/std", + "bp-millau/std", + "bp-rialto/std", + "bp-runtime/std", + "bridge-runtime-common/std", + "codec/std", + "frame-benchmarking/std", + "frame-executive/std", + "frame-support/std", + "frame-system-rpc-runtime-api/std", + "frame-system/std", + "log/std", + "pallet-aura/std", + "pallet-balances/std", + "pallet-bridge-currency-exchange/std", + "pallet-bridge-dispatch/std", + "pallet-bridge-eth-poa/std", + "pallet-bridge-grandpa/std", + "pallet-bridge-messages/std", + "pallet-grandpa/std", + "pallet-randomness-collective-flip/std", + "pallet-shift-session-manager/std", + "pallet-sudo/std", + "pallet-timestamp/std", + "pallet-transaction-payment-rpc-runtime-api/std", + "pallet-transaction-payment/std", + "serde", + "sp-api/std", + "sp-block-builder/std", + 
"sp-consensus-aura/std", + "sp-core/std", + "sp-finality-grandpa/std", + "sp-inherents/std", + "sp-io/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + "sp-std/std", + "sp-transaction-pool/std", + "sp-trie/std", + "sp-version/std", +] +runtime-benchmarks = [ + "bridge-runtime-common/runtime-benchmarks", + "frame-benchmarking", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "libsecp256k1", + "pallet-bridge-currency-exchange/runtime-benchmarks", + "pallet-bridge-eth-poa/runtime-benchmarks", + "pallet-bridge-messages/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] diff --git a/polkadot/bin/rialto/runtime/build.rs b/polkadot/bin/rialto/runtime/build.rs new file mode 100644 index 00000000000..dcb5cb06218 --- /dev/null +++ b/polkadot/bin/rialto/runtime/build.rs @@ -0,0 +1,26 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +use wasm_builder_runner::WasmBuilder; + +fn main() { + WasmBuilder::new() + .with_current_project() + .with_wasm_builder_from_crates("1.0.11") + .export_heap_base() + .import_memory() + .build() +} diff --git a/polkadot/bin/rialto/runtime/src/benches.rs b/polkadot/bin/rialto/runtime/src/benches.rs new file mode 100644 index 00000000000..86d6b8361c6 --- /dev/null +++ b/polkadot/bin/rialto/runtime/src/benches.rs @@ -0,0 +1,37 @@ +// Copyright 2020-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! We want to use a different validator configuration for benchmarking than what's used in Kovan +//! or in our Rialto test network. However, we can't configure a new validator set on the fly which +//! means we need to wire the runtime together like this + +use pallet_bridge_eth_poa::{ValidatorsConfiguration, ValidatorsSource}; +use sp_std::vec; + +pub use crate::kovan::{ + genesis_header, genesis_validators, BridgeAuraConfiguration, FinalityVotesCachingInterval, PruningStrategy, +}; + +frame_support::parameter_types! 
{ + pub BridgeValidatorsConfiguration: pallet_bridge_eth_poa::ValidatorsConfiguration = bench_validator_config(); +} + +fn bench_validator_config() -> ValidatorsConfiguration { + ValidatorsConfiguration::Multi(vec![ + (0, ValidatorsSource::List(vec![[1; 20].into()])), + (1, ValidatorsSource::Contract([3; 20].into(), vec![[1; 20].into()])), + ]) +} diff --git a/polkadot/bin/rialto/runtime/src/exchange.rs b/polkadot/bin/rialto/runtime/src/exchange.rs new file mode 100644 index 00000000000..a054962a79c --- /dev/null +++ b/polkadot/bin/rialto/runtime/src/exchange.rs @@ -0,0 +1,260 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Support for PoA -> Substrate native tokens exchange. +//! +//! If you want to exchange native PoA tokens for native Substrate +//! chain tokens, you need to: +//! 1) send some PoA tokens to `LOCK_FUNDS_ADDRESS` address on PoA chain. Data field of +//! the transaction must be SCALE-encoded id of Substrate account that will receive +//! funds on Substrate chain; +//! 2) wait until the 'lock funds' transaction is mined on PoA chain; +//! 3) wait until the block containing the 'lock funds' transaction is finalized on PoA chain; +//! 4) wait until the required PoA header and its finality are provided +//! 
to the PoA -> Substrate bridge module (it can be provided by you); +//! 5) receive tokens by providing proof-of-inclusion of PoA transaction. + +use bp_currency_exchange::{ + Error as ExchangeError, LockFundsTransaction, MaybeLockFundsTransaction, Result as ExchangeResult, +}; +use bp_eth_poa::{transaction_decode_rlp, RawTransaction, RawTransactionReceipt}; +use codec::{Decode, Encode}; +use frame_support::RuntimeDebug; +use hex_literal::hex; +use sp_std::vec::Vec; + +/// Ethereum address where locked PoA funds must be sent to. +pub const LOCK_FUNDS_ADDRESS: [u8; 20] = hex!("DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF"); + +/// Ethereum transaction inclusion proof. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] +pub struct EthereumTransactionInclusionProof { + /// Hash of the block with transaction. + pub block: sp_core::H256, + /// Index of the transaction within the block. + pub index: u64, + /// The proof itself (right now it is all RLP-encoded transactions of the block + + /// RLP-encoded receipts of all transactions of the block). + pub proof: Vec<(RawTransaction, RawTransactionReceipt)>, +} + +/// We uniquely identify transfer by the pair (sender, nonce). +/// +/// The assumption is that this pair will never appear more than once in +/// transactions included into finalized blocks. This is obviously true +/// for any existing eth-like chain (that keep current tx format), because +/// otherwise transaction can be replayed over and over. +#[derive(Encode, Decode, PartialEq, RuntimeDebug)] +pub struct EthereumTransactionTag { + /// Account that has locked funds. + pub account: [u8; 20], + /// Lock transaction nonce. + pub nonce: sp_core::U256, +} + +/// Eth transaction from runtime perspective. 
+pub struct EthTransaction; + +impl MaybeLockFundsTransaction for EthTransaction { + type Transaction = RawTransaction; + type Id = EthereumTransactionTag; + type Recipient = crate::AccountId; + type Amount = crate::Balance; + + fn parse( + raw_tx: &Self::Transaction, + ) -> ExchangeResult> { + let tx = transaction_decode_rlp(raw_tx).map_err(|_| ExchangeError::InvalidTransaction)?; + + // we only accept transactions sending funds directly to the pre-configured address + if tx.unsigned.to != Some(LOCK_FUNDS_ADDRESS.into()) { + log::trace!( + target: "runtime", + "Failed to parse fund locks transaction. Invalid peer recipient: {:?}", + tx.unsigned.to, + ); + + return Err(ExchangeError::InvalidTransaction); + } + + let mut recipient_raw = sp_core::H256::default(); + match tx.unsigned.payload.len() { + 32 => recipient_raw.as_fixed_bytes_mut().copy_from_slice(&tx.unsigned.payload), + len => { + log::trace!( + target: "runtime", + "Failed to parse fund locks transaction. Invalid recipient length: {}", + len, + ); + + return Err(ExchangeError::InvalidRecipient); + } + } + let amount = tx.unsigned.value.low_u128(); + + if tx.unsigned.value != amount.into() { + log::trace!( + target: "runtime", + "Failed to parse fund locks transaction. Invalid amount: {}", + tx.unsigned.value, + ); + + return Err(ExchangeError::InvalidAmount); + } + + Ok(LockFundsTransaction { + id: EthereumTransactionTag { + account: *tx.sender.as_fixed_bytes(), + nonce: tx.unsigned.nonce, + }, + recipient: crate::AccountId::from(*recipient_raw.as_fixed_bytes()), + amount, + }) + } +} + +/// Prepares everything required to bench claim of funds locked by given transaction. 
+#[cfg(feature = "runtime-benchmarks")] +pub(crate) fn prepare_environment_for_claim, I: frame_support::traits::Instance>( + transactions: &[(RawTransaction, RawTransactionReceipt)], +) -> bp_eth_poa::H256 { + use bp_eth_poa::compute_merkle_root; + use pallet_bridge_eth_poa::{ + test_utils::{insert_dummy_header, validator_utils::validator, HeaderBuilder}, + BridgeStorage, Storage, + }; + + let mut storage = BridgeStorage::::new(); + let header = HeaderBuilder::with_parent_number_on_runtime::(0) + .transactions_root(compute_merkle_root(transactions.iter().map(|(tx, _)| tx))) + .receipts_root(compute_merkle_root(transactions.iter().map(|(_, receipt)| receipt))) + .sign_by(&validator(0)); + let header_id = header.compute_id(); + insert_dummy_header(&mut storage, header); + storage.finalize_and_prune_headers(Some(header_id), 0); + + header_id.hash +} + +/// Prepare signed ethereum lock-funds transaction. +#[cfg(any(feature = "runtime-benchmarks", test))] +pub(crate) fn prepare_ethereum_transaction( + recipient: &crate::AccountId, + editor: impl Fn(&mut bp_eth_poa::UnsignedTransaction), +) -> (RawTransaction, RawTransactionReceipt) { + use bp_eth_poa::{signatures::SignTransaction, Receipt, TransactionOutcome}; + + // prepare tx for OpenEthereum private dev chain: + // chain id is 0x11 + // sender secret is 0x4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7 + let chain_id = 0x11; + let signer = secp256k1::SecretKey::parse(&hex!( + "4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7" + )) + .unwrap(); + let recipient_raw: &[u8; 32] = recipient.as_ref(); + let mut eth_tx = bp_eth_poa::UnsignedTransaction { + nonce: 0.into(), + to: Some(LOCK_FUNDS_ADDRESS.into()), + value: 100.into(), + gas: 100_000.into(), + gas_price: 100_000.into(), + payload: recipient_raw.to_vec(), + }; + editor(&mut eth_tx); + ( + eth_tx.sign_by(&signer, Some(chain_id)), + Receipt { + outcome: TransactionOutcome::StatusCode(1), + gas_used: Default::default(), + log_bloom: 
Default::default(), + logs: Vec::new(), + } + .rlp(), + ) +} + +#[cfg(test)] +mod tests { + use super::*; + use hex_literal::hex; + + fn ferdie() -> crate::AccountId { + hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c").into() + } + + #[test] + fn valid_transaction_accepted() { + assert_eq!( + EthTransaction::parse(&prepare_ethereum_transaction(&ferdie(), |_| {}).0), + Ok(LockFundsTransaction { + id: EthereumTransactionTag { + account: hex!("00a329c0648769a73afac7f9381e08fb43dbea72"), + nonce: 0.into(), + }, + recipient: ferdie(), + amount: 100, + }), + ); + } + + #[test] + fn invalid_transaction_rejected() { + assert_eq!( + EthTransaction::parse(&Vec::new()), + Err(ExchangeError::InvalidTransaction), + ); + } + + #[test] + fn transaction_with_invalid_peer_recipient_rejected() { + assert_eq!( + EthTransaction::parse( + &prepare_ethereum_transaction(&ferdie(), |tx| { + tx.to = None; + }) + .0 + ), + Err(ExchangeError::InvalidTransaction), + ); + } + + #[test] + fn transaction_with_invalid_recipient_rejected() { + assert_eq!( + EthTransaction::parse( + &prepare_ethereum_transaction(&ferdie(), |tx| { + tx.payload.clear(); + }) + .0 + ), + Err(ExchangeError::InvalidRecipient), + ); + } + + #[test] + fn transaction_with_invalid_amount_rejected() { + assert_eq!( + EthTransaction::parse( + &prepare_ethereum_transaction(&ferdie(), |tx| { + tx.value = sp_core::U256::from(u128::max_value()) + sp_core::U256::from(1); + }) + .0 + ), + Err(ExchangeError::InvalidAmount), + ); + } +} diff --git a/polkadot/bin/rialto/runtime/src/kovan.rs b/polkadot/bin/rialto/runtime/src/kovan.rs new file mode 100644 index 00000000000..03b0ca8a071 --- /dev/null +++ b/polkadot/bin/rialto/runtime/src/kovan.rs @@ -0,0 +1,192 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::exchange::EthereumTransactionInclusionProof; + +use bp_eth_poa::{Address, AuraHeader, RawTransaction, U256}; +use bp_header_chain::InclusionProofVerifier; +use frame_support::RuntimeDebug; +use hex_literal::hex; +use pallet_bridge_eth_poa::{ + AuraConfiguration, ChainTime as TChainTime, PruningStrategy as BridgePruningStrategy, ValidatorsConfiguration, + ValidatorsSource, +}; +use sp_std::prelude::*; + +frame_support::parameter_types! { + pub const FinalityVotesCachingInterval: Option = Some(16); + pub BridgeAuraConfiguration: AuraConfiguration = + kovan_aura_configuration(); + pub BridgeValidatorsConfiguration: ValidatorsConfiguration = + kovan_validators_configuration(); +} + +/// Max number of finalized headers to keep. It is equivalent of ~24 hours of +/// finalized blocks on current Kovan chain. +const FINALIZED_HEADERS_TO_KEEP: u64 = 20_000; + +/// Aura engine configuration for Kovan chain. 
+pub fn kovan_aura_configuration() -> AuraConfiguration { + AuraConfiguration { + empty_steps_transition: u64::max_value(), + strict_empty_steps_transition: 0, + validate_step_transition: 0x16e360, + validate_score_transition: 0x41a3c4, + two_thirds_majority_transition: u64::max_value(), + min_gas_limit: 0x1388.into(), + max_gas_limit: U256::max_value(), + maximum_extra_data_size: 0x20, + } +} + +/// Validators configuration for Kovan chain. +pub fn kovan_validators_configuration() -> ValidatorsConfiguration { + ValidatorsConfiguration::Multi(vec![ + (0, ValidatorsSource::List(genesis_validators())), + ( + 10960440, + ValidatorsSource::List(vec![ + hex!("00D6Cc1BA9cf89BD2e58009741f4F7325BAdc0ED").into(), + hex!("0010f94b296a852aaac52ea6c5ac72e03afd032d").into(), + hex!("00a0a24b9f0e5ec7aa4c7389b8302fd0123194de").into(), + ]), + ), + ( + 10960500, + ValidatorsSource::Contract( + hex!("aE71807C1B0a093cB1547b682DC78316D945c9B8").into(), + vec![ + hex!("d05f7478c6aa10781258c5cc8b4f385fc8fa989c").into(), + hex!("03801efb0efe2a25ede5dd3a003ae880c0292e4d").into(), + hex!("a4df255ecf08bbf2c28055c65225c9a9847abd94").into(), + hex!("596e8221a30bfe6e7eff67fee664a01c73ba3c56").into(), + hex!("faadface3fbd81ce37b0e19c0b65ff4234148132").into(), + ], + ), + ), + ]) +} + +/// Genesis validators set of Kovan chain. +pub fn genesis_validators() -> Vec
{ + vec![ + hex!("00D6Cc1BA9cf89BD2e58009741f4F7325BAdc0ED").into(), + hex!("00427feae2419c15b89d1c21af10d1b6650a4d3d").into(), + hex!("4Ed9B08e6354C70fE6F8CB0411b0d3246b424d6c").into(), + hex!("0020ee4Be0e2027d76603cB751eE069519bA81A1").into(), + hex!("0010f94b296a852aaac52ea6c5ac72e03afd032d").into(), + hex!("007733a1FE69CF3f2CF989F81C7b4cAc1693387A").into(), + hex!("00E6d2b931F55a3f1701c7389d592a7778897879").into(), + hex!("00e4a10650e5a6D6001C38ff8E64F97016a1645c").into(), + hex!("00a0a24b9f0e5ec7aa4c7389b8302fd0123194de").into(), + ] +} + +/// Genesis header of the Kovan chain. +pub fn genesis_header() -> AuraHeader { + AuraHeader { + parent_hash: Default::default(), + timestamp: 0, + number: 0, + author: Default::default(), + transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), + uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").into(), + extra_data: vec![], + state_root: hex!("2480155b48a1cea17d67dbfdfaafe821c1d19cdd478c5358e8ec56dec24502b2").into(), + receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), + log_bloom: Default::default(), + gas_used: Default::default(), + gas_limit: 6000000.into(), + difficulty: 131072.into(), + seal: vec![ + vec![128], + vec![ + 184, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ], + ], + } +} + +/// Kovan headers pruning strategy. +/// +/// We do not prune unfinalized headers because exchange module only accepts +/// claims from finalized headers. And if we're pruning unfinalized headers, then +/// some claims may never be accepted. 
+#[derive(Default, RuntimeDebug)] +pub struct PruningStrategy; + +impl BridgePruningStrategy for PruningStrategy { + fn pruning_upper_bound(&mut self, _best_number: u64, best_finalized_number: u64) -> u64 { + best_finalized_number.saturating_sub(FINALIZED_HEADERS_TO_KEEP) + } +} + +/// PoA Header timestamp verification against `Timestamp` pallet. +#[derive(Default, RuntimeDebug)] +pub struct ChainTime; + +impl TChainTime for ChainTime { + fn is_timestamp_ahead(&self, timestamp: u64) -> bool { + let now = super::Timestamp::now(); + timestamp > now + } +} + +/// The Kovan Blockchain as seen by the runtime. +pub struct KovanBlockchain; + +impl InclusionProofVerifier for KovanBlockchain { + type Transaction = RawTransaction; + type TransactionInclusionProof = EthereumTransactionInclusionProof; + + fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option { + let is_transaction_finalized = + crate::BridgeKovan::verify_transaction_finalized(proof.block, proof.index, &proof.proof); + + if !is_transaction_finalized { + return None; + } + + proof.proof.get(proof.index as usize).map(|(tx, _)| tx.clone()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn pruning_strategy_keeps_enough_headers() { + assert_eq!( + PruningStrategy::default().pruning_upper_bound(100_000, 10_000), + 0, + "10_000 <= 20_000 => nothing should be pruned yet", + ); + + assert_eq!( + PruningStrategy::default().pruning_upper_bound(100_000, 20_000), + 0, + "20_000 <= 20_000 => nothing should be pruned yet", + ); + + assert_eq!( + PruningStrategy::default().pruning_upper_bound(100_000, 30_000), + 10_000, + "20_000 <= 30_000 => we're ready to prune first 10_000 headers", + ); + } +} diff --git a/polkadot/bin/rialto/runtime/src/lib.rs b/polkadot/bin/rialto/runtime/src/lib.rs new file mode 100644 index 00000000000..4e81d3efb1f --- /dev/null +++ b/polkadot/bin/rialto/runtime/src/lib.rs @@ -0,0 +1,1148 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
+// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! The Rialto runtime. This can be compiled with `#[no_std]`, ready for Wasm. + +#![cfg_attr(not(feature = "std"), no_std)] +// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. +#![recursion_limit = "256"] +// Runtime-generated enums +#![allow(clippy::large_enum_variant)] +// Runtime-generated DecodeLimit::decode_all_With_depth_limit +#![allow(clippy::unnecessary_mut_passed)] +// From construct_runtime macro +#![allow(clippy::from_over_into)] + +// Make the WASM binary available. 
+#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +pub mod exchange; + +#[cfg(feature = "runtime-benchmarks")] +pub mod benches; +pub mod kovan; +pub mod millau_messages; +pub mod rialto_poa; + +use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge}; + +use bridge_runtime_common::messages::{source::estimate_message_dispatch_and_delivery_fee, MessageBridge}; +use codec::Decode; +use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList}; +use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; +use sp_api::impl_runtime_apis; +use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_runtime::traits::{Block as BlockT, IdentityLookup, NumberFor, OpaqueKeys}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, MultiSignature, MultiSigner, +}; +use sp_std::prelude::*; +#[cfg(feature = "std")] +use sp_version::NativeVersion; +use sp_version::RuntimeVersion; + +// A few exports that help ease life for downstream crates. 
+pub use frame_support::{ + construct_runtime, parameter_types, + traits::{Currency, ExistenceRequirement, Imbalance, KeyOwnerProofSystem, Randomness}, + weights::{constants::WEIGHT_PER_SECOND, DispatchClass, IdentityFee, RuntimeDbWeight, Weight}, + StorageValue, +}; + +pub use frame_system::Call as SystemCall; +pub use pallet_balances::Call as BalancesCall; +pub use pallet_bridge_currency_exchange::Call as BridgeCurrencyExchangeCall; +pub use pallet_bridge_eth_poa::Call as BridgeEthPoACall; +pub use pallet_bridge_grandpa::Call as BridgeGrandpaMillauCall; +pub use pallet_bridge_messages::Call as MessagesCall; +pub use pallet_sudo::Call as SudoCall; +pub use pallet_timestamp::Call as TimestampCall; + +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; +pub use sp_runtime::{Perbill, Permill}; + +/// An index to a block. +pub type BlockNumber = bp_rialto::BlockNumber; + +/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. +pub type Signature = bp_rialto::Signature; + +/// Some way of identifying an account on the chain. We intentionally make it equivalent +/// to the public key of our transaction signing scheme. +pub type AccountId = bp_rialto::AccountId; + +/// The type for looking up accounts. We don't expect more than 4 billion of them, but you +/// never know... +pub type AccountIndex = u32; + +/// Balance of an account. +pub type Balance = bp_rialto::Balance; + +/// Index of a transaction in the chain. +pub type Index = u32; + +/// A hash of some data used by the chain. +pub type Hash = bp_rialto::Hash; + +/// Hashing algorithm used by the chain. +pub type Hashing = bp_rialto::Hasher; + +/// Digest item type. +pub type DigestItem = generic::DigestItem; + +/// Opaque types. These are used by the CLI to instantiate machinery that don't need to know +/// the specifics of the runtime. 
They can then be made to be agnostic over specific formats +/// of data like extrinsics, allowing for them to continue syncing the network through upgrades +/// to even the core data structures. +pub mod opaque { + use super::*; + + pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; + + /// Opaque block header type. + pub type Header = generic::Header; + /// Opaque block type. + pub type Block = generic::Block; + /// Opaque block identifier type. + pub type BlockId = generic::BlockId; +} + +impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + pub grandpa: Grandpa, + } +} + +/// This runtime version. +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("rialto-runtime"), + impl_name: create_runtime_str!("rialto-runtime"), + authoring_version: 1, + spec_version: 1, + impl_version: 1, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, +}; + +/// The version information used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { + runtime_version: VERSION, + can_author_with: Default::default(), + } +} + +parameter_types! { + pub const BlockHashCount: BlockNumber = 250; + pub const Version: RuntimeVersion = VERSION; + pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 60_000_000, // ~0.06 ms = ~60 µs + write: 200_000_000, // ~0.2 ms = 200 µs + }; + pub const SS58Prefix: u8 = 48; +} + +impl frame_system::Config for Runtime { + /// The basic call filter to use in dispatchable. + type BaseCallFilter = (); + /// The identifier used to distinguish between accounts. + type AccountId = AccountId; + /// The aggregated dispatch type that is available for extrinsics. + type Call = Call; + /// The lookup mechanism to get account ID from whatever is passed in dispatchers. + type Lookup = IdentityLookup; + /// The index type for storing how many extrinsics an account has signed. 
+ type Index = Index; + /// The index type for blocks. + type BlockNumber = BlockNumber; + /// The type for hashing blocks and tries. + type Hash = Hash; + /// The hashing algorithm used. + type Hashing = Hashing; + /// The header type. + type Header = generic::Header; + /// The ubiquitous event type. + type Event = Event; + /// The ubiquitous origin type. + type Origin = Origin; + /// Maximum number of block number to block hash mappings to keep (oldest pruned first). + type BlockHashCount = BlockHashCount; + /// Version of the runtime. + type Version = Version; + /// Provides information about the pallet setup in the runtime. + type PalletInfo = PalletInfo; + /// What to do if a new account is created. + type OnNewAccount = (); + /// What to do if an account is fully reaped from the system. + type OnKilledAccount = (); + /// The data to be stored in an account. + type AccountData = pallet_balances::AccountData; + // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) + /// Weight information for the extrinsics of this pallet. + type SystemWeightInfo = (); + /// Block and extrinsics weights: base values and limits. + type BlockWeights = bp_rialto::BlockWeights; + /// The maximum length of a block (in bytes). + type BlockLength = bp_rialto::BlockLength; + /// The weight of database operations that the runtime can invoke. + type DbWeight = DbWeight; + /// The designated SS58 prefix of this chain. + type SS58Prefix = SS58Prefix; + /// The set code logic, just the default since we're not a parachain. 
+ type OnSetCode = (); +} + +impl pallet_aura::Config for Runtime { + type AuthorityId = AuraId; +} + +type RialtoPoA = pallet_bridge_eth_poa::Instance1; +impl pallet_bridge_eth_poa::Config for Runtime { + type AuraConfiguration = rialto_poa::BridgeAuraConfiguration; + type FinalityVotesCachingInterval = rialto_poa::FinalityVotesCachingInterval; + type ValidatorsConfiguration = rialto_poa::BridgeValidatorsConfiguration; + type PruningStrategy = rialto_poa::PruningStrategy; + type ChainTime = rialto_poa::ChainTime; + type OnHeadersSubmitted = (); +} + +type Kovan = pallet_bridge_eth_poa::Instance2; +impl pallet_bridge_eth_poa::Config for Runtime { + type AuraConfiguration = kovan::BridgeAuraConfiguration; + type FinalityVotesCachingInterval = kovan::FinalityVotesCachingInterval; + type ValidatorsConfiguration = kovan::BridgeValidatorsConfiguration; + type PruningStrategy = kovan::PruningStrategy; + type ChainTime = kovan::ChainTime; + type OnHeadersSubmitted = (); +} + +type RialtoCurrencyExchange = pallet_bridge_currency_exchange::Instance1; +impl pallet_bridge_currency_exchange::Config for Runtime { + type OnTransactionSubmitted = (); + type PeerBlockchain = rialto_poa::RialtoBlockchain; + type PeerMaybeLockFundsTransaction = exchange::EthTransaction; + type RecipientsMap = bp_currency_exchange::IdentityRecipients; + type Amount = Balance; + type CurrencyConverter = bp_currency_exchange::IdentityCurrencyConverter; + type DepositInto = DepositInto; +} + +type KovanCurrencyExchange = pallet_bridge_currency_exchange::Instance2; +impl pallet_bridge_currency_exchange::Config for Runtime { + type OnTransactionSubmitted = (); + type PeerBlockchain = kovan::KovanBlockchain; + type PeerMaybeLockFundsTransaction = exchange::EthTransaction; + type RecipientsMap = bp_currency_exchange::IdentityRecipients; + type Amount = Balance; + type CurrencyConverter = bp_currency_exchange::IdentityCurrencyConverter; + type DepositInto = DepositInto; +} + +impl 
pallet_bridge_dispatch::Config for Runtime { + type Event = Event; + type MessageId = (bp_messages::LaneId, bp_messages::MessageNonce); + type Call = Call; + type CallFilter = (); + type EncodedCall = crate::millau_messages::FromMillauEncodedCall; + type SourceChainAccountId = bp_millau::AccountId; + type TargetChainAccountPublic = MultiSigner; + type TargetChainSignature = MultiSignature; + type AccountIdConverter = bp_rialto::AccountIdConverter; +} + +pub struct DepositInto; + +impl bp_currency_exchange::DepositInto for DepositInto { + type Recipient = AccountId; + type Amount = Balance; + + fn deposit_into(recipient: Self::Recipient, amount: Self::Amount) -> bp_currency_exchange::Result<()> { + // let balances module make all checks for us (it won't allow depositing lower than existential + // deposit, balance overflow, ...) + let deposited = as Currency>::deposit_creating(&recipient, amount); + + // I'm dropping deposited here explicitly to illustrate the fact that it'll update `TotalIssuance` + // on drop + let deposited_amount = deposited.peek(); + drop(deposited); + + // we have 3 cases here: + // - deposited == amount: success + // - deposited == 0: deposit has failed and no changes to storage were made + // - deposited != 0: (should never happen in practice) deposit has been partially completed + match deposited_amount { + _ if deposited_amount == amount => { + log::trace!( + target: "runtime", + "Deposited {} to {:?}", + amount, + recipient, + ); + + Ok(()) + } + _ if deposited_amount == 0 => { + log::error!( + target: "runtime", + "Deposit of {} to {:?} has failed", + amount, + recipient, + ); + + Err(bp_currency_exchange::Error::DepositFailed) + } + _ => { + log::error!( + target: "runtime", + "Deposit of {} to {:?} has partially competed. 
{} has been deposited", + amount, + recipient, + deposited_amount, + ); + + // we can't return DepositFailed error here, because storage changes were made + Err(bp_currency_exchange::Error::DepositPartiallyFailed) + } + } + } +} + +impl pallet_grandpa::Config for Runtime { + type Event = Event; + type Call = Call; + type KeyOwnerProofSystem = (); + type KeyOwnerProof = >::Proof; + type KeyOwnerIdentification = + >::IdentificationTuple; + type HandleEquivocation = (); + // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) + type WeightInfo = (); +} + +parameter_types! { + pub const MinimumPeriod: u64 = bp_rialto::SLOT_DURATION / 2; +} + +impl pallet_timestamp::Config for Runtime { + /// A timestamp: milliseconds since the unix epoch. + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = MinimumPeriod; + // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) + type WeightInfo = (); +} + +parameter_types! { + pub const ExistentialDeposit: bp_rialto::Balance = 500; + // For weight estimation, we assume that the most locks on an individual account will be 50. + // This number may need to be adjusted in the future if this assumption no longer holds true. + pub const MaxLocks: u32 = 50; +} + +impl pallet_balances::Config for Runtime { + /// The type for recording an account's balance. + type Balance = Balance; + /// The ubiquitous event type. + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) + type WeightInfo = (); + type MaxLocks = MaxLocks; +} + +parameter_types! 
{ + pub const TransactionBaseFee: Balance = 0; + pub const TransactionByteFee: Balance = 1; +} + +impl pallet_transaction_payment::Config for Runtime { + type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; + type TransactionByteFee = TransactionByteFee; + type WeightToFee = IdentityFee; + type FeeMultiplierUpdate = (); +} + +impl pallet_sudo::Config for Runtime { + type Event = Event; + type Call = Call; +} + +parameter_types! { + pub const Period: BlockNumber = bp_rialto::SESSION_LENGTH; + pub const Offset: BlockNumber = 0; +} + +impl pallet_session::Config for Runtime { + type Event = Event; + type ValidatorId = ::AccountId; + type ValidatorIdOf = (); + type ShouldEndSession = pallet_session::PeriodicSessions; + type NextSessionRotation = pallet_session::PeriodicSessions; + type SessionManager = pallet_shift_session_manager::Pallet; + type SessionHandler = ::KeyTypeIdProviders; + type Keys = SessionKeys; + type DisabledValidatorsThreshold = (); + // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) + type WeightInfo = (); +} + +parameter_types! { + // This is a pretty unscientific cap. + // + // Note that once this is hit the pallet will essentially throttle incoming requests down to one + // call per block. + pub const MaxRequests: u32 = 50; + + // Number of headers to keep. + // + // Assuming the worst case of every header being finalized, we will keep headers at least for a + // week. + pub const HeadersToKeep: u32 = 7 * bp_rialto::DAYS as u32; +} + +impl pallet_bridge_grandpa::Config for Runtime { + type BridgedChain = bp_millau::Millau; + type MaxRequests = MaxRequests; + type HeadersToKeep = HeadersToKeep; + type WeightInfo = pallet_bridge_grandpa::weights::RialtoWeight; +} + +impl pallet_shift_session_manager::Config for Runtime {} + +parameter_types! 
{ + pub const MaxMessagesToPruneAtOnce: bp_messages::MessageNonce = 8; + pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = + bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE; + pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = + bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE; + // `IdentityFee` is used by Rialto => we may use weight directly + pub const GetDeliveryConfirmationTransactionFee: Balance = + bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT as _; + pub const RootAccountForPayments: Option = None; +} + +/// Instance of the messages pallet used to relay messages to/from Millau chain. +pub type WithMillauMessagesInstance = pallet_bridge_messages::DefaultInstance; + +impl pallet_bridge_messages::Config for Runtime { + type Event = Event; + type WeightInfo = pallet_bridge_messages::weights::RialtoWeight; + type Parameter = millau_messages::RialtoToMillauMessagesParameter; + type MaxMessagesToPruneAtOnce = MaxMessagesToPruneAtOnce; + type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; + type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; + + type OutboundPayload = crate::millau_messages::ToMillauMessagePayload; + type OutboundMessageFee = Balance; + + type InboundPayload = crate::millau_messages::FromMillauMessagePayload; + type InboundMessageFee = bp_millau::Balance; + type InboundRelayer = bp_millau::AccountId; + + type AccountIdConverter = bp_rialto::AccountIdConverter; + + type TargetHeaderChain = crate::millau_messages::Millau; + type LaneMessageVerifier = crate::millau_messages::ToMillauMessageVerifier; + type MessageDeliveryAndDispatchPayment = pallet_bridge_messages::instant_payments::InstantCurrencyPayments< + Runtime, + pallet_balances::Pallet, + GetDeliveryConfirmationTransactionFee, + RootAccountForPayments, + >; + + type SourceHeaderChain = crate::millau_messages::Millau; + type MessageDispatch = 
crate::millau_messages::FromMillauMessageDispatch; +} + +construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = opaque::Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + BridgeRialtoPoA: pallet_bridge_eth_poa::::{Pallet, Call, Config, Storage, ValidateUnsigned}, + BridgeKovan: pallet_bridge_eth_poa::::{Pallet, Call, Config, Storage, ValidateUnsigned}, + BridgeRialtoCurrencyExchange: pallet_bridge_currency_exchange::::{Pallet, Call}, + BridgeKovanCurrencyExchange: pallet_bridge_currency_exchange::::{Pallet, Call}, + BridgeMillauGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage}, + BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event}, + BridgeMillauMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event}, + System: frame_system::{Pallet, Call, Config, Storage, Event}, + RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Call, Storage}, + Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, + Aura: pallet_aura::{Pallet, Config}, + Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + TransactionPayment: pallet_transaction_payment::{Pallet, Storage}, + Sudo: pallet_sudo::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + ShiftSessionManager: pallet_shift_session_manager::{Pallet}, + } +); + +/// The address format for describing accounts. +pub type Address = AccountId; +/// Block header type as expected by this runtime. +pub type Header = generic::Header; +/// Block type as expected by this runtime. +pub type Block = generic::Block; +/// A Block signed with a Justification +pub type SignedBlock = generic::SignedBlock; +/// BlockId type as expected by this runtime. +pub type BlockId = generic::BlockId; +/// The SignedExtension to the basic transaction logic. 
+pub type SignedExtra = ( + frame_system::CheckSpecVersion, + frame_system::CheckTxVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, +); +/// The payload being signed in transactions. +pub type SignedPayload = generic::SignedPayload; +/// Unchecked extrinsic type as expected by this runtime. +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +/// Extrinsic type that has already been checked. +pub type CheckedExtrinsic = generic::CheckedExtrinsic; +/// Executive: handles dispatch to the various modules. +pub type Executive = + frame_executive::Executive, Runtime, AllPallets>; + +impl_runtime_apis! { + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block); + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + Runtime::metadata().into() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + + fn random_seed() -> ::Hash { + RandomnessCollectiveFlip::random_seed().0 + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Index { + System::account_nonce(account) + } + } + + impl bp_eth_poa::RialtoPoAHeaderApi for Runtime { + fn best_block() -> (u64, bp_eth_poa::H256) { + let best_block = 
BridgeRialtoPoA::best_block(); + (best_block.number, best_block.hash) + } + + fn finalized_block() -> (u64, bp_eth_poa::H256) { + let finalized_block = BridgeRialtoPoA::finalized_block(); + (finalized_block.number, finalized_block.hash) + } + + fn is_import_requires_receipts(header: bp_eth_poa::AuraHeader) -> bool { + BridgeRialtoPoA::is_import_requires_receipts(header) + } + + fn is_known_block(hash: bp_eth_poa::H256) -> bool { + BridgeRialtoPoA::is_known_block(hash) + } + } + + impl bp_eth_poa::KovanHeaderApi for Runtime { + fn best_block() -> (u64, bp_eth_poa::H256) { + let best_block = BridgeKovan::best_block(); + (best_block.number, best_block.hash) + } + + fn finalized_block() -> (u64, bp_eth_poa::H256) { + let finalized_block = BridgeKovan::finalized_block(); + (finalized_block.number, finalized_block.hash) + } + + fn is_import_requires_receipts(header: bp_eth_poa::AuraHeader) -> bool { + BridgeKovan::is_import_requires_receipts(header) + } + + fn is_known_block(hash: bp_eth_poa::H256) -> bool { + BridgeKovan::is_known_block(hash) + } + } + + impl bp_millau::MillauFinalityApi for Runtime { + fn best_finalized() -> (bp_millau::BlockNumber, bp_millau::Hash) { + let header = BridgeMillauGrandpa::best_finalized(); + (header.number, header.hash()) + } + + fn is_known_header(hash: bp_millau::Hash) -> bool { + BridgeMillauGrandpa::is_known_header(hash) + } + } + + impl bp_currency_exchange::RialtoCurrencyExchangeApi for Runtime { + fn filter_transaction_proof(proof: exchange::EthereumTransactionInclusionProof) -> bool { + BridgeRialtoCurrencyExchange::filter_transaction_proof(&proof) + } + } + + impl bp_currency_exchange::KovanCurrencyExchangeApi for Runtime { + fn filter_transaction_proof(proof: exchange::EthereumTransactionInclusionProof) -> bool { + BridgeKovanCurrencyExchange::filter_transaction_proof(&proof) + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: 
::Extrinsic, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> sp_consensus_aura::SlotDuration { + sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) + } + + fn authorities() -> Vec { + Aura::authorities() + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< + Block, + Balance, + > for Runtime { + fn query_info(uxt: ::Extrinsic, len: u32) -> RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + fn query_fee_details(uxt: ::Extrinsic, len: u32) -> FeeDetails { + TransactionPayment::query_fee_details(uxt, len) + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, sp_core::crypto::KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl fg_primitives::GrandpaApi for Runtime { + fn grandpa_authorities() -> GrandpaAuthorityList { + Grandpa::grandpa_authorities() + } + + fn submit_report_equivocation_unsigned_extrinsic( + equivocation_proof: fg_primitives::EquivocationProof< + ::Hash, + NumberFor, + >, + key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, + ) -> Option<()> { + let key_owner_proof = key_owner_proof.decode()?; + + Grandpa::submit_unsigned_equivocation_report( + equivocation_proof, + key_owner_proof, + ) + } + + fn generate_key_ownership_proof( + _set_id: fg_primitives::SetId, + _authority_id: GrandpaId, + ) -> Option { + // NOTE: this is the only implementation possible since we've + // defined our key owner proof type as a bottom type (i.e. a type + // with no values). 
+ None + } + } + + impl bp_millau::ToMillauOutboundLaneApi for Runtime { + fn estimate_message_delivery_and_dispatch_fee( + _lane_id: bp_messages::LaneId, + payload: ToMillauMessagePayload, + ) -> Option { + estimate_message_dispatch_and_delivery_fee::( + &payload, + WithMillauMessageBridge::RELAYER_FEE_PERCENT, + ).ok() + } + + fn messages_dispatch_weight( + lane: bp_messages::LaneId, + begin: bp_messages::MessageNonce, + end: bp_messages::MessageNonce, + ) -> Vec<(bp_messages::MessageNonce, Weight, u32)> { + (begin..=end).filter_map(|nonce| { + let encoded_payload = BridgeMillauMessages::outbound_message_payload(lane, nonce)?; + let decoded_payload = millau_messages::ToMillauMessagePayload::decode( + &mut &encoded_payload[..] + ).ok()?; + Some((nonce, decoded_payload.weight, encoded_payload.len() as _)) + }) + .collect() + } + + fn latest_received_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { + BridgeMillauMessages::outbound_latest_received_nonce(lane) + } + + fn latest_generated_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { + BridgeMillauMessages::outbound_latest_generated_nonce(lane) + } + } + + impl bp_millau::FromMillauInboundLaneApi for Runtime { + fn latest_received_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { + BridgeMillauMessages::inbound_latest_received_nonce(lane) + } + + fn latest_confirmed_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce { + BridgeMillauMessages::inbound_latest_confirmed_nonce(lane) + } + + fn unrewarded_relayers_state(lane: bp_messages::LaneId) -> bp_messages::UnrewardedRelayersState { + BridgeMillauMessages::inbound_unrewarded_relayers_state(lane) + } + } + + #[cfg(feature = "runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn dispatch_benchmark( + config: frame_benchmarking::BenchmarkConfig, + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{Benchmarking, BenchmarkBatch, TrackedStorageKey, add_benchmark}; + + let 
whitelist: Vec = vec![ + // Block Number + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), + // Execution Phase + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), + // Event Count + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), + // System Events + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), + // Caller 0 Account + hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da946c154ffd9992e395af90b5b13cc6f295c77033fce8a9045824a6690bbf99c6db269502f0a8d1d2a008542d5690a0749").to_vec().into(), + ]; + + let mut batches = Vec::::new(); + let params = (&config, &whitelist); + + use pallet_bridge_currency_exchange::benchmarking::{ + Pallet as BridgeCurrencyExchangeBench, + Config as BridgeCurrencyExchangeConfig, + ProofParams as BridgeCurrencyExchangeProofParams, + }; + + impl BridgeCurrencyExchangeConfig for Runtime { + fn make_proof( + proof_params: BridgeCurrencyExchangeProofParams, + ) -> crate::exchange::EthereumTransactionInclusionProof { + use bp_currency_exchange::DepositInto; + + if proof_params.recipient_exists { + >::DepositInto::deposit_into( + proof_params.recipient.clone(), + ExistentialDeposit::get(), + ).unwrap(); + } + + let (transaction, receipt) = crate::exchange::prepare_ethereum_transaction( + &proof_params.recipient, + |tx| { + // our runtime only supports transactions where data is exactly 32 bytes long + // (receiver key) + // => we are ignoring `transaction_size_factor` here + tx.value = (ExistentialDeposit::get() * 10).into(); + }, + ); + let transactions = sp_std::iter::repeat((transaction, receipt)) + .take(1 + proof_params.proof_size_factor as usize) + .collect::>(); + let block_hash = crate::exchange::prepare_environment_for_claim::(&transactions); + crate::exchange::EthereumTransactionInclusionProof { 
+ block: block_hash, + index: 0, + proof: transactions, + } + } + } + + use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge}; + use bridge_runtime_common::messages; + use pallet_bridge_messages::benchmarking::{ + Pallet as MessagesBench, + Config as MessagesConfig, + MessageDeliveryProofParams, + MessageParams, + MessageProofParams, + ProofSize as MessagesProofSize, + }; + + impl MessagesConfig for Runtime { + fn maximal_message_size() -> u32 { + messages::source::maximal_message_size::() + } + + fn bridged_relayer_id() -> Self::InboundRelayer { + Default::default() + } + + fn account_balance(account: &Self::AccountId) -> Self::OutboundMessageFee { + pallet_balances::Pallet::::free_balance(account) + } + + fn endow_account(account: &Self::AccountId) { + pallet_balances::Pallet::::make_free_balance_be( + account, + Balance::MAX / 100, + ); + } + + fn prepare_outbound_message( + params: MessageParams, + ) -> (millau_messages::ToMillauMessagePayload, Balance) { + let message_payload = vec![0; params.size as usize]; + let dispatch_origin = pallet_bridge_dispatch::CallOrigin::SourceAccount( + params.sender_account, + ); + + let message = ToMillauMessagePayload { + spec_version: 0, + weight: params.size as _, + origin: dispatch_origin, + call: message_payload, + }; + (message, pallet_bridge_messages::benchmarking::MESSAGE_FEE.into()) + } + + fn prepare_message_proof( + params: MessageProofParams, + ) -> (millau_messages::FromMillauMessagesProof, Weight) { + use crate::millau_messages::{Millau, WithMillauMessageBridge}; + use bp_messages::MessageKey; + use bridge_runtime_common::{ + messages::ChainWithMessages, + messages_benchmarking::{ed25519_sign, prepare_message_proof}, + }; + use codec::Encode; + use frame_support::weights::GetDispatchInfo; + use pallet_bridge_messages::storage_keys; + use sp_runtime::traits::Header; + + let remark = match params.size { + MessagesProofSize::Minimal(ref size) => vec![0u8; *size as _], + _ => vec![], + }; + let 
call = Call::System(SystemCall::remark(remark)); + let call_weight = call.get_dispatch_info().weight; + + let millau_account_id: bp_millau::AccountId = Default::default(); + let (rialto_raw_public, rialto_raw_signature) = ed25519_sign( + &call, + &millau_account_id, + ); + let rialto_public = MultiSigner::Ed25519(sp_core::ed25519::Public::from_raw(rialto_raw_public)); + let rialto_signature = MultiSignature::Ed25519(sp_core::ed25519::Signature::from_raw( + rialto_raw_signature, + )); + + let make_millau_message_key = |message_key: MessageKey| storage_keys::message_key::< + Runtime, + ::MessagesInstance, + >( + &message_key.lane_id, message_key.nonce, + ).0; + let make_millau_outbound_lane_data_key = |lane_id| storage_keys::outbound_lane_data_key::< + ::MessagesInstance, + >( + &lane_id, + ).0; + + let make_millau_header = |state_root| bp_millau::Header::new( + 0, + Default::default(), + state_root, + Default::default(), + Default::default(), + ); + + prepare_message_proof::( + params, + make_millau_message_key, + make_millau_outbound_lane_data_key, + make_millau_header, + call_weight, + pallet_bridge_dispatch::MessagePayload { + spec_version: VERSION.spec_version, + weight: call_weight, + origin: pallet_bridge_dispatch::CallOrigin::< + bp_millau::AccountId, + MultiSigner, + Signature, + >::TargetAccount( + millau_account_id, + rialto_public, + rialto_signature, + ), + call: call.encode(), + }.encode(), + ) + } + + fn prepare_message_delivery_proof( + params: MessageDeliveryProofParams, + ) -> millau_messages::ToMillauMessagesDeliveryProof { + use crate::millau_messages::{Millau, WithMillauMessageBridge}; + use bridge_runtime_common::{ + messages::ChainWithMessages, + messages_benchmarking::prepare_message_delivery_proof, + }; + use sp_runtime::traits::Header; + + prepare_message_delivery_proof::( + params, + |lane_id| pallet_bridge_messages::storage_keys::inbound_lane_data_key::< + Runtime, + ::MessagesInstance, + >( + &lane_id, + ).0, + |state_root| 
bp_millau::Header::new( + 0, + Default::default(), + state_root, + Default::default(), + Default::default(), + ), + ) + } + } + + add_benchmark!( + params, + batches, + pallet_bridge_currency_exchange, + BridgeCurrencyExchangeBench:: + ); + add_benchmark!( + params, + batches, + pallet_bridge_messages, + MessagesBench:: + ); + add_benchmark!(params, batches, pallet_bridge_grandpa, BridgeMillauGrandpa); + + if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } + Ok(batches) + } + } +} + +/// Millau account ownership digest from Rialto. +/// +/// The byte vector returned by this function should be signed with a Millau account private key. +/// This way, the owner of `rialto_account_id` on Rialto proves that the 'millau' account private key +/// is also under his control. +pub fn millau_account_ownership_digest( + millau_call: &Call, + rialto_account_id: AccountId, + millau_spec_version: SpecVersion, +) -> sp_std::vec::Vec +where + Call: codec::Encode, + AccountId: codec::Encode, + SpecVersion: codec::Encode, +{ + pallet_bridge_dispatch::account_ownership_digest( + millau_call, + rialto_account_id, + millau_spec_version, + bp_runtime::RIALTO_BRIDGE_INSTANCE, + ) +} + +#[cfg(test)] +mod tests { + use super::*; + use bp_currency_exchange::DepositInto; + use bridge_runtime_common::messages; + + fn run_deposit_into_test(test: impl Fn(AccountId) -> Balance) { + let mut ext: sp_io::TestExternalities = SystemConfig::default().build_storage::().unwrap().into(); + ext.execute_with(|| { + // initially issuance is zero + assert_eq!( + as Currency>::total_issuance(), + 0, + ); + + // create account + let account: AccountId = [1u8; 32].into(); + let initial_amount = ExistentialDeposit::get(); + let deposited = + as Currency>::deposit_creating(&account, initial_amount); + drop(deposited); + assert_eq!( + as Currency>::total_issuance(), + initial_amount, + ); + assert_eq!( + as Currency>::free_balance(&account), + initial_amount, + ); + + // run test + 
let total_issuance_change = test(account); + + // check that total issuance has changed by `run_deposit_into_test` + assert_eq!( + as Currency>::total_issuance(), + initial_amount + total_issuance_change, + ); + }); + } + + #[test] + fn ensure_rialto_message_lane_weights_are_correct() { + type Weights = pallet_bridge_messages::weights::RialtoWeight; + + pallet_bridge_messages::ensure_weights_are_correct::( + bp_rialto::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT, + bp_rialto::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT, + bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, + ); + + let max_incoming_message_proof_size = bp_millau::EXTRA_STORAGE_PROOF_SIZE.saturating_add( + messages::target::maximal_incoming_message_size(bp_rialto::max_extrinsic_size()), + ); + pallet_bridge_messages::ensure_able_to_receive_message::( + bp_rialto::max_extrinsic_size(), + bp_rialto::max_extrinsic_weight(), + max_incoming_message_proof_size, + messages::target::maximal_incoming_message_dispatch_weight(bp_rialto::max_extrinsic_weight()), + ); + + let max_incoming_inbound_lane_data_proof_size = bp_messages::InboundLaneData::<()>::encoded_size_hint( + bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, + bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _, + ) + .unwrap_or(u32::MAX); + pallet_bridge_messages::ensure_able_to_receive_confirmation::( + bp_rialto::max_extrinsic_size(), + bp_rialto::max_extrinsic_weight(), + max_incoming_inbound_lane_data_proof_size, + bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, + ); + } + + #[test] + fn deposit_into_existing_account_works() { + run_deposit_into_test(|existing_account| { + let initial_amount = + as Currency>::free_balance(&existing_account); + let additional_amount = 10_000; + >::DepositInto::deposit_into( + existing_account.clone(), + additional_amount, + ) + .unwrap(); + assert_eq!( + as Currency>::free_balance(&existing_account), + initial_amount + additional_amount, + 
); + additional_amount + }); + } + + #[test] + fn deposit_into_new_account_works() { + run_deposit_into_test(|_| { + let initial_amount = 0; + let additional_amount = ExistentialDeposit::get() + 10_000; + let new_account: AccountId = [42u8; 32].into(); + >::DepositInto::deposit_into( + new_account.clone(), + additional_amount, + ) + .unwrap(); + assert_eq!( + as Currency>::free_balance(&new_account), + initial_amount + additional_amount, + ); + additional_amount + }); + } +} diff --git a/polkadot/bin/rialto/runtime/src/millau_messages.rs b/polkadot/bin/rialto/runtime/src/millau_messages.rs new file mode 100644 index 00000000000..8ee2094660c --- /dev/null +++ b/polkadot/bin/rialto/runtime/src/millau_messages.rs @@ -0,0 +1,253 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Everything required to serve Millau <-> Rialto messages. 
+ +use crate::Runtime; + +use bp_messages::{ + source_chain::TargetHeaderChain, + target_chain::{ProvedMessages, SourceHeaderChain}, + InboundLaneData, LaneId, Message, MessageNonce, Parameter as MessagesParameter, +}; +use bp_runtime::{InstanceId, MILLAU_BRIDGE_INSTANCE}; +use bridge_runtime_common::messages::{self, MessageBridge, MessageTransaction}; +use codec::{Decode, Encode}; +use frame_support::{ + parameter_types, + weights::{DispatchClass, Weight}, + RuntimeDebug, +}; +use sp_runtime::{FixedPointNumber, FixedU128}; +use sp_std::{convert::TryFrom, ops::RangeInclusive}; + +/// Initial value of `MillauToRialtoConversionRate` parameter. +pub const INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE: FixedU128 = FixedU128::from_inner(FixedU128::DIV); + +parameter_types! { + /// Millau to Rialto conversion rate. Initially we treat both tokens as equal. + pub storage MillauToRialtoConversionRate: FixedU128 = INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE; +} + +/// Message payload for Rialto -> Millau messages. +pub type ToMillauMessagePayload = messages::source::FromThisChainMessagePayload; + +/// Message verifier for Rialto -> Millau messages. +pub type ToMillauMessageVerifier = messages::source::FromThisChainMessageVerifier; + +/// Message payload for Millau -> Rialto messages. +pub type FromMillauMessagePayload = messages::target::FromBridgedChainMessagePayload; + +/// Encoded Rialto Call as it comes from Millau. +pub type FromMillauEncodedCall = messages::target::FromBridgedChainEncodedMessageCall; + +/// Call-dispatch based message dispatch for Millau -> Rialto messages. +pub type FromMillauMessageDispatch = messages::target::FromBridgedChainMessageDispatch< + WithMillauMessageBridge, + crate::Runtime, + pallet_bridge_dispatch::DefaultInstance, +>; + +/// Messages proof for Millau -> Rialto messages. +pub type FromMillauMessagesProof = messages::target::FromBridgedChainMessagesProof; + +/// Messages delivery proof for Rialto -> Millau messages. 
+pub type ToMillauMessagesDeliveryProof = messages::source::FromBridgedChainMessagesDeliveryProof; + +/// Millau <-> Rialto message bridge. +#[derive(RuntimeDebug, Clone, Copy)] +pub struct WithMillauMessageBridge; + +impl MessageBridge for WithMillauMessageBridge { + const INSTANCE: InstanceId = MILLAU_BRIDGE_INSTANCE; + + const RELAYER_FEE_PERCENT: u32 = 10; + + type ThisChain = Rialto; + type BridgedChain = Millau; + + fn bridged_balance_to_this_balance(bridged_balance: bp_millau::Balance) -> bp_rialto::Balance { + bp_rialto::Balance::try_from(MillauToRialtoConversionRate::get().saturating_mul_int(bridged_balance)) + .unwrap_or(bp_rialto::Balance::MAX) + } +} + +/// Rialto chain from message lane point of view. +#[derive(RuntimeDebug, Clone, Copy)] +pub struct Rialto; + +impl messages::ChainWithMessages for Rialto { + type Hash = bp_rialto::Hash; + type AccountId = bp_rialto::AccountId; + type Signer = bp_rialto::AccountSigner; + type Signature = bp_rialto::Signature; + type Weight = Weight; + type Balance = bp_rialto::Balance; + + type MessagesInstance = crate::WithMillauMessagesInstance; +} + +impl messages::ThisChainWithMessages for Rialto { + type Call = crate::Call; + + fn is_outbound_lane_enabled(lane: &LaneId) -> bool { + *lane == [0, 0, 0, 0] || *lane == [0, 0, 0, 1] + } + + fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { + MessageNonce::MAX + } + + fn estimate_delivery_confirmation_transaction() -> MessageTransaction { + let inbound_data_size = + InboundLaneData::::encoded_size_hint(bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, 1) + .unwrap_or(u32::MAX); + + MessageTransaction { + dispatch_weight: bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, + size: inbound_data_size + .saturating_add(bp_millau::EXTRA_STORAGE_PROOF_SIZE) + .saturating_add(bp_rialto::TX_EXTRA_BYTES), + } + } + + fn transaction_payment(transaction: MessageTransaction) -> bp_rialto::Balance { + // in our testnets, both per-byte fee and weight-to-fee are 1:1 
+ messages::transaction_payment( + bp_rialto::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, + 1, + FixedU128::zero(), + |weight| weight as _, + transaction, + ) + } +} + +/// Millau chain from message lane point of view. +#[derive(RuntimeDebug, Clone, Copy)] +pub struct Millau; + +impl messages::ChainWithMessages for Millau { + type Hash = bp_millau::Hash; + type AccountId = bp_millau::AccountId; + type Signer = bp_millau::AccountSigner; + type Signature = bp_millau::Signature; + type Weight = Weight; + type Balance = bp_millau::Balance; + + type MessagesInstance = pallet_bridge_messages::DefaultInstance; +} + +impl messages::BridgedChainWithMessages for Millau { + fn maximal_extrinsic_size() -> u32 { + bp_millau::max_extrinsic_size() + } + + fn message_weight_limits(_message_payload: &[u8]) -> RangeInclusive { + // we don't want to relay too large messages + keep reserve for future upgrades + let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(bp_millau::max_extrinsic_weight()); + + // we're charging for payload bytes in `WithMillauMessageBridge::transaction_payment` function + // + // this bridge may be used to deliver all kind of messages, so we're not making any assumptions about + // minimal dispatch weight here + + 0..=upper_limit + } + + fn estimate_delivery_transaction( + message_payload: &[u8], + message_dispatch_weight: Weight, + ) -> MessageTransaction { + let message_payload_len = u32::try_from(message_payload.len()).unwrap_or(u32::MAX); + let extra_bytes_in_payload = Weight::from(message_payload_len) + .saturating_sub(pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH.into()); + + MessageTransaction { + dispatch_weight: extra_bytes_in_payload + .saturating_mul(bp_millau::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT) + .saturating_add(bp_millau::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT) + .saturating_add(message_dispatch_weight), + size: message_payload_len + .saturating_add(bp_rialto::EXTRA_STORAGE_PROOF_SIZE) + 
.saturating_add(bp_millau::TX_EXTRA_BYTES), + } + } + + fn transaction_payment(transaction: MessageTransaction) -> bp_millau::Balance { + // in our testnets, both per-byte fee and weight-to-fee are 1:1 + messages::transaction_payment( + bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, + 1, + FixedU128::zero(), + |weight| weight as _, + transaction, + ) + } +} + +impl TargetHeaderChain for Millau { + type Error = &'static str; + // The proof is: + // - hash of the header this proof has been created with; + // - the storage proof of one or several keys; + // - id of the lane we prove state of. + type MessagesDeliveryProof = ToMillauMessagesDeliveryProof; + + fn verify_message(payload: &ToMillauMessagePayload) -> Result<(), Self::Error> { + messages::source::verify_chain_message::(payload) + } + + fn verify_messages_delivery_proof( + proof: Self::MessagesDeliveryProof, + ) -> Result<(LaneId, InboundLaneData), Self::Error> { + messages::source::verify_messages_delivery_proof::(proof) + } +} + +impl SourceHeaderChain for Millau { + type Error = &'static str; + // The proof is: + // - hash of the header this proof has been created with; + // - the storage proof of one or several keys; + // - id of the lane we prove messages for; + // - inclusive range of messages nonces that are proved. + type MessagesProof = FromMillauMessagesProof; + + fn verify_messages_proof( + proof: Self::MessagesProof, + messages_count: u32, + ) -> Result>, Self::Error> { + messages::target::verify_messages_proof::(proof, messages_count) + } +} + +/// Rialto -> Millau message lane pallet parameters. +#[derive(RuntimeDebug, Clone, Encode, Decode, PartialEq, Eq)] +pub enum RialtoToMillauMessagesParameter { + /// The conversion formula we use is: `RialtoTokens = MillauTokens * conversion_rate`. 
+ MillauToRialtoConversionRate(FixedU128), +} + +impl MessagesParameter for RialtoToMillauMessagesParameter { + fn save(&self) { + match *self { + RialtoToMillauMessagesParameter::MillauToRialtoConversionRate(ref conversion_rate) => { + MillauToRialtoConversionRate::set(conversion_rate) + } + } + } +} diff --git a/polkadot/bin/rialto/runtime/src/rialto_poa.rs b/polkadot/bin/rialto/runtime/src/rialto_poa.rs new file mode 100644 index 00000000000..83b263975a3 --- /dev/null +++ b/polkadot/bin/rialto/runtime/src/rialto_poa.rs @@ -0,0 +1,175 @@ +// Copyright 2020-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Configuration parameters for the Rialto PoA chain. + +use crate::exchange::EthereumTransactionInclusionProof; + +use bp_eth_poa::{Address, AuraHeader, RawTransaction, U256}; +use bp_header_chain::InclusionProofVerifier; +use frame_support::RuntimeDebug; +use hex_literal::hex; +use pallet_bridge_eth_poa::{ + AuraConfiguration, ChainTime as TChainTime, PruningStrategy as TPruningStrategy, ValidatorsConfiguration, + ValidatorsSource, +}; +use sp_std::prelude::*; + +frame_support::parameter_types! 
{ + pub const FinalityVotesCachingInterval: Option = Some(8); + pub BridgeAuraConfiguration: AuraConfiguration = + aura_configuration(); + pub BridgeValidatorsConfiguration: ValidatorsConfiguration = + validators_configuration(); +} + +/// Max number of finalized headers to keep. +const FINALIZED_HEADERS_TO_KEEP: u64 = 5_000; + +/// Aura engine configuration for Rialto chain. +pub fn aura_configuration() -> AuraConfiguration { + AuraConfiguration { + empty_steps_transition: 0xfffffffff, + strict_empty_steps_transition: 0, + validate_step_transition: 0, + validate_score_transition: 0, + two_thirds_majority_transition: u64::max_value(), + min_gas_limit: 0x1388.into(), + max_gas_limit: U256::max_value(), + maximum_extra_data_size: 0x20, + } +} + +/// Validators configuration for Rialto PoA chain. +pub fn validators_configuration() -> ValidatorsConfiguration { + ValidatorsConfiguration::Single(ValidatorsSource::List(genesis_validators())) +} + +/// Genesis validators set of Rialto PoA chain. +pub fn genesis_validators() -> Vec
{ + vec![ + hex!("005e714f896a8b7cede9d38688c1a81de72a58e4").into(), + hex!("007594304039c2937a12220338aab821d819f5a4").into(), + hex!("004e7a39907f090e19b0b80a277e77b72b22e269").into(), + ] +} + +/// Genesis header of the Rialto PoA chain. +/// +/// To obtain genesis header from a running node, invoke: +/// ```bash +/// $ http localhost:8545 jsonrpc=2.0 id=1 method=eth_getBlockByNumber params:='["earliest", false]' -v +/// ``` +pub fn genesis_header() -> AuraHeader { + AuraHeader { + parent_hash: Default::default(), + timestamp: 0, + number: 0, + author: Default::default(), + transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), + uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").into(), + extra_data: vec![], + state_root: hex!("a992d04c791620ed7ed96555a80cf0568355bb4bee2656f46899a4372f25f248").into(), + receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(), + log_bloom: Default::default(), + gas_used: Default::default(), + gas_limit: 0x222222.into(), + difficulty: 0x20000.into(), + seal: vec![vec![0x80], { + let mut vec = vec![0xb8, 0x41]; + vec.resize(67, 0); + vec + }], + } +} + +/// Rialto PoA headers pruning strategy. +/// +/// We do not prune unfinalized headers because exchange module only accepts +/// claims from finalized headers. And if we're pruning unfinalized headers, then +/// some claims may never be accepted. 
+#[derive(Default, RuntimeDebug)] +pub struct PruningStrategy; + +impl TPruningStrategy for PruningStrategy { + fn pruning_upper_bound(&mut self, _best_number: u64, best_finalized_number: u64) -> u64 { + best_finalized_number.saturating_sub(FINALIZED_HEADERS_TO_KEEP) + } +} + +/// ChainTime provider +#[derive(Default)] +pub struct ChainTime; + +impl TChainTime for ChainTime { + fn is_timestamp_ahead(&self, timestamp: u64) -> bool { + let now = super::Timestamp::now(); + timestamp > now + } +} + +/// The Rialto PoA Blockchain as seen by the runtime. +pub struct RialtoBlockchain; + +impl InclusionProofVerifier for RialtoBlockchain { + type Transaction = RawTransaction; + type TransactionInclusionProof = EthereumTransactionInclusionProof; + + fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option { + let is_transaction_finalized = + crate::BridgeRialtoPoA::verify_transaction_finalized(proof.block, proof.index, &proof.proof); + + if !is_transaction_finalized { + return None; + } + + proof.proof.get(proof.index as usize).map(|(tx, _)| tx.clone()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn genesis_hash_matches() { + assert_eq!( + genesis_header().compute_hash(), + hex!("1468e1a0fa20d30025a5a0f87e1cced4fdc393b84b7d2850b11ca5863db482cb").into(), + ); + } + + #[test] + fn pruning_strategy_keeps_enough_headers() { + assert_eq!( + PruningStrategy::default().pruning_upper_bound(100_000, 1_000), + 0, + "1_000 <= 5_000 => nothing should be pruned yet", + ); + + assert_eq!( + PruningStrategy::default().pruning_upper_bound(100_000, 5_000), + 0, + "5_000 <= 5_000 => nothing should be pruned yet", + ); + + assert_eq!( + PruningStrategy::default().pruning_upper_bound(100_000, 10_000), + 5_000, + "5_000 <= 10_000 => we're ready to prune first 5_000 headers", + ); + } +} diff --git a/polkadot/bin/runtime-common/Cargo.toml b/polkadot/bin/runtime-common/Cargo.toml new file mode 100644 index 00000000000..83803d06deb --- /dev/null 
+++ b/polkadot/bin/runtime-common/Cargo.toml @@ -0,0 +1,56 @@ +[package] +name = "bridge-runtime-common" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/parity-bridges-common/" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +ed25519-dalek = { version = "1.0", default-features = false, optional = true } +hash-db = { version = "0.15.2", default-features = false } + +# Bridge dependencies + +bp-message-dispatch = { path = "../../primitives/message-dispatch", default-features = false } +bp-messages = { path = "../../primitives/messages", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } +pallet-bridge-dispatch = { path = "../../modules/dispatch", default-features = false } +pallet-bridge-grandpa = { path = "../../modules/grandpa", default-features = false } +pallet-bridge-messages = { path = "../../modules/messages", default-features = false } + +# Substrate dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + +[features] +default = ["std"] +std = [ + "bp-message-dispatch/std", + "bp-messages/std", + "bp-runtime/std", + "codec/std", 
+ "frame-support/std", + "hash-db/std", + "pallet-bridge-dispatch/std", + "pallet-bridge-grandpa/std", + "pallet-bridge-messages/std", + "sp-core/std", + "sp-runtime/std", + "sp-state-machine/std", + "sp-std/std", + "sp-trie/std", +] +runtime-benchmarks = [ + "ed25519-dalek/u64_backend", + "pallet-bridge-grandpa/runtime-benchmarks", + "pallet-bridge-messages/runtime-benchmarks", + "sp-state-machine", +] diff --git a/polkadot/bin/runtime-common/README.md b/polkadot/bin/runtime-common/README.md new file mode 100644 index 00000000000..b375f48309c --- /dev/null +++ b/polkadot/bin/runtime-common/README.md @@ -0,0 +1,176 @@ +# Helpers for Messages Module Integration + +The [`messages`](./src/messages.rs) module of this crate contains a bunch of helpers for integrating +messages module into your runtime. Basic prerequisites of these helpers are: +- we're going to bridge Substrate-based chain with another Substrate-based chain; +- both chains have [messages module](../../modules/messages/README.md), Substrate bridge + module and the [call dispatch module](../../modules/dispatch/README.md); +- all message lanes are identical and may be used to transfer the same messages; +- the messages sent over the bridge are dispatched using + [call dispatch module](../../modules/dispatch/README.md); +- the messages are `pallet_bridge_dispatch::MessagePayload` structures, where `call` field is + encoded `Call` of the target chain. This means that the `Call` is opaque to the + [messages module](../../modules/messages/README.md) instance at the source chain. 
+ It is pre-encoded by the message submitter; +- all proofs in the [messages module](../../modules/messages/README.md) transactions are + based on the storage proofs from the bridged chain: storage proof of the outbound message (value + from the `pallet_bridge_messages::Store::MessagePayload` map), storage proof of the outbound lane + state (value from the `pallet_bridge_messages::Store::OutboundLanes` map) and storage proof of the + inbound lane state (value from the `pallet_bridge_messages::Store::InboundLanes` map); +- storage proofs are built at the finalized headers of the corresponding chain. So all message lane + transactions with proofs are verifying storage proofs against finalized chain headers from + Substrate bridge module. + +**IMPORTANT NOTE**: after reading this document, you may refer to our test runtimes +([rialto_messages.rs](../millau/runtime/src/rialto_messages.rs) and/or +[millau_messages.rs](../rialto/runtime/src/millau_messages.rs)) to see how to use these helpers. + +## Contents +- [`MessageBridge` Trait](#messagebridge-trait) +- [`ChainWithMessages` Trait ](#ChainWithMessages-trait) +- [Helpers for the Source Chain](#helpers-for-the-source-chain) +- [Helpers for the Target Chain](#helpers-for-the-target-chain) + +## `MessageBridge` Trait + +The essence of your integration will be a struct that implements a `MessageBridge` trait. It has +single method (`MessageBridge::bridged_balance_to_this_balance`), used to convert from bridged chain +tokens into this chain tokens. The bridge also requires two associated types to be specified - +`ThisChain` and `BridgedChain`. + +Worth to say that if you're going to use hardcoded constant (conversion rate) in the +`MessageBridge::bridged_balance_to_this_balance` method (or in any other method of +`ThisChainWithMessages` or `BridgedChainWithMessages` traits), then you should take a +look at the +[messages parameters functionality](../../modules/messages/README.md#Non-Essential-Functionality). 
+They allow pallet owner to update constants more frequently than runtime upgrade happens. + +## `ChainWithMessages` Trait + +The trait is quite simple and can easily be implemented - you just need to specify types used at the +corresponding chain. There is single exception, though (it may be changed in the future): + +- `ChainWithMessages::MessagesInstance`: this is used to compute runtime storage keys. There + may be several instances of messages pallet, included in the Runtime. Every instance stores + messages and these messages stored under different keys. When we are verifying storage proofs from + the bridged chain, we should know which instance we're talking to. This is fine, but there's + significant inconvenience with that - this chain runtime must have the same messages pallet + instance. This does not necessarily mean that we should use the same instance on both chains - + this instance may be used to bridge with another chain/instance, or may not be used at all. + +## `ThisChainWithMessages` Trait + +This trait represents this chain from bridge point of view. Let's review every method of this trait: + +- `ThisChainWithMessages::is_outbound_lane_enabled`: is used to check whether given lane accepts + outbound messages. + +- `ThisChainWithMessages::maximal_pending_messages_at_outbound_lane`: you should return maximal + number of pending (undelivered) messages from this function. Returning small values would require + relayers to operate faster and could make message sending logic more complicated. On the other + hand, returning large values could lead to chain state growth. + +- `ThisChainWithMessages::estimate_delivery_confirmation_transaction`: you'll need to return + estimated size and dispatch weight of the delivery confirmation transaction (that happens on + this chain) from this function. + +- `ThisChainWithMessages::transaction_payment`: you'll need to return fee that the submitter + must pay for given transaction on this chain. 
Normally, you would use transaction payment pallet + for this. However, if your chain has non-zero fee multiplier set, this would mean that the + payment will be computed using current value of this multiplier. But since this transaction + will be submitted in the future, you may want to choose other value instead. Otherwise, + non-altruistic relayer may choose not to submit this transaction until number of transactions + will decrease. + +## `BridgedChainWithMessages` Trait + +This trait represents this chain from bridge point of view. Let's review every method of this trait: + +- `BridgedChainWithMessages::maximal_extrinsic_size`: you will need to return the maximal + extrinsic size of the target chain from this function. + +- `MessageBridge::message_weight_limits`: you'll need to return a range of + dispatch weights that the outbound message may take at the target chain. Please keep in mind that + our helpers assume that the message is an encoded call of the target chain. But we never decode + this call at the source chain. So you can't simply get dispatch weight from pre-dispatch + information. Instead there are two options to prepare this range: if you know which calls are to + be sent over your bridge, then you may just return weight ranges for these particular calls. + Otherwise, if you're going to accept all kinds of calls, you may just return range `[0; maximal + incoming message dispatch weight]`. If you choose the latter, then you shall remember that the + delivery transaction itself has some weight, so you can't accept messages with weight equal to + maximal weight of extrinsic at the target chain. In our test chains, we reject all messages that + have declared dispatch weight larger than 50% of the maximal bridged extrinsic weight. + +- `MessageBridge::estimate_delivery_transaction`: you will need to return estimated dispatch weight and + size of the delivery transaction that delivers a given message to the target chain. 
+ +- `MessageBridge::transaction_payment`: you'll need to return fee that the submitter + must pay for given transaction on bridged chain. The best case is when you have the same conversion + formula on both chains - then you may just reuse the `ThisChainWithMessages::transaction_payment` + implementation. Otherwise, you'll need to hardcode this formula into your runtime. + +## Helpers for the Source Chain + +The helpers for the Source Chain reside in the `source` submodule of the +[`messages`](./src/messages.rs) module. The structs are: `FromThisChainMessagePayload`, +`FromBridgedChainMessagesDeliveryProof`, `FromThisChainMessageVerifier`. And the helper functions +are: `maximal_message_size`, `verify_chain_message`, `verify_messages_delivery_proof` and +`estimate_message_dispatch_and_delivery_fee`. + +`FromThisChainMessagePayload` is a message that the sender sends through our bridge. It is the +`pallet_bridge_dispatch::MessagePayload`, where `call` field is encoded target chain call. So +at this chain we don't see internals of this call - we just know its size. + +`FromThisChainMessageVerifier` is an implementation of `bp_messages::LaneMessageVerifier`. It +has following checks in its `verify_message` method: + +1. it'll verify that the used outbound lane is enabled in our runtime; + +1. it'll reject messages if there are too many undelivered outbound messages at this lane. The + sender need to wait while relayers will do their work before sending the message again; + +1. it'll reject a message if it has the wrong dispatch origin declared. Like if the submitter is not + the root of this chain, but it tries to dispatch the message at the target chain using + `pallet_bridge_dispatch::CallOrigin::SourceRoot` origin. Or he has provided wrong signature + in the `pallet_bridge_dispatch::CallOrigin::TargetAccount` origin; + +1. 
it'll reject a message if the delivery and dispatch fee that the submitter wants to pay is lesser + than the fee that is computed using the `estimate_message_dispatch_and_delivery_fee` function. + +`estimate_message_dispatch_and_delivery_fee` returns a minimal fee that the submitter needs to pay +for sending a given message. The fee includes: payment for the delivery transaction at the target +chain, payment for delivery confirmation transaction on this chain, payment for `Call` dispatch at +the target chain and relayer interest. + +`FromBridgedChainMessagesDeliveryProof` holds the lane identifier and the storage proof of this +inbound lane state at the bridged chain. This also holds the hash of the target chain header, that +was used to generate this storage proof. The proof is verified by the +`verify_messages_delivery_proof`, which simply checks that the target chain header is finalized +(using Substrate bridge module) and then reads the inbound lane state from the proof. + +`verify_chain_message` function checks that the message may be delivered to the bridged chain. There +are two main checks: + +1. that the message size is less than or equal to the `2/3` of maximal extrinsic size at the target + chain. We leave `1/3` for signed extras and for the storage proof overhead; + +1. that the message dispatch weight is less than or equal to the `1/2` of maximal normal extrinsic + weight at the target chain. We leave `1/2` for the delivery transaction overhead. + +## Helpers for the Target Chain + +The helpers for the target chain reside in the `target` submodule of the +[`messages`](./src/messages.rs) module. The structs are: `FromBridgedChainMessagePayload`, +`FromBridgedChainMessagesProof`, `FromBridgedChainMessagesProof`. And the helper functions are: +`maximal_incoming_message_dispatch_weight`, `maximal_incoming_message_size` and +`verify_messages_proof`. + +`FromBridgedChainMessagePayload` corresponds to the `FromThisChainMessagePayload` at the bridged +chain. 
We expect that messages with this payload are stored in the `OutboundMessages` storage map of +the [messages module](../../modules/messages/README.md). This map is used to build +`FromBridgedChainMessagesProof`. The proof holds the lane id, range of message nonces included in +the proof, storage proof of `OutboundMessages` entries and the hash of bridged chain header that has +been used to build the proof. Additionally, there's storage proof may contain the proof of outbound +lane state. It may be required to prune `relayers` entries at this chain (see +[messages module documentation](../../modules/messages/README.md#What-about-other-Constants-in-the-Messages-Module-Configuration-Trait) +for details). This proof is verified by the `verify_messages_proof` function. diff --git a/polkadot/bin/runtime-common/src/lib.rs b/polkadot/bin/runtime-common/src/lib.rs new file mode 100644 index 00000000000..ae7efb4a419 --- /dev/null +++ b/polkadot/bin/runtime-common/src/lib.rs @@ -0,0 +1,22 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Common types/functions that may be used by runtimes of all bridged chains. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +pub mod messages; +pub mod messages_benchmarking; diff --git a/polkadot/bin/runtime-common/src/messages.rs b/polkadot/bin/runtime-common/src/messages.rs new file mode 100644 index 00000000000..8e83c0f94ad --- /dev/null +++ b/polkadot/bin/runtime-common/src/messages.rs @@ -0,0 +1,1441 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Types that allow runtime to act as a source/target endpoint of message lanes. +//! +//! Messages are assumed to be encoded `Call`s of the target chain. Call-dispatch +//! pallet is used to dispatch incoming messages. Message identified by a tuple +//! of to elements - message lane id and message nonce. 
+ +use bp_message_dispatch::MessageDispatch as _; +use bp_messages::{ + source_chain::{LaneMessageVerifier, Sender}, + target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages}, + InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, OutboundLaneData, +}; +use bp_runtime::{InstanceId, Size, StorageProofChecker}; +use codec::{Decode, Encode}; +use frame_support::{traits::Instance, weights::Weight, RuntimeDebug}; +use hash_db::Hasher; +use sp_runtime::{ + traits::{AtLeast32BitUnsigned, CheckedAdd, CheckedDiv, CheckedMul}, + FixedPointNumber, FixedPointOperand, FixedU128, +}; +use sp_std::{cmp::PartialOrd, convert::TryFrom, fmt::Debug, marker::PhantomData, ops::RangeInclusive, vec::Vec}; +use sp_trie::StorageProof; + +/// Bidirectional message bridge. +pub trait MessageBridge { + /// Instance id of this bridge. + const INSTANCE: InstanceId; + + /// Relayer interest (in percents). + const RELAYER_FEE_PERCENT: u32; + + /// This chain in context of message bridge. + type ThisChain: ThisChainWithMessages; + /// Bridged chain in context of message bridge. + type BridgedChain: BridgedChainWithMessages; + + /// Convert Bridged chain balance into This chain balance. + fn bridged_balance_to_this_balance(bridged_balance: BalanceOf>) -> BalanceOf>; +} + +/// Chain that has `pallet-bridge-messages` and `dispatch` modules. +pub trait ChainWithMessages { + /// Hash used in the chain. + type Hash: Decode; + /// Accound id on the chain. + type AccountId: Encode + Decode; + /// Public key of the chain account that may be used to verify signatures. + type Signer: Decode; + /// Signature type used on the chain. + type Signature: Decode; + /// Type of weight that is used on the chain. This would almost always be a regular + /// `frame_support::weight::Weight`. But since the meaning of weight on different chains + /// may be different, the `WeightOf<>` construct is used to avoid confusion between + /// different weights. 
+ type Weight: From + PartialOrd; + /// Type of balances that is used on the chain. + type Balance: Encode + Decode + CheckedAdd + CheckedDiv + CheckedMul + PartialOrd + From + Copy; + + /// Instance of the `pallet-bridge-messages` pallet. + type MessagesInstance: Instance; +} + +/// Message related transaction parameters estimation. +#[derive(RuntimeDebug)] +pub struct MessageTransaction { + /// The estimated dispatch weight of the transaction. + pub dispatch_weight: Weight, + /// The estimated size of the encoded transaction. + pub size: u32, +} + +/// This chain that has `pallet-bridge-messages` and `dispatch` modules. +pub trait ThisChainWithMessages: ChainWithMessages { + /// Call type on the chain. + type Call: Encode + Decode; + + /// Are we accepting any messages to the given lane? + fn is_outbound_lane_enabled(lane: &LaneId) -> bool; + + /// Maximal number of pending (not yet delivered) messages at This chain. + /// + /// Any messages over this limit, will be rejected. + fn maximal_pending_messages_at_outbound_lane() -> MessageNonce; + + /// Estimate size and weight of single message delivery confirmation transaction at This chain. + fn estimate_delivery_confirmation_transaction() -> MessageTransaction>; + + /// Returns minimal transaction fee that must be paid for given transaction at This chain. + fn transaction_payment(transaction: MessageTransaction>) -> BalanceOf; +} + +/// Bridged chain that has `pallet-bridge-messages` and `dispatch` modules. +pub trait BridgedChainWithMessages: ChainWithMessages { + /// Maximal extrinsic size at Bridged chain. + fn maximal_extrinsic_size() -> u32; + + /// Returns feasible weights range for given message payload at the Bridged chain. + /// + /// If message is being sent with the weight that is out of this range, then it + /// should be rejected. 
+ /// + /// Weights returned from this function shall not include transaction overhead + /// (like weight of signature and signed extensions verification), because they're + /// already accounted by the `weight_of_delivery_transaction`. So this function should + /// return pure call dispatch weights range. + fn message_weight_limits(message_payload: &[u8]) -> RangeInclusive; + + /// Estimate size and weight of single message delivery transaction at the Bridged chain. + fn estimate_delivery_transaction( + message_payload: &[u8], + message_dispatch_weight: WeightOf, + ) -> MessageTransaction>; + + /// Returns minimal transaction fee that must be paid for given transaction at the Bridged chain. + fn transaction_payment(transaction: MessageTransaction>) -> BalanceOf; +} + +pub(crate) type ThisChain = ::ThisChain; +pub(crate) type BridgedChain = ::BridgedChain; +pub(crate) type HashOf = ::Hash; +pub(crate) type AccountIdOf = ::AccountId; +pub(crate) type SignerOf = ::Signer; +pub(crate) type SignatureOf = ::Signature; +pub(crate) type WeightOf = ::Weight; +pub(crate) type BalanceOf = ::Balance; +pub(crate) type MessagesInstanceOf = ::MessagesInstance; + +pub(crate) type CallOf = ::Call; + +/// Raw storage proof type (just raw trie nodes). +type RawStorageProof = Vec>; + +/// Compute fee of transaction at runtime where regular transaction payment pallet is being used. +/// +/// The value of `multiplier` parameter is the expected value of `pallet_transaction_payment::NextFeeMultiplier` +/// at the moment when transaction is submitted. If you're charging this payment in advance (and that's what +/// happens with delivery and confirmation transaction in this crate), then there's a chance that the actual +/// fee will be larger than what is paid in advance. So the value must be chosen carefully. 
+pub fn transaction_payment( + base_extrinsic_weight: Weight, + per_byte_fee: Balance, + multiplier: FixedU128, + weight_to_fee: impl Fn(Weight) -> Balance, + transaction: MessageTransaction, +) -> Balance { + // base fee is charged for every tx + let base_fee = weight_to_fee(base_extrinsic_weight); + + // non-adjustable per-byte fee + let len_fee = per_byte_fee.saturating_mul(Balance::from(transaction.size)); + + // the adjustable part of the fee + let unadjusted_weight_fee = weight_to_fee(transaction.dispatch_weight); + let adjusted_weight_fee = multiplier.saturating_mul_int(unadjusted_weight_fee); + + base_fee.saturating_add(len_fee).saturating_add(adjusted_weight_fee) +} + +/// Sub-module that is declaring types required for processing This -> Bridged chain messages. +pub mod source { + use super::*; + + /// Encoded Call of the Bridged chain. We never try to decode it on This chain. + pub type BridgedChainOpaqueCall = Vec; + + /// Message payload for This -> Bridged chain messages. + pub type FromThisChainMessagePayload = pallet_bridge_dispatch::MessagePayload< + AccountIdOf>, + SignerOf>, + SignatureOf>, + BridgedChainOpaqueCall, + >; + + /// Messages delivery proof from bridged chain: + /// + /// - hash of finalized header; + /// - storage proof of inbound lane state; + /// - lane id. + #[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug)] + pub struct FromBridgedChainMessagesDeliveryProof { + /// Hash of the bridge header the proof is for. + pub bridged_header_hash: BridgedHeaderHash, + /// Storage trie proof generated for [`Self::bridged_header_hash`]. + pub storage_proof: RawStorageProof, + /// Lane id of which messages were delivered and the proof is for. 
+ pub lane: LaneId, + } + + impl Size for FromBridgedChainMessagesDeliveryProof { + fn size_hint(&self) -> u32 { + u32::try_from( + self.storage_proof + .iter() + .fold(0usize, |sum, node| sum.saturating_add(node.len())), + ) + .unwrap_or(u32::MAX) + } + } + + /// 'Parsed' message delivery proof - inbound lane id and its state. + pub type ParsedMessagesDeliveryProofFromBridgedChain = (LaneId, InboundLaneData>>); + + /// Message verifier that is doing all basic checks. + /// + /// This verifier assumes following: + /// + /// - all message lanes are equivalent, so all checks are the same; + /// - messages are being dispatched using `pallet-bridge-dispatch` pallet on the target chain. + /// + /// Following checks are made: + /// + /// - message is rejected if its lane is currently blocked; + /// - message is rejected if there are too many pending (undelivered) messages at the outbound lane; + /// - check that the sender has rights to dispatch the call on target chain using provided dispatch origin; + /// - check that the sender has paid enough funds for both message delivery and dispatch. 
+ #[derive(RuntimeDebug)] + pub struct FromThisChainMessageVerifier(PhantomData); + + pub(crate) const OUTBOUND_LANE_DISABLED: &str = "The outbound message lane is disabled."; + pub(crate) const TOO_MANY_PENDING_MESSAGES: &str = "Too many pending messages at the lane."; + pub(crate) const BAD_ORIGIN: &str = "Unable to match the source origin to expected target origin."; + pub(crate) const TOO_LOW_FEE: &str = "Provided fee is below minimal threshold required by the lane."; + + impl LaneMessageVerifier>, FromThisChainMessagePayload, BalanceOf>> + for FromThisChainMessageVerifier + where + B: MessageBridge, + AccountIdOf>: PartialEq + Clone, + { + type Error = &'static str; + + fn verify_message( + submitter: &Sender>>, + delivery_and_dispatch_fee: &BalanceOf>, + lane: &LaneId, + lane_outbound_data: &OutboundLaneData, + payload: &FromThisChainMessagePayload, + ) -> Result<(), Self::Error> { + // reject message if lane is blocked + if !ThisChain::::is_outbound_lane_enabled(lane) { + return Err(OUTBOUND_LANE_DISABLED); + } + + // reject message if there are too many pending messages at this lane + let max_pending_messages = ThisChain::::maximal_pending_messages_at_outbound_lane(); + let pending_messages = lane_outbound_data + .latest_generated_nonce + .saturating_sub(lane_outbound_data.latest_received_nonce); + if pending_messages > max_pending_messages { + return Err(TOO_MANY_PENDING_MESSAGES); + } + + // Do the dispatch-specific check. We assume that the target chain uses + // `Dispatch`, so we verify the message accordingly. + pallet_bridge_dispatch::verify_message_origin(submitter, payload).map_err(|_| BAD_ORIGIN)?; + + let minimal_fee_in_this_tokens = + estimate_message_dispatch_and_delivery_fee::(payload, B::RELAYER_FEE_PERCENT)?; + + // compare with actual fee paid + if *delivery_and_dispatch_fee < minimal_fee_in_this_tokens { + return Err(TOO_LOW_FEE); + } + + Ok(()) + } + } + + /// Return maximal message size of This -> Bridged chain message. 
+ pub fn maximal_message_size() -> u32 { + super::target::maximal_incoming_message_size(BridgedChain::::maximal_extrinsic_size()) + } + + /// Do basic Bridged-chain specific verification of This -> Bridged chain message. + /// + /// Ok result from this function means that the delivery transaction with this message + /// may be 'mined' by the target chain. But the lane may have its own checks (e.g. fee + /// check) that would reject message (see `FromThisChainMessageVerifier`). + pub fn verify_chain_message( + payload: &FromThisChainMessagePayload, + ) -> Result<(), &'static str> { + let weight_limits = BridgedChain::::message_weight_limits(&payload.call); + if !weight_limits.contains(&payload.weight.into()) { + return Err("Incorrect message weight declared"); + } + + // The maximal size of extrinsic at Substrate-based chain depends on the + // `frame_system::Config::MaximumBlockLength` and `frame_system::Config::AvailableBlockRatio` + // constants. This check is here to be sure that the lane won't stuck because message is too + // large to fit into delivery transaction. + // + // **IMPORTANT NOTE**: the delivery transaction contains storage proof of the message, not + // the message itself. The proof is always larger than the message. But unless chain state + // is enormously large, it should be several dozens/hundreds of bytes. The delivery + // transaction also contains signatures and signed extensions. Because of this, we reserve + // 1/3 of the the maximal extrinsic weight for this data. + if payload.call.len() > maximal_message_size::() as usize { + return Err("The message is too large to be sent over the lane"); + } + + Ok(()) + } + + /// Estimate delivery and dispatch fee that must be paid for delivering a message to the Bridged chain. + /// + /// The fee is paid in This chain Balance, but we use Bridged chain balance to avoid additional conversions. + /// Returns `None` if overflow has happened. 
+ pub fn estimate_message_dispatch_and_delivery_fee( + payload: &FromThisChainMessagePayload, + relayer_fee_percent: u32, + ) -> Result>, &'static str> { + // the fee (in Bridged tokens) of all transactions that are made on the Bridged chain + let delivery_transaction = + BridgedChain::::estimate_delivery_transaction(&payload.call, payload.weight.into()); + let delivery_transaction_fee = BridgedChain::::transaction_payment(delivery_transaction); + + // the fee (in This tokens) of all transactions that are made on This chain + let confirmation_transaction = ThisChain::::estimate_delivery_confirmation_transaction(); + let confirmation_transaction_fee = ThisChain::::transaction_payment(confirmation_transaction); + + // minimal fee (in This tokens) is a sum of all required fees + let minimal_fee = + B::bridged_balance_to_this_balance(delivery_transaction_fee).checked_add(&confirmation_transaction_fee); + + // before returning, add extra fee that is paid to the relayer (relayer interest) + minimal_fee + .and_then(|fee| + // having message with fee that is near the `Balance::MAX_VALUE` of the chain is + // unlikely and should be treated as an error + // => let's do multiplication first + fee + .checked_mul(&relayer_fee_percent.into()) + .and_then(|interest| interest.checked_div(&100u32.into())) + .and_then(|interest| fee.checked_add(&interest))) + .ok_or("Overflow when computing minimal required message delivery and dispatch fee") + } + + /// Verify proof of This -> Bridged chain messages delivery. 
+ pub fn verify_messages_delivery_proof( + proof: FromBridgedChainMessagesDeliveryProof>>, + ) -> Result, &'static str> + where + ThisRuntime: pallet_bridge_grandpa::Config, + ThisRuntime: pallet_bridge_messages::Config>>, + HashOf>: Into::BridgedChain>>, + { + let FromBridgedChainMessagesDeliveryProof { + bridged_header_hash, + storage_proof, + lane, + } = proof; + pallet_bridge_grandpa::Pallet::::parse_finalized_storage_proof( + bridged_header_hash.into(), + StorageProof::new(storage_proof), + |storage| { + // Messages delivery proof is just proof of single storage key read => any error + // is fatal. + let storage_inbound_lane_data_key = pallet_bridge_messages::storage_keys::inbound_lane_data_key::< + ThisRuntime, + MessagesInstanceOf>, + >(&lane); + let raw_inbound_lane_data = storage + .read_value(storage_inbound_lane_data_key.0.as_ref()) + .map_err(|_| "Failed to read inbound lane state from storage proof")? + .ok_or("Inbound lane state is missing from the messages proof")?; + let inbound_lane_data = InboundLaneData::decode(&mut &raw_inbound_lane_data[..]) + .map_err(|_| "Failed to decode inbound lane state from the proof")?; + + Ok((lane, inbound_lane_data)) + }, + ) + .map_err(<&'static str>::from)? + } +} + +/// Sub-module that is declaring types required for processing Bridged -> This chain messages. +pub mod target { + use super::*; + + /// Call origin for Bridged -> This chain messages. + pub type FromBridgedChainMessageCallOrigin = pallet_bridge_dispatch::CallOrigin< + AccountIdOf>, + SignerOf>, + SignatureOf>, + >; + + /// Decoded Bridged -> This message payload. 
+ pub type FromBridgedChainMessagePayload = pallet_bridge_dispatch::MessagePayload< + AccountIdOf>, + SignerOf>, + SignatureOf>, + FromBridgedChainEncodedMessageCall, + >; + + /// Messages proof from bridged chain: + /// + /// - hash of finalized header; + /// - storage proof of messages and (optionally) outbound lane state; + /// - lane id; + /// - nonces (inclusive range) of messages which are included in this proof. + #[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug)] + pub struct FromBridgedChainMessagesProof { + /// Hash of the finalized bridged header the proof is for. + pub bridged_header_hash: BridgedHeaderHash, + /// A storage trie proof of messages being delivered. + pub storage_proof: RawStorageProof, + pub lane: LaneId, + /// Nonce of the first message being delivered. + pub nonces_start: MessageNonce, + /// Nonce of the last message being delivered. + pub nonces_end: MessageNonce, + } + + impl Size for FromBridgedChainMessagesProof { + fn size_hint(&self) -> u32 { + u32::try_from( + self.storage_proof + .iter() + .fold(0usize, |sum, node| sum.saturating_add(node.len())), + ) + .unwrap_or(u32::MAX) + } + } + + /// Encoded Call of This chain as it is transferred over bridge. + /// + /// Our Call is opaque (`Vec`) for Bridged chain. So it is encoded, prefixed with + /// vector length. Custom decode implementation here is exactly to deal with this. + #[derive(Decode, Encode, RuntimeDebug, PartialEq)] + pub struct FromBridgedChainEncodedMessageCall { + pub(crate) encoded_call: Vec, + pub(crate) _marker: PhantomData, + } + + impl From> for Result>, ()> { + fn from(encoded_call: FromBridgedChainEncodedMessageCall) -> Self { + CallOf::>::decode(&mut &encoded_call.encoded_call[..]).map_err(drop) + } + } + + /// Dispatching Bridged -> This chain messages. 
+ #[derive(RuntimeDebug, Clone, Copy)] + pub struct FromBridgedChainMessageDispatch { + _marker: PhantomData<(B, ThisRuntime, ThisDispatchInstance)>, + } + + impl + MessageDispatch< as ChainWithMessages>::Balance> + for FromBridgedChainMessageDispatch + where + ThisDispatchInstance: frame_support::traits::Instance, + ThisRuntime: pallet_bridge_dispatch::Config, + >::Event: + From>, + pallet_bridge_dispatch::Pallet: + bp_message_dispatch::MessageDispatch<(LaneId, MessageNonce), Message = FromBridgedChainMessagePayload>, + { + type DispatchPayload = FromBridgedChainMessagePayload; + + fn dispatch_weight( + message: &DispatchMessage>>, + ) -> frame_support::weights::Weight { + message.data.payload.as_ref().map(|payload| payload.weight).unwrap_or(0) + } + + fn dispatch(message: DispatchMessage>>) { + let message_id = (message.key.lane_id, message.key.nonce); + pallet_bridge_dispatch::Pallet::::dispatch( + B::INSTANCE, + message_id, + message.data.payload.map_err(drop), + ); + } + } + + /// Return maximal dispatch weight of the message we're able to receive. + pub fn maximal_incoming_message_dispatch_weight(maximal_extrinsic_weight: Weight) -> Weight { + maximal_extrinsic_weight / 2 + } + + /// Return maximal message size given maximal extrinsic size. + pub fn maximal_incoming_message_size(maximal_extrinsic_size: u32) -> u32 { + maximal_extrinsic_size / 3 * 2 + } + + /// Verify proof of Bridged -> This chain messages. + /// + /// The `messages_count` argument verification (sane limits) is supposed to be made + /// outside of this function. This function only verifies that the proof declares exactly + /// `messages_count` messages. 
+ pub fn verify_messages_proof( + proof: FromBridgedChainMessagesProof>>, + messages_count: u32, + ) -> Result>>>, &'static str> + where + ThisRuntime: pallet_bridge_grandpa::Config, + ThisRuntime: pallet_bridge_messages::Config>>, + HashOf>: Into::BridgedChain>>, + { + verify_messages_proof_with_parser::( + proof, + messages_count, + |bridged_header_hash, bridged_storage_proof| { + pallet_bridge_grandpa::Pallet::::parse_finalized_storage_proof( + bridged_header_hash.into(), + StorageProof::new(bridged_storage_proof), + |storage_adapter| storage_adapter, + ) + .map(|storage| StorageProofCheckerAdapter::<_, B, ThisRuntime> { + storage, + _dummy: Default::default(), + }) + .map_err(|err| MessageProofError::Custom(err.into())) + }, + ) + .map_err(Into::into) + } + + #[derive(Debug, PartialEq)] + pub(crate) enum MessageProofError { + Empty, + MessagesCountMismatch, + MissingRequiredMessage, + FailedToDecodeMessage, + FailedToDecodeOutboundLaneState, + Custom(&'static str), + } + + impl From for &'static str { + fn from(err: MessageProofError) -> &'static str { + match err { + MessageProofError::Empty => "Messages proof is empty", + MessageProofError::MessagesCountMismatch => "Declared messages count doesn't match actual value", + MessageProofError::MissingRequiredMessage => "Message is missing from the proof", + MessageProofError::FailedToDecodeMessage => "Failed to decode message from the proof", + MessageProofError::FailedToDecodeOutboundLaneState => { + "Failed to decode outbound lane data from the proof" + } + MessageProofError::Custom(err) => err, + } + } + } + + pub(crate) trait MessageProofParser { + fn read_raw_outbound_lane_data(&self, lane_id: &LaneId) -> Option>; + fn read_raw_message(&self, message_key: &MessageKey) -> Option>; + } + + struct StorageProofCheckerAdapter { + storage: StorageProofChecker, + _dummy: sp_std::marker::PhantomData<(B, ThisRuntime)>, + } + + impl MessageProofParser for StorageProofCheckerAdapter + where + H: Hasher, + B: 
MessageBridge, + ThisRuntime: pallet_bridge_messages::Config>>, + { + fn read_raw_outbound_lane_data(&self, lane_id: &LaneId) -> Option> { + let storage_outbound_lane_data_key = pallet_bridge_messages::storage_keys::outbound_lane_data_key::< + MessagesInstanceOf>, + >(lane_id); + self.storage + .read_value(storage_outbound_lane_data_key.0.as_ref()) + .ok()? + } + + fn read_raw_message(&self, message_key: &MessageKey) -> Option> { + let storage_message_key = pallet_bridge_messages::storage_keys::message_key::< + ThisRuntime, + MessagesInstanceOf>, + >(&message_key.lane_id, message_key.nonce); + self.storage.read_value(storage_message_key.0.as_ref()).ok()? + } + } + + /// Verify proof of Bridged -> This chain messages using given message proof parser. + pub(crate) fn verify_messages_proof_with_parser( + proof: FromBridgedChainMessagesProof>>, + messages_count: u32, + build_parser: BuildParser, + ) -> Result>>>, MessageProofError> + where + BuildParser: FnOnce(HashOf>, RawStorageProof) -> Result, + Parser: MessageProofParser, + { + let FromBridgedChainMessagesProof { + bridged_header_hash, + storage_proof, + lane, + nonces_start, + nonces_end, + } = proof; + + // receiving proofs where end < begin is ok (if proof includes outbound lane state) + let messages_in_the_proof = if let Some(nonces_difference) = nonces_end.checked_sub(nonces_start) { + // let's check that the user (relayer) has passed correct `messages_count` + // (this bounds maximal capacity of messages vec below) + let messages_in_the_proof = nonces_difference.saturating_add(1); + if messages_in_the_proof != MessageNonce::from(messages_count) { + return Err(MessageProofError::MessagesCountMismatch); + } + + messages_in_the_proof + } else { + 0 + }; + + let parser = build_parser(bridged_header_hash, storage_proof)?; + + // Read messages first. All messages that are claimed to be in the proof must + // be in the proof. So any error in `read_value`, or even missing value is fatal. 
+ // + // Mind that we allow proofs with no messages if outbound lane state is proved. + let mut messages = Vec::with_capacity(messages_in_the_proof as _); + for nonce in nonces_start..=nonces_end { + let message_key = MessageKey { lane_id: lane, nonce }; + let raw_message_data = parser + .read_raw_message(&message_key) + .ok_or(MessageProofError::MissingRequiredMessage)?; + let message_data = MessageData::>>::decode(&mut &raw_message_data[..]) + .map_err(|_| MessageProofError::FailedToDecodeMessage)?; + messages.push(Message { + key: message_key, + data: message_data, + }); + } + + // Now let's check if proof contains outbound lane state proof. It is optional, so we + // simply ignore `read_value` errors and missing value. + let mut proved_lane_messages = ProvedLaneMessages { + lane_state: None, + messages, + }; + let raw_outbound_lane_data = parser.read_raw_outbound_lane_data(&lane); + if let Some(raw_outbound_lane_data) = raw_outbound_lane_data { + proved_lane_messages.lane_state = Some( + OutboundLaneData::decode(&mut &raw_outbound_lane_data[..]) + .map_err(|_| MessageProofError::FailedToDecodeOutboundLaneState)?, + ); + } + + // Now we may actually check if the proof is empty or not. 
+ if proved_lane_messages.lane_state.is_none() && proved_lane_messages.messages.is_empty() { + return Err(MessageProofError::Empty); + } + + // We only support single lane messages in this schema + let mut proved_messages = ProvedMessages::new(); + proved_messages.insert(lane, proved_lane_messages); + + Ok(proved_messages) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use codec::{Decode, Encode}; + use frame_support::weights::Weight; + use std::ops::RangeInclusive; + + const DELIVERY_TRANSACTION_WEIGHT: Weight = 100; + const DELIVERY_CONFIRMATION_TRANSACTION_WEIGHT: Weight = 100; + const THIS_CHAIN_WEIGHT_TO_BALANCE_RATE: Weight = 2; + const BRIDGED_CHAIN_WEIGHT_TO_BALANCE_RATE: Weight = 4; + const BRIDGED_CHAIN_TO_THIS_CHAIN_BALANCE_RATE: u32 = 6; + const BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT: Weight = 2048; + const BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE: u32 = 1024; + + /// Bridge that is deployed on ThisChain and allows sending/receiving messages to/from BridgedChain; + #[derive(Debug, PartialEq, Eq)] + struct OnThisChainBridge; + + impl MessageBridge for OnThisChainBridge { + const INSTANCE: InstanceId = *b"this"; + const RELAYER_FEE_PERCENT: u32 = 10; + + type ThisChain = ThisChain; + type BridgedChain = BridgedChain; + + fn bridged_balance_to_this_balance(bridged_balance: BridgedChainBalance) -> ThisChainBalance { + ThisChainBalance(bridged_balance.0 * BRIDGED_CHAIN_TO_THIS_CHAIN_BALANCE_RATE as u32) + } + } + + /// Bridge that is deployed on BridgedChain and allows sending/receiving messages to/from ThisChain; + #[derive(Debug, PartialEq, Eq)] + struct OnBridgedChainBridge; + + impl MessageBridge for OnBridgedChainBridge { + const INSTANCE: InstanceId = *b"brdg"; + const RELAYER_FEE_PERCENT: u32 = 20; + + type ThisChain = BridgedChain; + type BridgedChain = ThisChain; + + fn bridged_balance_to_this_balance(_this_balance: ThisChainBalance) -> BridgedChainBalance { + unreachable!() + } + } + + #[derive(Debug, PartialEq, Decode, Encode, Clone)] + struct 
ThisChainAccountId(u32); + #[derive(Debug, PartialEq, Decode, Encode)] + struct ThisChainSigner(u32); + #[derive(Debug, PartialEq, Decode, Encode)] + struct ThisChainSignature(u32); + #[derive(Debug, PartialEq, Decode, Encode)] + enum ThisChainCall { + #[codec(index = 42)] + Transfer, + #[codec(index = 84)] + Mint, + } + + #[derive(Debug, PartialEq, Decode, Encode)] + struct BridgedChainAccountId(u32); + #[derive(Debug, PartialEq, Decode, Encode)] + struct BridgedChainSigner(u32); + #[derive(Debug, PartialEq, Decode, Encode)] + struct BridgedChainSignature(u32); + #[derive(Debug, PartialEq, Decode, Encode)] + enum BridgedChainCall {} + + macro_rules! impl_wrapped_balance { + ($name:ident) => { + #[derive(Debug, PartialEq, Decode, Encode, Clone, Copy)] + struct $name(u32); + + impl From for $name { + fn from(balance: u32) -> Self { + Self(balance) + } + } + + impl sp_std::ops::Add for $name { + type Output = $name; + + fn add(self, other: Self) -> Self { + Self(self.0 + other.0) + } + } + + impl sp_std::ops::Div for $name { + type Output = $name; + + fn div(self, other: Self) -> Self { + Self(self.0 / other.0) + } + } + + impl sp_std::ops::Mul for $name { + type Output = $name; + + fn mul(self, other: Self) -> Self { + Self(self.0 * other.0) + } + } + + impl sp_std::cmp::PartialOrd for $name { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.partial_cmp(&other.0) + } + } + + impl CheckedAdd for $name { + fn checked_add(&self, other: &Self) -> Option { + self.0.checked_add(other.0).map(Self) + } + } + + impl CheckedDiv for $name { + fn checked_div(&self, other: &Self) -> Option { + self.0.checked_div(other.0).map(Self) + } + } + + impl CheckedMul for $name { + fn checked_mul(&self, other: &Self) -> Option { + self.0.checked_mul(other.0).map(Self) + } + } + }; + } + + impl_wrapped_balance!(ThisChainBalance); + impl_wrapped_balance!(BridgedChainBalance); + + struct ThisChain; + + impl ChainWithMessages for ThisChain { + type Hash = (); + type AccountId = 
ThisChainAccountId; + type Signer = ThisChainSigner; + type Signature = ThisChainSignature; + type Weight = frame_support::weights::Weight; + type Balance = ThisChainBalance; + + type MessagesInstance = pallet_bridge_messages::DefaultInstance; + } + + impl ThisChainWithMessages for ThisChain { + type Call = ThisChainCall; + + fn is_outbound_lane_enabled(lane: &LaneId) -> bool { + lane == TEST_LANE_ID + } + + fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { + MAXIMAL_PENDING_MESSAGES_AT_TEST_LANE + } + + fn estimate_delivery_confirmation_transaction() -> MessageTransaction> { + MessageTransaction { + dispatch_weight: DELIVERY_CONFIRMATION_TRANSACTION_WEIGHT, + size: 0, + } + } + + fn transaction_payment(transaction: MessageTransaction>) -> BalanceOf { + ThisChainBalance(transaction.dispatch_weight as u32 * THIS_CHAIN_WEIGHT_TO_BALANCE_RATE as u32) + } + } + + impl BridgedChainWithMessages for ThisChain { + fn maximal_extrinsic_size() -> u32 { + unreachable!() + } + + fn message_weight_limits(_message_payload: &[u8]) -> RangeInclusive { + unreachable!() + } + + fn estimate_delivery_transaction( + _message_payload: &[u8], + _message_dispatch_weight: WeightOf, + ) -> MessageTransaction> { + unreachable!() + } + + fn transaction_payment(_transaction: MessageTransaction>) -> BalanceOf { + unreachable!() + } + } + + struct BridgedChain; + + impl ChainWithMessages for BridgedChain { + type Hash = (); + type AccountId = BridgedChainAccountId; + type Signer = BridgedChainSigner; + type Signature = BridgedChainSignature; + type Weight = frame_support::weights::Weight; + type Balance = BridgedChainBalance; + + type MessagesInstance = pallet_bridge_messages::DefaultInstance; + } + + impl ThisChainWithMessages for BridgedChain { + type Call = BridgedChainCall; + + fn is_outbound_lane_enabled(_lane: &LaneId) -> bool { + unreachable!() + } + + fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { + unreachable!() + } + + fn 
estimate_delivery_confirmation_transaction() -> MessageTransaction> { + unreachable!() + } + + fn transaction_payment(_transaction: MessageTransaction>) -> BalanceOf { + unreachable!() + } + } + + impl BridgedChainWithMessages for BridgedChain { + fn maximal_extrinsic_size() -> u32 { + BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE + } + + fn message_weight_limits(message_payload: &[u8]) -> RangeInclusive { + let begin = std::cmp::min(BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, message_payload.len() as Weight); + begin..=BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT + } + + fn estimate_delivery_transaction( + _message_payload: &[u8], + message_dispatch_weight: WeightOf, + ) -> MessageTransaction> { + MessageTransaction { + dispatch_weight: DELIVERY_TRANSACTION_WEIGHT + message_dispatch_weight, + size: 0, + } + } + + fn transaction_payment(transaction: MessageTransaction>) -> BalanceOf { + BridgedChainBalance(transaction.dispatch_weight as u32 * BRIDGED_CHAIN_WEIGHT_TO_BALANCE_RATE as u32) + } + } + + fn test_lane_outbound_data() -> OutboundLaneData { + OutboundLaneData::default() + } + + #[test] + fn message_from_bridged_chain_is_decoded() { + // the message is encoded on the bridged chain + let message_on_bridged_chain = source::FromThisChainMessagePayload:: { + spec_version: 1, + weight: 100, + origin: pallet_bridge_dispatch::CallOrigin::SourceRoot, + call: ThisChainCall::Transfer.encode(), + } + .encode(); + + // and sent to this chain where it is decoded + let message_on_this_chain = + target::FromBridgedChainMessagePayload::::decode(&mut &message_on_bridged_chain[..]) + .unwrap(); + assert_eq!( + message_on_this_chain, + target::FromBridgedChainMessagePayload:: { + spec_version: 1, + weight: 100, + origin: pallet_bridge_dispatch::CallOrigin::SourceRoot, + call: target::FromBridgedChainEncodedMessageCall:: { + encoded_call: ThisChainCall::Transfer.encode(), + _marker: PhantomData::default(), + }, + } + ); + assert_eq!(Ok(ThisChainCall::Transfer), message_on_this_chain.call.into()); + } + + 
const TEST_LANE_ID: &LaneId = b"test"; + const MAXIMAL_PENDING_MESSAGES_AT_TEST_LANE: MessageNonce = 32; + + fn regular_outbound_message_payload() -> source::FromThisChainMessagePayload { + source::FromThisChainMessagePayload:: { + spec_version: 1, + weight: 100, + origin: pallet_bridge_dispatch::CallOrigin::SourceRoot, + call: vec![42], + } + } + + #[test] + fn message_fee_is_checked_by_verifier() { + const EXPECTED_MINIMAL_FEE: u32 = 5500; + + // payload of the This -> Bridged chain message + let payload = regular_outbound_message_payload(); + + // let's check if estimation matching hardcoded value + assert_eq!( + source::estimate_message_dispatch_and_delivery_fee::( + &payload, + OnThisChainBridge::RELAYER_FEE_PERCENT, + ), + Ok(ThisChainBalance(EXPECTED_MINIMAL_FEE)), + ); + + // and now check that the verifier checks the fee + assert_eq!( + source::FromThisChainMessageVerifier::::verify_message( + &Sender::Root, + &ThisChainBalance(1), + &TEST_LANE_ID, + &test_lane_outbound_data(), + &payload, + ), + Err(source::TOO_LOW_FEE) + ); + assert!( + source::FromThisChainMessageVerifier::::verify_message( + &Sender::Root, + &ThisChainBalance(1_000_000), + &TEST_LANE_ID, + &test_lane_outbound_data(), + &payload, + ) + .is_ok(), + ); + } + + #[test] + fn should_disallow_root_calls_from_regular_accounts() { + // payload of the This -> Bridged chain message + let payload = source::FromThisChainMessagePayload:: { + spec_version: 1, + weight: 100, + origin: pallet_bridge_dispatch::CallOrigin::SourceRoot, + call: vec![42], + }; + + // and now check that the verifier checks the fee + assert_eq!( + source::FromThisChainMessageVerifier::::verify_message( + &Sender::Signed(ThisChainAccountId(0)), + &ThisChainBalance(1_000_000), + &TEST_LANE_ID, + &test_lane_outbound_data(), + &payload, + ), + Err(source::BAD_ORIGIN) + ); + assert_eq!( + source::FromThisChainMessageVerifier::::verify_message( + &Sender::None, + &ThisChainBalance(1_000_000), + &TEST_LANE_ID, + 
&test_lane_outbound_data(), + &payload, + ), + Err(source::BAD_ORIGIN) + ); + assert!( + source::FromThisChainMessageVerifier::::verify_message( + &Sender::Root, + &ThisChainBalance(1_000_000), + &TEST_LANE_ID, + &test_lane_outbound_data(), + &payload, + ) + .is_ok(), + ); + } + + #[test] + fn should_verify_source_and_target_origin_matching() { + // payload of the This -> Bridged chain message + let payload = source::FromThisChainMessagePayload:: { + spec_version: 1, + weight: 100, + origin: pallet_bridge_dispatch::CallOrigin::SourceAccount(ThisChainAccountId(1)), + call: vec![42], + }; + + // and now check that the verifier checks the fee + assert_eq!( + source::FromThisChainMessageVerifier::::verify_message( + &Sender::Signed(ThisChainAccountId(0)), + &ThisChainBalance(1_000_000), + &TEST_LANE_ID, + &test_lane_outbound_data(), + &payload, + ), + Err(source::BAD_ORIGIN) + ); + assert!( + source::FromThisChainMessageVerifier::::verify_message( + &Sender::Signed(ThisChainAccountId(1)), + &ThisChainBalance(1_000_000), + &TEST_LANE_ID, + &test_lane_outbound_data(), + &payload, + ) + .is_ok(), + ); + } + + #[test] + fn message_is_rejected_when_sent_using_disabled_lane() { + assert_eq!( + source::FromThisChainMessageVerifier::::verify_message( + &Sender::Root, + &ThisChainBalance(1_000_000), + b"dsbl", + &test_lane_outbound_data(), + ®ular_outbound_message_payload(), + ), + Err(source::OUTBOUND_LANE_DISABLED) + ); + } + + #[test] + fn message_is_rejected_when_there_are_too_many_pending_messages_at_outbound_lane() { + assert_eq!( + source::FromThisChainMessageVerifier::::verify_message( + &Sender::Root, + &ThisChainBalance(1_000_000), + &TEST_LANE_ID, + &OutboundLaneData { + latest_received_nonce: 100, + latest_generated_nonce: 100 + MAXIMAL_PENDING_MESSAGES_AT_TEST_LANE + 1, + ..Default::default() + }, + ®ular_outbound_message_payload(), + ), + Err(source::TOO_MANY_PENDING_MESSAGES) + ); + } + + #[test] + fn 
verify_chain_message_rejects_message_with_too_small_declared_weight() { + assert!( + source::verify_chain_message::(&source::FromThisChainMessagePayload::< + OnThisChainBridge, + > { + spec_version: 1, + weight: 5, + origin: pallet_bridge_dispatch::CallOrigin::SourceRoot, + call: vec![1, 2, 3, 4, 5, 6], + },) + .is_err() + ); + } + + #[test] + fn verify_chain_message_rejects_message_with_too_large_declared_weight() { + assert!( + source::verify_chain_message::(&source::FromThisChainMessagePayload::< + OnThisChainBridge, + > { + spec_version: 1, + weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT + 1, + origin: pallet_bridge_dispatch::CallOrigin::SourceRoot, + call: vec![1, 2, 3, 4, 5, 6], + },) + .is_err() + ); + } + + #[test] + fn verify_chain_message_rejects_message_too_large_message() { + assert!( + source::verify_chain_message::(&source::FromThisChainMessagePayload::< + OnThisChainBridge, + > { + spec_version: 1, + weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, + origin: pallet_bridge_dispatch::CallOrigin::SourceRoot, + call: vec![0; source::maximal_message_size::() as usize + 1], + },) + .is_err() + ); + } + + #[test] + fn verify_chain_message_accepts_maximal_message() { + assert_eq!( + source::verify_chain_message::(&source::FromThisChainMessagePayload::< + OnThisChainBridge, + > { + spec_version: 1, + weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, + origin: pallet_bridge_dispatch::CallOrigin::SourceRoot, + call: vec![0; source::maximal_message_size::() as _], + },), + Ok(()), + ); + } + + #[derive(Debug)] + struct TestMessageProofParser { + failing: bool, + messages: RangeInclusive, + outbound_lane_data: Option, + } + + impl target::MessageProofParser for TestMessageProofParser { + fn read_raw_outbound_lane_data(&self, _lane_id: &LaneId) -> Option> { + if self.failing { + Some(vec![]) + } else { + self.outbound_lane_data.clone().map(|data| data.encode()) + } + } + + fn read_raw_message(&self, message_key: &MessageKey) -> Option> { + if self.failing { + Some(vec![]) + } 
else if self.messages.contains(&message_key.nonce) { + Some( + MessageData:: { + payload: message_key.nonce.encode(), + fee: BridgedChainBalance(0), + } + .encode(), + ) + } else { + None + } + } + } + + #[allow(clippy::reversed_empty_ranges)] + fn no_messages_range() -> RangeInclusive { + 1..=0 + } + + fn messages_proof(nonces_end: MessageNonce) -> target::FromBridgedChainMessagesProof<()> { + target::FromBridgedChainMessagesProof { + bridged_header_hash: (), + storage_proof: vec![], + lane: Default::default(), + nonces_start: 1, + nonces_end, + } + } + + #[test] + fn messages_proof_is_rejected_if_declared_less_than_actual_number_of_messages() { + assert_eq!( + target::verify_messages_proof_with_parser::( + messages_proof(10), + 5, + |_, _| unreachable!(), + ), + Err(target::MessageProofError::MessagesCountMismatch), + ); + } + + #[test] + fn messages_proof_is_rejected_if_declared_more_than_actual_number_of_messages() { + assert_eq!( + target::verify_messages_proof_with_parser::( + messages_proof(10), + 15, + |_, _| unreachable!(), + ), + Err(target::MessageProofError::MessagesCountMismatch), + ); + } + + #[test] + fn message_proof_is_rejected_if_build_parser_fails() { + assert_eq!( + target::verify_messages_proof_with_parser::( + messages_proof(10), + 10, + |_, _| Err(target::MessageProofError::Custom("test")), + ), + Err(target::MessageProofError::Custom("test")), + ); + } + + #[test] + fn message_proof_is_rejected_if_required_message_is_missing() { + assert_eq!( + target::verify_messages_proof_with_parser::(messages_proof(10), 10, |_, _| Ok( + TestMessageProofParser { + failing: false, + messages: 1..=5, + outbound_lane_data: None, + } + ),), + Err(target::MessageProofError::MissingRequiredMessage), + ); + } + + #[test] + fn message_proof_is_rejected_if_message_decode_fails() { + assert_eq!( + target::verify_messages_proof_with_parser::(messages_proof(10), 10, |_, _| Ok( + TestMessageProofParser { + failing: true, + messages: 1..=10, + outbound_lane_data: None, 
+ } + ),), + Err(target::MessageProofError::FailedToDecodeMessage), + ); + } + + #[test] + fn message_proof_is_rejected_if_outbound_lane_state_decode_fails() { + assert_eq!( + target::verify_messages_proof_with_parser::(messages_proof(0), 0, |_, _| Ok( + TestMessageProofParser { + failing: true, + messages: no_messages_range(), + outbound_lane_data: Some(OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 1, + latest_generated_nonce: 1, + }), + } + ),), + Err(target::MessageProofError::FailedToDecodeOutboundLaneState), + ); + } + + #[test] + fn message_proof_is_rejected_if_it_is_empty() { + assert_eq!( + target::verify_messages_proof_with_parser::(messages_proof(0), 0, |_, _| Ok( + TestMessageProofParser { + failing: false, + messages: no_messages_range(), + outbound_lane_data: None, + } + ),), + Err(target::MessageProofError::Empty), + ); + } + + #[test] + fn non_empty_message_proof_without_messages_is_accepted() { + assert_eq!( + target::verify_messages_proof_with_parser::(messages_proof(0), 0, |_, _| Ok( + TestMessageProofParser { + failing: false, + messages: no_messages_range(), + outbound_lane_data: Some(OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 1, + latest_generated_nonce: 1, + }), + } + ),), + Ok(vec![( + Default::default(), + ProvedLaneMessages { + lane_state: Some(OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 1, + latest_generated_nonce: 1, + }), + messages: Vec::new(), + }, + )] + .into_iter() + .collect()), + ); + } + + #[test] + fn non_empty_message_proof_is_accepted() { + assert_eq!( + target::verify_messages_proof_with_parser::(messages_proof(1), 1, |_, _| Ok( + TestMessageProofParser { + failing: false, + messages: 1..=1, + outbound_lane_data: Some(OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 1, + latest_generated_nonce: 1, + }), + } + ),), + Ok(vec![( + Default::default(), + ProvedLaneMessages { + lane_state: Some(OutboundLaneData { + 
oldest_unpruned_nonce: 1, + latest_received_nonce: 1, + latest_generated_nonce: 1, + }), + messages: vec![Message { + key: MessageKey { + lane_id: Default::default(), + nonce: 1 + }, + data: MessageData { + payload: 1u64.encode(), + fee: BridgedChainBalance(0) + }, + }], + }, + )] + .into_iter() + .collect()), + ); + } + + #[test] + fn verify_messages_proof_with_parser_does_not_panic_if_messages_count_mismatches() { + assert_eq!( + target::verify_messages_proof_with_parser::( + messages_proof(u64::MAX), + 0, + |_, _| Ok(TestMessageProofParser { + failing: false, + messages: 0..=u64::MAX, + outbound_lane_data: Some(OutboundLaneData { + oldest_unpruned_nonce: 1, + latest_received_nonce: 1, + latest_generated_nonce: 1, + }), + }), + ), + Err(target::MessageProofError::MessagesCountMismatch), + ); + } + + #[test] + fn transaction_payment_works_with_zero_multiplier() { + assert_eq!( + transaction_payment( + 100, + 10, + FixedU128::zero(), + |weight| weight, + MessageTransaction { + size: 50, + dispatch_weight: 777 + }, + ), + 100 + 50 * 10, + ); + } + + #[test] + fn transaction_payment_works_with_non_zero_multiplier() { + assert_eq!( + transaction_payment( + 100, + 10, + FixedU128::one(), + |weight| weight, + MessageTransaction { + size: 50, + dispatch_weight: 777 + }, + ), + 100 + 50 * 10 + 777, + ); + } +} diff --git a/polkadot/bin/runtime-common/src/messages_benchmarking.rs b/polkadot/bin/runtime-common/src/messages_benchmarking.rs new file mode 100644 index 00000000000..639e5f6c504 --- /dev/null +++ b/polkadot/bin/runtime-common/src/messages_benchmarking.rs @@ -0,0 +1,226 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. + +//! Everything required to run benchmarks of messages module, based on +//! `bridge_runtime_common::messages` implementation. + +#![cfg(feature = "runtime-benchmarks")] + +use crate::messages::{ + source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, AccountIdOf, BalanceOf, + BridgedChain, HashOf, MessageBridge, ThisChain, +}; + +use bp_messages::{LaneId, MessageData, MessageKey, MessagePayload}; +use codec::Encode; +use ed25519_dalek::{PublicKey, SecretKey, Signer, KEYPAIR_LENGTH, SECRET_KEY_LENGTH}; +use frame_support::weights::Weight; +use pallet_bridge_messages::benchmarking::{MessageDeliveryProofParams, MessageProofParams, ProofSize}; +use sp_core::Hasher; +use sp_runtime::traits::Header; +use sp_std::prelude::*; +use sp_trie::{record_all_keys, trie_types::TrieDBMut, Layout, MemoryDB, Recorder, TrieMut}; + +/// Generate ed25519 signature to be used in `pallet_bridge_call_dispatch::CallOrigin::TargetAccount`. +/// +/// Returns public key of the signer and the signature itself. 
+pub fn ed25519_sign(target_call: &impl Encode, source_account_id: &impl Encode) -> ([u8; 32], [u8; 64]) { + // key from the repo example (https://docs.rs/ed25519-dalek/1.0.1/ed25519_dalek/struct.SecretKey.html) + let target_secret = SecretKey::from_bytes(&[ + 157, 097, 177, 157, 239, 253, 090, 096, 186, 132, 074, 244, 146, 236, 044, 196, 068, 073, 197, 105, 123, 050, + 105, 025, 112, 059, 172, 003, 028, 174, 127, 096, + ]) + .expect("harcoded key is valid"); + let target_public: PublicKey = (&target_secret).into(); + + let mut target_pair_bytes = [0u8; KEYPAIR_LENGTH]; + target_pair_bytes[..SECRET_KEY_LENGTH].copy_from_slice(&target_secret.to_bytes()); + target_pair_bytes[SECRET_KEY_LENGTH..].copy_from_slice(&target_public.to_bytes()); + let target_pair = ed25519_dalek::Keypair::from_bytes(&target_pair_bytes).expect("hardcoded pair is valid"); + + let mut signature_message = Vec::new(); + target_call.encode_to(&mut signature_message); + source_account_id.encode_to(&mut signature_message); + let target_origin_signature = target_pair + .try_sign(&signature_message) + .expect("Ed25519 try_sign should not fail in benchmarks"); + + (target_public.to_bytes(), target_origin_signature.to_bytes()) +} + +/// Prepare proof of messages for the `receive_messages_proof` call. 
+pub fn prepare_message_proof( + params: MessageProofParams, + make_bridged_message_storage_key: MM, + make_bridged_outbound_lane_data_key: ML, + make_bridged_header: MH, + message_dispatch_weight: Weight, + message_payload: MessagePayload, +) -> (FromBridgedChainMessagesProof>>, Weight) +where + B: MessageBridge, + H: Hasher, + R: pallet_bridge_grandpa::Config, + FI: 'static, + ::Hash: Into>>, + MM: Fn(MessageKey) -> Vec, + ML: Fn(LaneId) -> Vec, + MH: Fn(H::Out) -> ::Header, +{ + // prepare Bridged chain storage with messages and (optionally) outbound lane state + let message_count = params + .message_nonces + .end() + .saturating_sub(*params.message_nonces.start()) + + 1; + let mut storage_keys = Vec::with_capacity(message_count as usize + 1); + let mut root = Default::default(); + let mut mdb = MemoryDB::default(); + { + let mut trie = TrieDBMut::::new(&mut mdb, &mut root); + + // insert messages + for nonce in params.message_nonces.clone() { + let message_key = MessageKey { + lane_id: params.lane, + nonce, + }; + let message_data = MessageData { + fee: BalanceOf::>::from(0), + payload: message_payload.clone(), + }; + let storage_key = make_bridged_message_storage_key(message_key); + trie.insert(&storage_key, &message_data.encode()) + .map_err(|_| "TrieMut::insert has failed") + .expect("TrieMut::insert should not fail in benchmarks"); + storage_keys.push(storage_key); + } + + // insert outbound lane state + if let Some(outbound_lane_data) = params.outbound_lane_data { + let storage_key = make_bridged_outbound_lane_data_key(params.lane); + trie.insert(&storage_key, &outbound_lane_data.encode()) + .map_err(|_| "TrieMut::insert has failed") + .expect("TrieMut::insert should not fail in benchmarks"); + storage_keys.push(storage_key); + } + } + root = grow_trie(root, &mut mdb, params.size); + + // generate storage proof to be delivered to This chain + let mut proof_recorder = Recorder::::new(); + record_all_keys::, _>(&mdb, &root, &mut proof_recorder) + 
.map_err(|_| "record_all_keys has failed") + .expect("record_all_keys should not fail in benchmarks"); + let storage_proof = proof_recorder.drain().into_iter().map(|n| n.data.to_vec()).collect(); + + // prepare Bridged chain header and insert it into the Substrate pallet + let bridged_header = make_bridged_header(root); + let bridged_header_hash = bridged_header.hash(); + pallet_bridge_grandpa::initialize_for_benchmarks::(bridged_header); + + ( + FromBridgedChainMessagesProof { + bridged_header_hash: bridged_header_hash.into(), + storage_proof, + lane: params.lane, + nonces_start: *params.message_nonces.start(), + nonces_end: *params.message_nonces.end(), + }, + message_dispatch_weight + .checked_mul(message_count) + .expect("too many messages requested by benchmark"), + ) +} + +/// Prepare proof of messages delivery for the `receive_messages_delivery_proof` call. +pub fn prepare_message_delivery_proof( + params: MessageDeliveryProofParams>>, + make_bridged_inbound_lane_data_key: ML, + make_bridged_header: MH, +) -> FromBridgedChainMessagesDeliveryProof>> +where + B: MessageBridge, + H: Hasher, + R: pallet_bridge_grandpa::Config, + FI: 'static, + ::Hash: Into>>, + ML: Fn(LaneId) -> Vec, + MH: Fn(H::Out) -> ::Header, +{ + // prepare Bridged chain storage with inbound lane state + let storage_key = make_bridged_inbound_lane_data_key(params.lane); + let mut root = Default::default(); + let mut mdb = MemoryDB::default(); + { + let mut trie = TrieDBMut::::new(&mut mdb, &mut root); + trie.insert(&storage_key, ¶ms.inbound_lane_data.encode()) + .map_err(|_| "TrieMut::insert has failed") + .expect("TrieMut::insert should not fail in benchmarks"); + } + root = grow_trie(root, &mut mdb, params.size); + + // generate storage proof to be delivered to This chain + let mut proof_recorder = Recorder::::new(); + record_all_keys::, _>(&mdb, &root, &mut proof_recorder) + .map_err(|_| "record_all_keys has failed") + .expect("record_all_keys should not fail in benchmarks"); + let 
storage_proof = proof_recorder.drain().into_iter().map(|n| n.data.to_vec()).collect(); + + // prepare Bridged chain header and insert it into the Substrate pallet + let bridged_header = make_bridged_header(root); + let bridged_header_hash = bridged_header.hash(); + pallet_bridge_grandpa::initialize_for_benchmarks::(bridged_header); + + FromBridgedChainMessagesDeliveryProof { + bridged_header_hash: bridged_header_hash.into(), + storage_proof, + lane: params.lane, + } +} + +/// Populate trie with dummy keys+values until trie has at least given size. +fn grow_trie(mut root: H::Out, mdb: &mut MemoryDB, trie_size: ProofSize) -> H::Out { + let (iterations, leaf_size, minimal_trie_size) = match trie_size { + ProofSize::Minimal(_) => return root, + ProofSize::HasLargeLeaf(size) => (1, size, size), + ProofSize::HasExtraNodes(size) => (8, 1, size), + }; + + let mut key_index = 0; + loop { + // generate storage proof to be delivered to This chain + let mut proof_recorder = Recorder::::new(); + record_all_keys::, _>(mdb, &root, &mut proof_recorder) + .map_err(|_| "record_all_keys has failed") + .expect("record_all_keys should not fail in benchmarks"); + let size: usize = proof_recorder.drain().into_iter().map(|n| n.data.len()).sum(); + if size > minimal_trie_size as _ { + return root; + } + + let mut trie = TrieDBMut::::from_existing(mdb, &mut root) + .map_err(|_| "TrieDBMut::from_existing has failed") + .expect("TrieDBMut::from_existing should not fail in benchmarks"); + for _ in 0..iterations { + trie.insert(&key_index.encode(), &vec![42u8; leaf_size as _]) + .map_err(|_| "TrieMut::insert has failed") + .expect("TrieMut::insert should not fail in benchmarks"); + key_index += 1; + } + trie.commit(); + } +} diff --git a/polkadot/deny.toml b/polkadot/deny.toml new file mode 100644 index 00000000000..7f91bce7c9f --- /dev/null +++ b/polkadot/deny.toml @@ -0,0 +1,201 @@ +# This template contains all of the possible sections and their default values + +# Note that all fields that 
take a lint level have these possible values: +# * deny - An error will be produced and the check will fail +# * warn - A warning will be produced, but the check will not fail +# * allow - No warning or error will be produced, though in some cases a note +# will be + +# The values provided in this template are the default values that will be used +# when any section or field is not specified in your own configuration + +# If 1 or more target triples (and optionally, target_features) are specified, +# only the specified targets will be checked when running `cargo deny check`. +# This means, if a particular package is only ever used as a target specific +# dependency, such as, for example, the `nix` crate only being used via the +# `target_family = "unix"` configuration, that only having windows targets in +# this list would mean the nix crate, as well as any of its exclusive +# dependencies not shared by any other crates, would be ignored, as the target +# list here is effectively saying which targets you are building for. +targets = [ + # The triple can be any string, but only the target triples built in to + # rustc (as of 1.40) can be checked against actual config expressions + #{ triple = "x86_64-unknown-linux-musl" }, + # You can also specify which target_features you promise are enabled for a + # particular target. target_features are currently not validated against + # the actual valid features supported by the target architecture. 
+ #{ triple = "wasm32-unknown-unknown", features = ["atomics"] },
+]
+
+# This section is considered when running `cargo deny check advisories`
+# More documentation for the advisories section can be found here:
+# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html
+[advisories]
+# The path where the advisory database is cloned/fetched into
+db-path = "~/.cargo/advisory-db"
+# The url of the advisory database to use
+db-urls = ["https://github.com/rustsec/advisory-db"]
+# The lint level for security vulnerabilities
+vulnerability = "deny"
+# The lint level for unmaintained crates
+unmaintained = "warn"
+# The lint level for crates that have been yanked from their source registry
+yanked = "warn"
+# The lint level for crates with security notices. Note that as of
+# 2019-12-17 there are no security notice advisories in
+# https://github.com/rustsec/advisory-db
+notice = "warn"
+# A list of advisory IDs to ignore. Note that ignored advisories will still
+# output a note when they are encountered.
+ignore = [
+ # generic-array lifetime erasure. If all upstream crates upgrade to >=0.14.0
+ # we can remove this.
+ "RUSTSEC-2020-0146",
+ # yaml-rust < clap. Not feasible to upgrade and also not possible to trigger in practice.
+ "RUSTSEC-2018-0006",
+ # We need to wait until Substrate updates their `wasmtime` dependency to fix this.
+ # TODO: See issue #676: https://github.com/paritytech/parity-bridges-common/issues/676
+ "RUSTSEC-2021-0013",
+ # We need to wait until Substrate updates their `hyper` dependency to fix this.
+ # TODO: See issue #681: https://github.com/paritytech/parity-bridges-common/issues/681
+ "RUSTSEC-2021-0020",
+]
+# Threshold for security vulnerabilities, any vulnerability with a CVSS score
+# lower than the range specified will be ignored. Note that ignored advisories
+# will still output a note when they are encountered.
+# * None - CVSS Score 0.0
+# * Low - CVSS Score 0.1 - 3.9
+# * Medium - CVSS Score 4.0 - 6.9
+# * High - CVSS Score 7.0 - 8.9
+# * Critical - CVSS Score 9.0 - 10.0
+#severity-threshold =
+
+# This section is considered when running `cargo deny check licenses`
+# More documentation for the licenses section can be found here:
+# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html
+[licenses]
+# The lint level for crates which do not have a detectable license
+unlicensed = "deny"
+# List of explicitly allowed licenses
+# See https://spdx.org/licenses/ for list of possible licenses
+# [possible values: any SPDX 3.7 short identifier (+ optional exception)].
+allow = [
+ "BlueOak-1.0.0"
+]
+# List of explicitly disallowed licenses
+# See https://spdx.org/licenses/ for list of possible licenses
+# [possible values: any SPDX 3.7 short identifier (+ optional exception)].
+deny = [
+ #"Nokia",
+]
+# Lint level for licenses considered copyleft
+copyleft = "allow"
+# Blanket approval or denial for OSI-approved or FSF Free/Libre licenses
+# * both - The license will be approved if it is both OSI-approved *AND* FSF
+# * either - The license will be approved if it is either OSI-approved *OR* FSF
+# * osi-only - The license will be approved if is OSI-approved *AND NOT* FSF
+# * fsf-only - The license will be approved if is FSF *AND NOT* OSI-approved
+# * neither - This predicate is ignored and the default lint level is used
+allow-osi-fsf-free = "either"
+# Lint level used when no other predicates are matched
+# 1. License isn't in the allow or deny lists
+# 2. License isn't copyleft
+# 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither"
+default = "deny"
+# The confidence threshold for detecting a license from license text.
+# The higher the value, the more closely the license text must be to the
+# canonical license text of a valid SPDX license file.
+# [possible values: any between 0.0 and 1.0].
+confidence-threshold = 0.9
+# Allow 1 or more licenses on a per-crate basis, so that particular licenses
+# aren't accepted for every possible crate as with the normal allow list
+exceptions = [
+ # Each entry is the crate and version constraint, and its specific allow
+ # list
+ #{ allow = ["Zlib"], name = "adler32", version = "*" },
+]
+
+# Some crates don't have (easily) machine readable licensing information,
+# adding a clarification entry for it allows you to manually specify the
+# licensing information
+[[licenses.clarify]]
+# The name of the crate the clarification applies to
+name = "ring"
+# The optional version constraint for the crate
+#version = "*"
+# The SPDX expression for the license requirements of the crate
+expression = "OpenSSL"
+# One or more files in the crate's source used as the "source of truth" for
+# the license expression. If the contents match, the clarification will be used
+# when running the license check, otherwise the clarification will be ignored
+# and the crate will be checked normally, which may produce warnings or errors
+# depending on the rest of your configuration
+license-files = [
+ # Each entry is a crate relative path, and the (opaque) hash of its contents
+ { path = "LICENSE", hash = 0xbd0eed23 }
+]
+[[licenses.clarify]]
+name = "webpki"
+expression = "ISC"
+license-files = [{ path = "LICENSE", hash = 0x001c7e6c }]
+
+[licenses.private]
+# If true, ignores workspace crates that aren't published, or are only
+# published to private registries
+ignore = false
+# One or more private registries that you might publish crates to, if a crate
+# is only published to private registries, and ignore is true, the crate will
+# not have its license(s) checked
+registries = [
+ #"https://sekretz.com/registry
+]
+
+# This section is considered when running `cargo deny check bans`.
+# More documentation about the 'bans' section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html +[bans] +# Lint level for when multiple versions of the same crate are detected +multiple-versions = "warn" +# The graph highlighting used when creating dotgraphs for crates +# with multiple versions +# * lowest-version - The path to the lowest versioned duplicate is highlighted +# * simplest-path - The path to the version with the fewest edges is highlighted +# * all - Both lowest-version and simplest-path are used +highlight = "lowest-version" +# List of crates that are allowed. Use with care! +allow = [ + #{ name = "ansi_term", version = "=0.11.0" }, +] +# List of crates to deny +deny = [ + { name = "parity-util-mem", version = "<0.6" } + # Each entry the name of a crate and a version range. If version is + # not specified, all versions will be matched. +] +# Certain crates/versions that will be skipped when doing duplicate detection. +skip = [ + #{ name = "ansi_term", version = "=0.11.0" }, +] +# Similarly to `skip` allows you to skip certain crates during duplicate +# detection. Unlike skip, it also includes the entire tree of transitive +# dependencies starting at the specified crate, up to a certain depth, which is +# by default infinite +skip-tree = [ + #{ name = "ansi_term", version = "=0.11.0", depth = 20 }, +] + +# This section is considered when running `cargo deny check sources`. +# More documentation about the 'sources' section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html +[sources] +# Lint level for what to happen when a crate from a crate registry that is not +# in the allow list is encountered +unknown-registry = "deny" +# Lint level for what to happen when a crate from a git repository that is not +# in the allow list is encountered +unknown-git = "allow" +# List of URLs for allowed crate registries. Defaults to the crates.io index +# if not specified. 
If it is specified but empty, no registries are allowed. +allow-registry = ["https://github.com/rust-lang/crates.io-index"] +# List of URLs for allowed Git repositories +allow-git = [] diff --git a/polkadot/deployments/BridgeDeps.Dockerfile b/polkadot/deployments/BridgeDeps.Dockerfile new file mode 100644 index 00000000000..a18a94a7155 --- /dev/null +++ b/polkadot/deployments/BridgeDeps.Dockerfile @@ -0,0 +1,32 @@ +# Image with dependencies required to build projects from the bridge repo. +# +# This image is meant to be used as a building block when building images for +# the various components in the bridge repo, such as nodes and relayers. +FROM ubuntu:20.04 + +ENV LAST_DEPS_UPDATE 2021-04-01 +ENV DEBIAN_FRONTEND=noninteractive + +RUN set -eux; \ + apt-get update && \ + apt-get install -y curl ca-certificates && \ + apt-get install -y cmake pkg-config libssl-dev git clang libclang-dev + +ENV LAST_CERTS_UPDATE 2021-04-01 + +RUN update-ca-certificates && \ + curl https://sh.rustup.rs -sSf | sh -s -- -y + +ENV PATH="/root/.cargo/bin:${PATH}" +ENV LAST_RUST_UPDATE 2021-04-01 + +RUN rustup update stable && \ + rustup install nightly && \ + rustup target add wasm32-unknown-unknown --toolchain nightly + +RUN rustc -vV && \ + cargo -V && \ + gcc -v && \ + cmake --version + +ENV RUST_BACKTRACE 1 diff --git a/polkadot/deployments/README.md b/polkadot/deployments/README.md new file mode 100644 index 00000000000..d553fca611a --- /dev/null +++ b/polkadot/deployments/README.md @@ -0,0 +1,254 @@ +# Bridge Deployments + +## Requirements +Make sure to install `docker` and `docker-compose` to be able to run and test bridge deployments. If +for whatever reason you can't or don't want to use Docker, you can find some scripts for running the +bridge [here](https://github.com/svyatonik/parity-bridges-common.test). + +## Networks +One of the building blocks we use for our deployments are _networks_. A network is a collection of +homogenous blockchain nodes. 
We have Docker Compose files for each network that we want to bridge.
+Each of the compose files found in the `./networks` folder is able to independently spin up a
+network like so:
+
+```bash
+docker-compose -f ./networks/rialto.yml up
+```
+
+After running this command we would have a network of several nodes producing blocks.
+
+## Bridges
+A _bridge_ is a way for several _networks_ to connect to one another. Bridge deployments have their
+own Docker Compose files which can be found in the `./bridges` folder. These Compose files typically
+contain bridge relayers, which are services external to blockchain nodes, and other components such
+as testing infrastructure, or user interfaces.
+
+Unlike the network Compose files, these *cannot* be deployed on their own. They must be combined
+with different networks.
+
+In general, we can deploy the bridge using `docker-compose up` in the following way:
+
+```bash
+docker-compose -f <bridge>.yml \
+ -f <network_1>.yml \
+ -f <network_2>.yml \
+ -f <monitoring>.yml up
+```
+
+If you want to see how the Compose commands are actually run, check out the source code of the
+[`./run.sh`](./run.sh).
+
+One thing worth noting is that we have a _monitoring_ Compose file. This adds support for Prometheus
+and Grafana. We cover these in more details in the [Monitoring](#monitoring) section. At the moment
+the monitoring Compose file is _not_ optional, and must be included for bridge deployments.
+
+### Running and Updating Deployments
+We currently support two bridge deployments
+1. Ethereum PoA to Rialto Substrate
+2. Rialto Substrate to Millau Substrate
+
+These bridges can be deployed using our [`./run.sh`](./run.sh) script.
+
+The first argument it takes is the name of the bridge you want to run. Right now we only support two
+bridges: `poa-rialto` and `rialto-millau`.
+
+```bash
+./run.sh poa-rialto
+```
+
+If you add a second `update` argument to the script it will pull the latest images from Docker Hub
+and restart the deployment.
+ +```bash +./run.sh rialto-millau update +``` + +You can also bring down a deployment using the script with the `stop` argument. + +```bash +./run.sh poa-rialto stop +``` + +### Adding Deployments +We need two main things when adding a new deployment. First, the new network which we want to +bridge. A compose file for the network should be added in the `/networks/` folder. Secondly we'll +need a new bridge Compose file in `./bridges/`. This should configure the bridge relayer nodes +correctly for the two networks, and add any additional components needed for the deployment. If you +want you can also add support in the `./run` script for the new deployment. While recommended it's +not strictly required. + +## General Notes + +Rialto authorities are named: `Alice`, `Bob`, `Charlie`, `Dave`, `Eve`. +Rialto-PoA authorities are named: `Arthur`, `Bertha`, `Carlos`. +Millau authorities are named: `Alice`, `Bob`, `Charlie`, `Dave`, `Eve`. + +Both authorities and following accounts have enough funds (for test purposes) on corresponding Substrate chains: + +- on Rialto: `Ferdie`, `George`, `Harry`. +- on Millau: `Ferdie`, `George`, `Harry`. + +Names of accounts on Substrate (Rialto and Millau) chains may be prefixed with `//` and used as +seeds for the `sr25519` keys. This seed may also be used in the signer argument in Substrate +and PoA relays. Example: + +```bash +./substrate-relay relay-headers RialtoToMillau \ + --source-host rialto-node-alice \ + --source-port 9944 \ + --target-host millau-node-alice \ + --target-port 9944 \ + --source-signer //Harry \ + --prometheus-host=0.0.0.0 +``` + +Some accounts are used by bridge components. Using these accounts to sign other transactions +is not recommended, because this may lead to nonces conflict. 
+ +Following accounts are used when `poa-rialto` bridge is running: + +- Rialto's `Alice` signs relay transactions with new Rialto-PoA headers; +- Rialto's `Bob` signs relay transactions with Rialto-PoA -> Rialto currency exchange proofs. +- Rialto-PoA's `Arthur`: signs relay transactions with new Rialto headers; +- Rialto-PoA's `Bertha`: signs currency exchange transactions. + +Following accounts are used when `rialto-millau` bridge is running: + +- Millau's `Charlie` signs complex headers+messages relay transactions on Millau chain; +- Rialto's `Charlie` signs complex headers+messages relay transactions on Rialto chain; +- Millau's `Dave` signs Millau transactions which contain messages for Rialto; +- Rialto's `Dave` signs Rialto transactions which contain messages for Millau; +- Millau's `Eve` signs relay transactions with message delivery confirmations (lane 00000001) from Rialto to Millau; +- Rialto's `Eve` signs relay transactions with messages (lane 00000001) from Millau to Rialto; +- Millau's `Ferdie` signs relay transactions with messages (lane 00000001) from Rialto to Millau; +- Rialto's `Ferdie` signs relay transactions with message delivery confirmations (lane 00000001) from Millau to Rialto. + +Following accounts are used when `westend-millau` bridge is running: + +- Millau's `George` signs relay transactions with new Westend headers. + +### Docker Usage +When the network is running you can query logs from individual nodes using: + +```bash +docker logs rialto_poa-node-bertha_1 -f +``` + +To kill all left over containers and start the network from scratch next time: +```bash +docker ps -a --format "{{.ID}}" | xargs docker rm # This removes all containers! 
+```
+
+### Docker Compose Usage
+If you're not familiar with how to use `docker-compose` here are some useful commands you'll need
+when interacting with the bridge deployments:
+
+```bash
+docker-compose pull # Get the latest images from the Docker Hub
+docker-compose build # This is going to build images
+docker-compose up # Start all the nodes
+docker-compose up -d # Start the nodes in detached mode.
+docker-compose down # Stop the network.
+```
+
+Note that for the bridge deployments you'll need to add the appropriate `-f` arguments that were mentioned in the
+[Bridges](#bridges) section. You can read more about using multiple Compose files
+[here](https://docs.docker.com/compose/extends/#multiple-compose-files). One thing worth noting is
+that the _order_ the compose files are specified in matters. A different order will result in a
+different configuration.
+
+You can sanity check the final config like so:
+
+```bash
+docker-compose -f docker-compose.yml -f docker-compose.override.yml config > docker-compose.merged.yml
+```
+
+## Docker and Git Deployment
+It is also possible to avoid using images from the Docker Hub and instead build
+containers from Git. There are two ways to build the images this way.
+
+### Git Repo
+If you have cloned the bridges repo you can build local Docker images by running the following
+command at the top level of the repo:
+
+```bash
+docker build . -t local/<project> --build-arg=PROJECT=<project>
+```
+
+This will build a local image of a particular component with a tag of
+`local/<project>`. This tag can be used in Docker Compose files.
+
+You can configure the build using Docker
+[build arguments](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg).
+Here are the arguments currently supported:
+ - `BRIDGE_REPO`: Git repository of the bridge node and relay code
+ - `BRIDGE_HASH`: Commit hash within that repo (can also be a branch or tag)
+ - `ETHEREUM_REPO`: Git repository of the OpenEthereum client
+ - `ETHEREUM_HASH`: Commit hash within that repo (can also be a branch or tag)
+ - `PROJECT`: Project to build within the bridges repo. Can be one of:
+ - `rialto-bridge-node`
+ - `millau-bridge-node`
+ - `ethereum-poa-relay`
+ - `substrate-relay`
+
+### GitHub Actions
+We have a nightly job which runs and publishes Docker images for the different nodes and relayers to
+the [ParityTech Docker Hub](https://hub.docker.com/u/paritytech) organization. These images are used
+for our ephemeral (temporary) test networks. Additionally, any time a tag in the form of `v*` is
+pushed to GitHub the publishing job is run. This will build all the components (nodes, relayers) and
+publish them.
+
+With images built using either method, all you have to do to use them in a deployment is change the
+`image` field in the existing Docker Compose files to point to the tag of the image you want to use.
+
+### Monitoring
+[Prometheus](https://prometheus.io/) is used by the bridge relay to monitor information such as system
+resource use, and block data (e.g. the best blocks it knows about). In order to visualize this data
+a [Grafana](https://grafana.com/) dashboard can be used.
+
+As part of the Rialto `docker-compose` setup we spin up a Prometheus server and Grafana dashboard. The
+Prometheus server connects to the Prometheus data endpoint exposed by the bridge relay. The Grafana
+dashboard uses the Prometheus server as its data source.
+
+The default port for the bridge relay's Prometheus data is `9616`. The host and port can be
+configured through the `--prometheus-host` and `--prometheus-port` flags. The Prometheus server's
+dashboard can be accessed at `http://localhost:9090`.
The Grafana dashboard can be accessed at +`http://localhost:3000`. Note that the default log-in credentials for Grafana are `admin:admin`. + +### Environment Variables +Here is an example `.env` file which is used for production deployments and network updates. For +security reasons it is not kept as part of version control. When deploying a network this +file should be correctly populated and kept in the appropriate [`bridges`](`./bridges`) deployment +folder. + +The `UI_SUBSTRATE_PROVIDER` variable lets you define the url of the Substrate node that the user +interface will connect to. `UI_ETHEREUM_PROVIDER` is used only as a guidance for users to connect +Metamask to the right Ethereum network. `UI_EXPECTED_ETHEREUM_NETWORK_ID` is used by +the user interface as a fail safe to prevent users from connecting their Metamask extension to an +unexpected network. + +```bash +GRAFANA_ADMIN_PASS=admin_pass +GRAFANA_SERVER_ROOT_URL=%(protocol)s://%(domain)s:%(http_port)s/ +GRAFANA_SERVER_DOMAIN=server.domain.io +MATRIX_ACCESS_TOKEN="access-token" +WITH_PROXY=1 # Optional +UI_SUBSTRATE_PROVIDER=ws://localhost:9944 +UI_ETHEREUM_PROVIDER=http://localhost:8545 +UI_EXPECTED_ETHEREUM_NETWORK_ID=105 +``` + +### UI + +Use [wss://rialto.bridges.test-installations.parity.io/](https://polkadot.js.org/apps/) +as a custom endpoint for [https://polkadot.js.org/apps/](https://polkadot.js.org/apps/). + +### Polkadot.js UI + +To teach the UI decode our custom types used in the pallet, go to: `Settings -> Developer` +and import the [`./types.json`](./types.json) + +## Scripts + +The are some bash scripts in `scripts` folder that allow testing `Relay` +without running the entire network within docker. Use if needed for development. 
diff --git a/polkadot/deployments/bridges/poa-rialto/Front-end.Dockerfile b/polkadot/deployments/bridges/poa-rialto/Front-end.Dockerfile new file mode 100644 index 00000000000..427f0504e57 --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/Front-end.Dockerfile @@ -0,0 +1,26 @@ +FROM node:12 as build-deps + +# install tools and dependencies +RUN set -eux; \ + apt-get install -y git + +# clone UI repo +RUN cd /usr/src/ && git clone https://github.com/paritytech/bridge-ui.git +WORKDIR /usr/src/bridge-ui +RUN yarn +ARG SUBSTRATE_PROVIDER +ARG ETHEREUM_PROVIDER +ARG EXPECTED_ETHEREUM_NETWORK_ID + +ENV SUBSTRATE_PROVIDER $SUBSTRATE_PROVIDER +ENV ETHEREUM_PROVIDER $ETHEREUM_PROVIDER +ENV EXPECTED_ETHEREUM_NETWORK_ID $EXPECTED_ETHEREUM_NETWORK_ID + +RUN yarn build:docker + +# Stage 2 - the production environment +FROM nginx:1.12 +COPY --from=build-deps /usr/src/bridge-ui/nginx/*.conf /etc/nginx/conf.d/ +COPY --from=build-deps /usr/src/bridge-ui/dist /usr/share/nginx/html +EXPOSE 80 +CMD ["nginx", "-g", "daemon off;"] diff --git a/polkadot/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-exchange-dashboard.json b/polkadot/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-exchange-dashboard.json new file mode 100644 index 00000000000..7e197bb882f --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-exchange-dashboard.json @@ -0,0 +1,474 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "links": [], + "panels": [ + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": 
"red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 7, + "x": 0, + "y": 0 + }, + "id": 2, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "Ethereum_to_Substrate_Exchange_best_block_numbers", + "instant": true, + "interval": "", + "legendFormat": "Best {{type}} block", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Best finalized blocks", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 7, + "y": 0 + }, + "id": 12, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "Ethereum_to_Substrate_Exchange_processed_blocks", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Number of processed blocks since last restart", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 6, + "x": 12, + "y": 0 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": 
false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "Ethereum_to_Substrate_Exchange_system_average_load", + "interval": "", + "legendFormat": "Average system load in last {{over}}", + "refId": "A" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": null + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average System Load", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 6, + "x": 18, + "y": 0 + }, + "id": 8, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "Ethereum_to_Substrate_Exchange_process_cpu_usage_percentage", + "interval": "", + "legendFormat": "1 CPU = 100", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": 
"Relay Process CPU Usage", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 7 + }, + "id": 14, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "Ethereum_to_Substrate_Exchange_processed_transactions", + "instant": true, + "interval": "", + "legendFormat": "{{type}} transactions", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Number of processed transactions since last restart", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "hiddenSeries": false, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "Ethereum_to_Substrate_Exchange_process_memory_usage_bytes / 1024 / 1024", + "interval": "", + "legendFormat": "Process memory, MB", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory 
Usage for Relay Process", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Ethereum PoA to Rialto Exchange Dashboard", + "uid": "relay-poa-to-rialto-exchange", + "version": 1 +} diff --git a/polkadot/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-headers-dashboard.json b/polkadot/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-headers-dashboard.json new file mode 100644 index 00000000000..05d06e94981 --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/dashboard/grafana/relay-poa-to-rialto-headers-dashboard.json @@ -0,0 +1,694 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "links": [], + "panels": [ + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 5 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "5m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": 
"alerting", + "for": "5m", + "frequency": "5m", + "handler": 1, + "message": "", + "name": "Synced Header Difference is Over 5 (Ethereum PoA to Rialto)", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "Shows how many headers behind the target chain is from the source chain.", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"source\"}) - max(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"target\"})", + "format": "table", + "instant": false, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 5 + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Difference Between Source and Target Headers", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + 
"alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 5 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "2m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "3m", + "frequency": "5m", + "handler": 1, + "name": "No New Headers (Ethereum PoA to Rialto)", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "How many headers has the relay synced from the source node in the last 2 mins?", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "hiddenSeries": false, + "id": 16, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max_over_time(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Ethereum_to_Substrate_Sync_best_block_numbers{node=\"source\"}[2m])", + "interval": "", + "legendFormat": "Number of new Headers on Ethereum PoA (Last 2 Mins)", + "refId": "A" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 5 + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Headers Synced on Rialto (Last 2 Mins)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + 
"buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": { + "align": null + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 2, + "interval": "5s", + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "Ethereum_to_Substrate_Sync_best_block_numbers", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Best Known Header on {{node}} Node", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Best Blocks according to Relay", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 8 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": 
false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "Ethereum_to_Substrate_Sync_system_average_load", + "interval": "", + "legendFormat": "Average system load in last {{over}}", + "refId": "A" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": null + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average System Load", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 8 + }, + "id": 12, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "avg_over_time(Ethereum_to_Substrate_Sync_process_cpu_usage_percentage[1m])", + "instant": true, + "interval": "", + "legendFormat": "1 CPU = 100", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Relay Process CPU Usage ", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {}, + 
"mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 14 + }, + "id": 4, + "options": { + "displayMode": "gradient", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showUnfilled": true + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "Ethereum_to_Substrate_Sync_blocks_in_state", + "instant": true, + "interval": "", + "legendFormat": "{{state}}", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Queued Headers in Relay", + "type": "bargauge" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "hiddenSeries": false, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "Ethereum_to_Substrate_Sync_process_memory_usage_bytes / 1024 / 1024", + "interval": "", + "legendFormat": "Process memory, MB", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Usage for Relay Process", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + 
}, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Ethereum PoA to Rialto Header Sync Dashboard", + "uid": "relay-poa-to-rialto-headers", + "version": 1 +} diff --git a/polkadot/deployments/bridges/poa-rialto/dashboard/grafana/relay-rialto-to-poa-headers-dashboard.json b/polkadot/deployments/bridges/poa-rialto/dashboard/grafana/relay-rialto-to-poa-headers-dashboard.json new file mode 100644 index 00000000000..149c637fcb1 --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/dashboard/grafana/relay-rialto-to-poa-headers-dashboard.json @@ -0,0 +1,694 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "links": [], + "panels": [ + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 5 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "5m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "5m", + "handler": 1, + "message": "", + "name": "Synced Header Difference is Over 5 (Rialto to Ethereum PoA)", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": 
{}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "Shows how many headers behind the target chain is from the source chain.", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"source\"}) - max(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"target\"})", + "format": "table", + "instant": false, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 5 + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Difference Between Source and Target Headers", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 5 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + 
"2m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "3m", + "frequency": "5m", + "handler": 1, + "name": "No New Headers (Rialto to Ethereum PoA)", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "How many headers has the relay synced from the source node in the last 2 mins?", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "hiddenSeries": false, + "id": 16, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max_over_time(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Substrate_to_Ethereum_Sync_best_block_numbers{node=\"source\"}[2m])", + "interval": "", + "legendFormat": "Number of new Headers on Rialto (Last 2 Mins)", + "refId": "A" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 5 + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Headers Synced on Ethereum PoA (Last 2 Mins)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": 
"short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": { + "align": null + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 2, + "interval": "5s", + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "Substrate_to_Ethereum_Sync_best_block_numbers", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Best Known Header on {{node}} Node", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Best Blocks according to Relay", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 8 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "Substrate_to_Ethereum_Sync_system_average_load", + "interval": "", + 
"legendFormat": "Average system load in last {{over}}", + "refId": "A" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": null + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average System Load", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 8 + }, + "id": 12, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "avg_over_time(Substrate_to_Ethereum_Sync_process_cpu_usage_percentage[1m])", + "instant": true, + "interval": "", + "legendFormat": "1 CPU = 100", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Relay Process CPU Usage ", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 
12, + "x": 0, + "y": 14 + }, + "id": 4, + "options": { + "displayMode": "gradient", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showUnfilled": true + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "Substrate_to_Ethereum_Sync_blocks_in_state", + "instant": true, + "interval": "", + "legendFormat": "{{state}}", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Queued Headers in Relay", + "type": "bargauge" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "hiddenSeries": false, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "Substrate_to_Ethereum_Sync_process_memory_usage_bytes / 1024 / 1024", + "interval": "", + "legendFormat": "Process memory, MB", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Usage for Relay Process", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": 
true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Rialto to Ethereum PoA Header Sync Dashboard", + "uid": "relay-rialto-to-poa-headers", + "version": 1 +} diff --git a/polkadot/deployments/bridges/poa-rialto/dashboard/prometheus/targets.yml b/polkadot/deployments/bridges/poa-rialto/dashboard/prometheus/targets.yml new file mode 100644 index 00000000000..b0038008ef6 --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/dashboard/prometheus/targets.yml @@ -0,0 +1,4 @@ +- targets: + - relay-headers-poa-to-rialto:9616 + - relay-poa-exchange-rialto:9616 + - relay-headers-rialto-to-poa:9616 diff --git a/polkadot/deployments/bridges/poa-rialto/docker-compose.yml b/polkadot/deployments/bridges/poa-rialto/docker-compose.yml new file mode 100644 index 00000000000..6bdcb230124 --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/docker-compose.yml @@ -0,0 +1,94 @@ +# This Compose file should be built using the Rialto and Eth-PoA node +# compose files. Otherwise it won't work. +# +# Exposed ports: 9616, 9716, 9816, 9916, 8080 + +version: '3.5' +services: + # We override these nodes to make sure we have the correct chain config for this network. + poa-node-arthur: &poa-node + volumes: + - ./bridges/poa-rialto/poa-config:/config + poa-node-bertha: + <<: *poa-node + poa-node-carlos: + <<: *poa-node + + # We provide an override for this particular node since this is a public facing + # node which we use to connect from things like Polkadot JS Apps. 
+ rialto-node-charlie: + environment: + VIRTUAL_HOST: rialto.bridges.test-installations.parity.io,wss.rialto.brucke.link + VIRTUAL_PORT: 9944 + LETSENCRYPT_HOST: rialto.bridges.test-installations.parity.io,wss.rialto.brucke.link + LETSENCRYPT_EMAIL: admin@parity.io + + relay-headers-poa-to-rialto: ð-poa-relay + image: paritytech/ethereum-poa-relay + entrypoint: /entrypoints/relay-headers-poa-to-rialto-entrypoint.sh + volumes: + - ./bridges/poa-rialto/entrypoints:/entrypoints + environment: + RUST_LOG: rpc=trace,bridge=trace + ports: + - "9616:9616" + depends_on: &all-nodes + - poa-node-arthur + - poa-node-bertha + - poa-node-carlos + - rialto-node-alice + - rialto-node-bob + - rialto-node-charlie + - rialto-node-dave + - rialto-node-eve + + relay-poa-exchange-rialto: + <<: *eth-poa-relay + entrypoint: /entrypoints/relay-poa-exchange-rialto-entrypoint.sh + ports: + - "9716:9616" + + relay-headers-rialto-to-poa: + <<: *eth-poa-relay + entrypoint: /entrypoints/relay-headers-rialto-to-poa-entrypoint.sh + ports: + - "9816:9616" + + poa-exchange-tx-generator: + <<: *eth-poa-relay + entrypoint: /entrypoints/poa-exchange-tx-generator-entrypoint.sh + environment: + EXCHANGE_GEN_MIN_AMOUNT_FINNEY: ${EXCHANGE_GEN_MIN_AMOUNT_FINNEY:-1} + EXCHANGE_GEN_MAX_AMOUNT_FINNEY: ${EXCHANGE_GEN_MAX_AMOUNT_FINNEY:-100000} + EXCHANGE_GEN_MAX_SUBMIT_DELAY_S: ${EXCHANGE_GEN_MAX_SUBMIT_DELAY_S:-60} + ports: + - "9916:9616" + depends_on: + - relay-headers-poa-to-rialto + - relay-headers-rialto-to-poa + + front-end: + build: + context: . + dockerfile: ./bridges/poa-rialto/Front-end.Dockerfile + args: + SUBSTRATE_PROVIDER: ${UI_SUBSTRATE_PROVIDER:-ws://localhost:9944} + ETHEREUM_PROVIDER: ${UI_ETHEREUM_PROVIDER:-http://localhost:8545} + EXPECTED_ETHEREUM_NETWORK_ID: ${UI_EXPECTED_ETHEREUM_NETWORK_ID:-105} + ports: + - "8080:80" + + # Note: These are being overridden from the top level `monitoring` compose file. 
+ prometheus-metrics: + volumes: + - ./bridges/poa-rialto/dashboard/prometheus/targets.yml:/etc/prometheus/targets-poa-rialto.yml + depends_on: *all-nodes + + grafana-dashboard: + volumes: + - ./bridges/poa-rialto/dashboard/grafana:/etc/grafana/dashboards/poa-rialto:ro + environment: + VIRTUAL_HOST: dashboard.rialto.bridges.test-installations.parity.io,grafana.rialto.brucke.link + VIRTUAL_PORT: 3000 + LETSENCRYPT_HOST: dashboard.rialto.bridges.test-installations.parity.io,grafana.rialto.brucke.link + LETSENCRYPT_EMAIL: admin@parity.io diff --git a/polkadot/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh b/polkadot/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh new file mode 100755 index 00000000000..9af373b0216 --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/entrypoints/poa-exchange-tx-generator-entrypoint.sh @@ -0,0 +1,99 @@ +#!/bin/bash + +# THIS SCRIPT IS NOT INTENDED FOR USE IN PRODUCTION ENVIRONMENT +# +# This scripts periodically calls relay binary to generate PoA -> Substrate +# exchange transaction from hardcoded PoA senders (assuming they have +# enough funds) to hardcoded Substrate recipients. + +set -eu + +# Path to relay binary +RELAY_BINARY_PATH=${RELAY_BINARY_PATH:-./ethereum-poa-relay} +# Ethereum node host +ETH_HOST=${ETH_HOST:-poa-node-arthur} +# Ethereum node websocket port +ETH_PORT=${ETH_PORT:-8546} +# Ethereum chain id +ETH_CHAIN_ID=${ETH_CHAIN_ID:-105} + +# All possible Substrate recipients (hex-encoded public keys) +SUB_RECIPIENTS=( + # Alice (5GrwvaEF...) + "d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d"\ + # Bob (5FHneW46...) + "8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48"\ + # Charlie (5FLSigC9...) + "90b5ab205c6974c9ea841be688864633dc9ca8a357843eeacf2314649965fe22"\ + # Dave (5DAAnrj7...) + "306721211d5404bd9da88e0204360a1a9ab8b87c66c1bc2fcdd37f3c2222cc20"\ + # Eve (5HGjWAeF...) 
+ "e659a7a1628cdd93febc04a4e0646ea20e9f5f0ce097d9a05290d4a9e054df4e"\ + # Ferdie (5CiPPseX...) + "1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c" +) +# All possible Ethereum signers (hex-encoded private keys) +# (note that we're tracking nonce here => sender must not send concurrent transactions) +ETH_SIGNERS=( + # Bertha account (0x007594304039c2937a12220338aab821d819f5a4) and its current nonce (unknown by default) + "bc10e0f21e33456ade82182dd1ebdbdd89bca923d4e4adbd90fb5b44d7098cbe" "" +) +# Minimal exchange amount (in finney) +MIN_EXCHANGE_AMOUNT_FINNEY=${EXCHANGE_GEN_MIN_AMOUNT_FINNEY:-1} # 0.1 ETH +# Maximal exchange amount (in finney) +MAX_EXCHANGE_AMOUNT_FINNEY=${EXCHANGE_GEN_MAX_AMOUNT_FINNEY:-100000} # 100 ETH +# Max delay before submitting transactions (s) +MAX_SUBMIT_DELAY_S=${EXCHANGE_GEN_MAX_SUBMIT_DELAY_S:-60} + +while true +do + # sleep some time + SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1` + echo "Sleeping $SUBMIT_DELAY_S seconds..." + sleep $SUBMIT_DELAY_S + + # select recipient + SUB_RECIPIENTS_MAX_INDEX=$((${#SUB_RECIPIENTS[@]} - 1)) + SUB_RECIPIENT_INDEX=`shuf -i 0-$SUB_RECIPIENTS_MAX_INDEX -n 1` + SUB_RECIPIENT=${SUB_RECIPIENTS[$SUB_RECIPIENT_INDEX]} + + # select signer + ETH_SIGNERS_MAX_INDEX=$(((${#ETH_SIGNERS[@]} - 1) / 2)) + ETH_SIGNERS_INDEX=`shuf -i 0-$ETH_SIGNERS_MAX_INDEX -n 1` + ETH_SIGNER_INDEX=$(($ETH_SIGNERS_INDEX * 2)) + ETH_SIGNER_NONCE_INDEX=$(($ETH_SIGNER_INDEX + 1)) + ETH_SIGNER=${ETH_SIGNERS[$ETH_SIGNER_INDEX]} + ETH_SIGNER_NONCE=${ETH_SIGNERS[$ETH_SIGNER_NONCE_INDEX]} + if [ -z $ETH_SIGNER_NONCE ]; then + ETH_SIGNER_NONCE_ARG= + else + ETH_SIGNER_NONCE_ARG=`printf -- "--eth-nonce=%s" $ETH_SIGNER_NONCE` + fi + + # select amount + EXCHANGE_AMOUNT_FINNEY=`shuf -i $MIN_EXCHANGE_AMOUNT_FINNEY-$MAX_EXCHANGE_AMOUNT_FINNEY -n 1` + EXCHANGE_AMOUNT_ETH=`printf "%s000" $EXCHANGE_AMOUNT_FINNEY` + + # submit transaction + echo "Sending $EXCHANGE_AMOUNT_ETH from PoA:$ETH_SIGNER to Substrate:$SUB_RECIPIENT. 
Nonce: $ETH_SIGNER_NONCE" + set -x + SUBMIT_OUTPUT=`$RELAY_BINARY_PATH 2>&1 eth-submit-exchange-tx \ + --sub-recipient=$SUB_RECIPIENT \ + --eth-host=$ETH_HOST \ + --eth-port=$ETH_PORT \ + --eth-chain-id=$ETH_CHAIN_ID \ + --eth-signer=$ETH_SIGNER \ + --eth-amount=$EXCHANGE_AMOUNT_ETH \ + $ETH_SIGNER_NONCE_ARG` + set +x + + # update sender nonce + SUBMIT_OUTPUT_RE='nonce: ([0-9]+)' + if [[ $SUBMIT_OUTPUT =~ $SUBMIT_OUTPUT_RE ]]; then + ETH_SIGNER_NONCE=${BASH_REMATCH[1]} + ETH_SIGNERS[$ETH_SIGNER_NONCE_INDEX]=$(($ETH_SIGNER_NONCE + 1)) + else + echo "Missing nonce in relay response: $SUBMIT_OUTPUT" + exit 1 + fi +done diff --git a/polkadot/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh b/polkadot/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh new file mode 100755 index 00000000000..2f051d40d5c --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh @@ -0,0 +1,15 @@ +#!/bin/bash +set -xeu + +sleep 3 +curl -v http://poa-node-arthur:8545/api/health +curl -v http://poa-node-bertha:8545/api/health +curl -v http://poa-node-carlos:8545/api/health +curl -v http://rialto-node-alice:9933/health +curl -v http://rialto-node-bob:9933/health +curl -v http://rialto-node-charlie:9933/health + +/home/user/ethereum-poa-relay eth-to-sub \ + --sub-host rialto-node-alice \ + --eth-host poa-node-arthur \ + --prometheus-host=0.0.0.0 diff --git a/polkadot/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh b/polkadot/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh new file mode 100755 index 00000000000..1e51d2d32d1 --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -xeu + +sleep 10 + +curl -v http://rialto-node-bob:9933/health +curl -v http://poa-node-bertha:8545/api/health + +# Try to deploy 
contracts first +# networkID = 0x69 +# Arthur's key. +/home/user/ethereum-poa-relay eth-deploy-contract \ + --eth-chain-id 105 \ + --eth-signer 0399dbd15cf6ee8250895a1f3873eb1e10e23ca18e8ed0726c63c4aea356e87d \ + --sub-host rialto-node-bob \ + --eth-host poa-node-bertha || echo "Failed to deploy contracts." + +sleep 10 +echo "Starting SUB -> ETH relay" +/home/user/ethereum-poa-relay sub-to-eth \ + --eth-contract c9a61fb29e971d1dabfd98657969882ef5d0beee \ + --eth-chain-id 105 \ + --eth-signer 0399dbd15cf6ee8250895a1f3873eb1e10e23ca18e8ed0726c63c4aea356e87d \ + --sub-host rialto-node-bob \ + --eth-host poa-node-bertha \ + --prometheus-host=0.0.0.0 diff --git a/polkadot/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh b/polkadot/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh new file mode 100755 index 00000000000..7be12000b91 --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh @@ -0,0 +1,16 @@ +#!/bin/bash +set -xeu + +sleep 3 +curl -v http://poa-node-arthur:8545/api/health +curl -v http://poa-node-bertha:8545/api/health +curl -v http://poa-node-carlos:8545/api/health +curl -v http://rialto-node-alice:9933/health +curl -v http://rialto-node-bob:9933/health +curl -v http://rialto-node-charlie:9933/health + +/home/user/ethereum-poa-relay eth-exchange-sub \ + --sub-host rialto-node-alice \ + --sub-signer //Bob \ + --eth-host poa-node-arthur \ + --prometheus-host=0.0.0.0 diff --git a/polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/address_book.json b/polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/address_book.json new file mode 100644 index 00000000000..9e26dfeeb6e --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/address_book.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/arthur.json 
b/polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/arthur.json new file mode 100644 index 00000000000..fa59a46480c --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/arthur.json @@ -0,0 +1 @@ +{"id":"dd04f316-bc9d-2deb-4a34-51014cd5f34f","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"aa91e6f0e6cf48208be4a1bcf15c6f30"},"ciphertext":"6e057599b13a87e8181bb39a40e14848fdc97958d493ddfa6bb1260350f69328","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"79dd8c09c5c066b830179a2558a51efca6d97c0db2c4128090a01835786823c5"},"mac":"8f8b8e2c9de29ec8eefc54a60055e30ae7ff4dd4a367eaf38880edb887da771e"},"address":"005e714f896a8b7cede9d38688c1a81de72a58e4","name":"","meta":"{}"} \ No newline at end of file diff --git a/polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/bertha.json b/polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/bertha.json new file mode 100644 index 00000000000..7168ec4f71f --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/bertha.json @@ -0,0 +1 @@ +{"id":"6d1e690f-0b52-35f7-989b-46100e7c65ed","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"a5b4d0466834e75c9fd29c6cbbac57ad"},"ciphertext":"102ac328cbe66d8cb8515c42e3268776a9be4419a5cb7b79852860b1e691c15b","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"e8daf2e70086b0cacf925d368fd3f60cada1285e39a42c4cc73c135368cfdbef"},"mac":"1bc3b750900a1143c64ba9e677d69e1093aab47cb003ba09f3cd595a3b422db5"},"address":"007594304039c2937a12220338aab821d819f5a4","name":"","meta":"{}"} \ No newline at end of file diff --git a/polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/carlos.json b/polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/carlos.json new file mode 100644 index 00000000000..2f9759f7bdf --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/carlos.json 
@@ -0,0 +1 @@ +{"id":"ffaebba1-f1b9-8758-7034-0314040b1396","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"97f124bc8a7bf55d00eb2755c2b50364"},"ciphertext":"b87827816f33d2bef2dc3102a8a7744b86912f8ace10e45cb282a13487769ed2","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"3114c67a05bff7831d112083f566b176bfc874aea160eebadbe5564e406ee85c"},"mac":"e9bfe8fd6f612bc036bb57659297fc03db022264f5086a1b5726972d3ab6f64a"},"address":"004e7a39907f090e19b0b80a277e77b72b22e269","name":"","meta":"{}"} \ No newline at end of file diff --git a/polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/diego.json b/polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/diego.json new file mode 100644 index 00000000000..f1df56b8413 --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/poa-config/keys/BridgePoa/diego.json @@ -0,0 +1 @@ +{"id":"ef9eb431-dc73-cf31-357e-736f64febe68","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"7077f1c4170d9fc2e05c5956be32fb51"},"ciphertext":"a053be448768d984257aeb8f9c7913e3f54c6e6e741accad9f09dd70c2d9828c","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"12580aa4624040970301e7474d3f9b2a93552bfe9ea2517f7119ccf8e91ebd0d"},"mac":"796dbb48adcfc09041fe39121632801d9f950d3c73dd47105180d8097d4f4491"},"address":"00eed42bf93b498f28acd21d207427a14074defe","name":"","meta":"{}"} \ No newline at end of file diff --git a/polkadot/deployments/bridges/poa-rialto/poa-config/pass b/polkadot/deployments/bridges/poa-rialto/poa-config/pass new file mode 100644 index 00000000000..f3097ab1308 --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/poa-config/pass @@ -0,0 +1 @@ +password diff --git a/polkadot/deployments/bridges/poa-rialto/poa-config/poa-node-config b/polkadot/deployments/bridges/poa-rialto/poa-config/poa-node-config new file mode 100644 index 00000000000..2b3c56453d7 --- /dev/null +++ 
b/polkadot/deployments/bridges/poa-rialto/poa-config/poa-node-config @@ -0,0 +1,20 @@ +[parity] +chain = "/config/poa.json" +keys_path = "/config/keys" +no_persistent_txqueue = true + +[account] +password = ["/config/pass"] + +[network] +reserved_peers = "/config/reserved" + +[rpc] +apis = ["all"] +cors = ["moz-extension://*", "chrome-extension://*"] + +[mining] +force_sealing = true + +[misc] +unsafe_expose = true diff --git a/polkadot/deployments/bridges/poa-rialto/poa-config/poa.json b/polkadot/deployments/bridges/poa-rialto/poa-config/poa.json new file mode 100644 index 00000000000..12a8a58f263 --- /dev/null +++ b/polkadot/deployments/bridges/poa-rialto/poa-config/poa.json @@ -0,0 +1,184 @@ +{ + "name": "BridgePoa", + "engine": { + "authorityRound": { + "params": { + "stepDuration": 10, + "validators": { + "list": [ + "0x005e714f896a8b7cede9d38688c1a81de72a58e4", + "0x007594304039c2937a12220338aab821d819f5a4", + "0x004e7a39907f090e19b0b80a277e77b72b22e269" + ] + }, + "validateScoreTransition": 0, + "validateStepTransition": 0, + "maximumUncleCountTransition": 0, + "maximumUncleCount": 0, + "emptyStepsTransition": "0xfffffffff", + "maximumEmptySteps": 1 + } + } + }, + "params": { + "accountStartNonce": "0x0", + "eip1014Transition": "0x0", + "eip1052Transition": "0x0", + "eip140Transition": "0x0", + "eip145Transition": "0x0", + "eip150Transition": "0x0", + "eip155Transition": "0x0", + "eip160Transition": "0x0", + "eip161abcTransition": "0x0", + "eip161dTransition": "0x0", + "eip211Transition": "0x0", + "eip214Transition": "0x0", + "eip658Transition": "0x0", + "eip98Transition": "0x7fffffffffffff", + "gasLimitBoundDivisor": "0x0400", + "maxCodeSize": 24576, + "maxCodeSizeTransition": "0x0", + "maximumExtraDataSize": "0x20", + "minGasLimit": "0x1388", + "networkID" : "0x69", + "validateChainIdTransition": "0x0", + "validateReceiptsTransition": "0x0" + }, + "genesis": { + "seal": { + "authorityRound": { + "step": "0x0", + "signature": 
"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } + }, + "difficulty": "0x20000", + "author": "0x0000000000000000000000000000000000000000", + "timestamp": "0x00", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData": "0x", + "gasLimit": "0x222222" + }, + "accounts": { + "0000000000000000000000000000000000000001": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, + "0000000000000000000000000000000000000002": { "balance": "1", "nonce": "1048576", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } }, + "0000000000000000000000000000000000000003": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, + "0000000000000000000000000000000000000004": { "balance": "1", "nonce": "1048576", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, + "0000000000000000000000000000000000000005": { "balance": "1", "builtin": { "name": "modexp", "activate_at": 0, "pricing": { "modexp": { "divisor": 20 } } } }, + "0000000000000000000000000000000000000006": { + "balance": "1", + "builtin": { + "name": "alt_bn128_add", + "pricing": { + "0": { + "price": { "alt_bn128_const_operations": { "price": 500 }} + }, + "0x7fffffffffffff": { + "info": "EIP 1108 transition", + "price": { "alt_bn128_const_operations": { "price": 150 }} + } + } + } + }, + "0000000000000000000000000000000000000007": { + "balance": "1", + "builtin": { + "name": "alt_bn128_mul", + "pricing": { + "0": { + "price": { "alt_bn128_const_operations": { "price": 40000 }} + }, + "0x7fffffffffffff": { + "info": "EIP 1108 transition", + "price": { "alt_bn128_const_operations": { "price": 6000 }} + } + } + } + }, + "0000000000000000000000000000000000000008": { + "balance": "1", 
+ "builtin": { + "name": "alt_bn128_pairing", + "pricing": { + "0": { + "price": { "alt_bn128_pairing": { "base": 100000, "pair": 80000 }} + }, + "0x7fffffffffffff": { + "info": "EIP 1108 transition", + "price": { "alt_bn128_pairing": { "base": 45000, "pair": 34000 }} + } + } + } + }, + "0x0000000000000000000000000000000000000009": { + "builtin": { + "name": "blake2_f", + "activate_at": "0xd751a5", + "pricing": { + "blake2_f": { + "gas_per_round": 1 + } + } + } + }, + "0x0000000000000000000000000000000000000010": { + "builtin": { + "name": "parse_substrate_header", + "pricing": { + "linear": { + "base": 3000, + "word": 0 + } + } + } + }, + "0x0000000000000000000000000000000000000011": { + "builtin": { + "name": "get_substrate_header_signal", + "pricing": { + "linear": { + "base": 3000, + "word": 0 + } + } + } + }, + "0x0000000000000000000000000000000000000012": { + "builtin": { + "name": "verify_substrate_finality_proof", + "pricing": { + "linear": { + "base": 3000, + "word": 0 + } + } + } + }, + "0x0000000000000000000000000000000000000013": { + "builtin": { + "name": "my_test", + "pricing": { + "linear": { + "base": 3000, + "word": 0 + } + } + } + }, + "0x005e714f896a8b7cede9d38688c1a81de72a58e4": { + "balance": "1606938044258990275541962092341162602522202993782792835301376", + "nonce": "0x1" + }, + "0x007594304039c2937a12220338aab821d819f5a4": { + "balance": "1606938044258990275541962092341162602522202993782792835301376", + "nonce": "0x1" + }, + "0x004e7a39907f090e19b0b80a277e77b72b22e269": { + "balance": "1606938044258990275541962092341162602522202993782792835301376", + "nonce": "0x1" + }, + "0x00eed42bf93b498f28acd21d207427a14074defe": { + "balance": "1606938044258990275541962092341162602522202993782792835301376", + "nonce": "0x1" + } + } +} diff --git a/polkadot/deployments/bridges/poa-rialto/poa-config/reserved b/polkadot/deployments/bridges/poa-rialto/poa-config/reserved new file mode 100644 index 00000000000..209d71b7fb3 --- /dev/null +++ 
b/polkadot/deployments/bridges/poa-rialto/poa-config/reserved @@ -0,0 +1,3 @@ +enode://543d0874df46dff238d62547160f9d11e3d21897d7041bbbe46a04d2ee56d9eaf108f2133c0403159624f7647198e224d0755d23ad0e1a50c0912973af6e8a8a@poa-node-arthur:30303 +enode://710de70733e88a24032e53054985f7239e37351f5f3335a468a1a78a3026e9f090356973b00262c346a6608403df2c7107fc4def2cfe4995ea18a41292b9384f@poa-node-bertha:30303 +enode://943525f415b9482f1c49bd39eb979e4e2b406f4137450b0553bffa5cba2928e25ff89ef70f7325aad8a75dbb5955eaecc1aee7ac55d66bcaaa07c8ea58adb23a@poa-node-carlos:30303 diff --git a/polkadot/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json b/polkadot/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json new file mode 100644 index 00000000000..69396162bba --- /dev/null +++ b/polkadot/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json @@ -0,0 +1,1429 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 3, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + 
"spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_best_block_numbers{type=~\"target|target_at_source\"}, \"type\", \"At Rialto\", \"type\", \"target\"), \"type\", \"At Millau\", \"type\", \"target_at_source\")", + "instant": false, + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Best finalized Rialto headers", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_best_block_numbers{type=~\"source|source_at_target\"}, \"type\", \"At Millau\", \"type\", \"source\"), \"type\", \"At Rialto\", 
\"type\", \"source_at_target\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Best finalized Millau headers", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "5m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Messages generated at Millau are not detected by relay", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 9 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Rialto\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + }, + { + "expr": "max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m]) - min_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m])", + "hide": true, + "interval": "", + "legendFormat": "Messages generated in last 5 minutes", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Delivery race (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 9 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + 
"nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Rialto to Millau\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Confirmations race (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "sum" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Messages from Millau to Rialto are not being delivered", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 
1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 20 + }, + "hiddenSeries": false, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "Undelivered messages at Rialto", + "refId": "A" + }, + { + "expr": "increase(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[1m])", + "interval": "", + "legendFormat": "Messages delivered to Rialto in last 1m", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Delivery race lags (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 10 + ], + "type": 
"gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "5m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Too many unconfirmed messages", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 20 + }, + "hiddenSeries": false, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m]))", + "interval": "", + "legendFormat": "Unconfirmed messages at Millau", + "refId": "A" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 10, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Confirmations race lags (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, 
+ "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 10 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "5m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Rewards are not being confirmed", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 20 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))", + "interval": "", + "legendFormat": "Unconfirmed rewards at Rialto", + "refId": "A" + }, + { + "expr": "(scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - 
scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))) * (max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]) > bool min_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", + "interval": "", + "legendFormat": "Unconfirmed rewards at Rialto (zero if messages are not being delivered to Rialto)", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 10, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Reward lags (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Messages (00000001) from Millau to Rialto are not being delivered", + "noDataState": "alerting", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + 
"h": 11, + "w": 12, + "x": 0, + "y": 27 + }, + "hiddenSeries": false, + "id": 21, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000001_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Rialto\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + }, + { + "expr": "increase(Millau_to_Rialto_MessageLane_00000001_lane_state_nonces{type=\"target_latest_received\"}[10m])", + "hide": true, + "interval": "", + "legendFormat": "Messages generated in last 5 minutes", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Delivery race (00000001)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + 
"operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Messages (00000001) from Millau to Rialto are not being confirmed", + "noDataState": "alerting", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 27 + }, + "hiddenSeries": false, + "id": 22, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000001_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Rialto to Millau\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + }, + { + "expr": "increase(Millau_to_Rialto_MessageLane_00000001_lane_state_nonces{type=\"source_latest_confirmed\"}[10m])", + "hide": true, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Confirmations race 
(00000001)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 38 + }, + "id": 16, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "avg_over_time(Millau_to_Rialto_MessageLane_00000000_process_cpu_usage_percentage[1m])", + "instant": true, + "interval": "", + "legendFormat": "1 CPU = 100", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Relay process CPU usage (1 CPU = 100)", + "type": "gauge" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 38 + }, + "hiddenSeries": false, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + 
"pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "Millau_to_Rialto_MessageLane_00000000_system_average_load", + "interval": "", + "legendFormat": "Average system load in last {{over}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "System load average", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 38 + }, + "hiddenSeries": false, + "id": 20, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "Millau_to_Rialto_MessageLane_00000000_process_memory_usage_bytes / 1024 / 1024", + "interval": "", + "legendFormat": "Process memory, MB", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory used by relay 
process", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Millau to Rialto Message Sync Dashboard", + "uid": "relay-millau-to-rialto-messages", + "version": 2 +} diff --git a/polkadot/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json b/polkadot/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json new file mode 100644 index 00000000000..29691e0a060 --- /dev/null +++ b/polkadot/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json @@ -0,0 +1,1420 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 4, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 0 + }, + 
"hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_best_block_numbers{type=~\"target|target_at_source\"}, \"type\", \"At Millau\", \"type\", \"target\"), \"type\", \"At Rialto\", \"type\", \"target_at_source\")", + "instant": false, + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Best finalized Millau headers", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + 
"pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_best_block_numbers{type=~\"source|source_at_target\"}, \"type\", \"At Rialto\", \"type\", \"source\"), \"type\", \"At Millau\", \"type\", \"source_at_target\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Best finalized Rialto headers", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "5m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Messages generated at Rialto are not detected by relay", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 9 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": 
false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Millau\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Rialto\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + }, + { + "expr": "max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m]) - min_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m])", + "hide": true, + "interval": "", + "legendFormat": "Messages generated in last 5 minutes", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Delivery race (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 
12, + "x": 12, + "y": 9 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Millau to Rialto\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Rialto\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Confirmations race (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "sum" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Messages from Rialto to Millau are not being delivered", + "noDataState": 
"no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 20 + }, + "hiddenSeries": false, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "Undelivered messages at Millau", + "refId": "A" + }, + { + "expr": "increase(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[1m])", + "interval": "", + "legendFormat": "Messages delivered to Millau in last 1m", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Delivery race lags (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": 
null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 10 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "5m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Too many unconfirmed messages", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 20 + }, + "hiddenSeries": false, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m]))", + "interval": "", + "legendFormat": "Unconfirmed messages at Rialto", + "refId": "A" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 10, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Confirmations race lags (00000000)", + "tooltip": { + "shared": true, + 
"sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 10 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "5m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Rewards are not being confirmed", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 20 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))", + "interval": "", + "legendFormat": "Unconfirmed 
rewards at Millau", + "refId": "A" + }, + { + "expr": "(scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))) * (max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]) > bool min_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", + "interval": "", + "legendFormat": "Unconfirmed rewards at Millau (zero if messages are not being delivered to Millau)", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 10, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Reward lags (00000000)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Messages (00000001) from Rialto to Millau are not being delivered", + "noDataState": "alerting", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 27 + }, + "hiddenSeries": false, + "id": 21, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000001_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Millau\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Rialto\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + }, + { + "expr": "increase(Rialto_to_Millau_MessageLane_00000001_lane_state_nonces{type=\"target_latest_received\"}[10m])", + "hide": true, + "interval": "", + "legendFormat": "Messages generated in last 5 minutes", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Delivery race (00000001)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + 
"align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "1m", + "handler": 1, + "name": "Messages (00000001) from Rialto to Millau are not being confirmed", + "noDataState": "alerting", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 27 + }, + "hiddenSeries": false, + "id": 22, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000001_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Millau to Rialto\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Rialto\", \"type\", \"target_latest_received\")", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + }, + { + "expr": "increase(Rialto_to_Millau_MessageLane_00000001_lane_state_nonces{type=\"source_latest_confirmed\"}[10m])", + "hide": true, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, 
+ "line": true, + "op": "lt", + "value": 1, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Confirmations race (00000001)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 38 + }, + "id": 16, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "avg_over_time(Rialto_to_Millau_MessageLane_00000000_process_cpu_usage_percentage[1m])", + "instant": true, + "interval": "", + "legendFormat": "1 CPU = 100", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Relay process CPU usage (1 CPU = 100)", + "type": "gauge" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 38 + }, + "hiddenSeries": false, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": 
true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "Rialto_to_Millau_MessageLane_00000000_system_average_load", + "interval": "", + "legendFormat": "Average system load in last {{over}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "System load average", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 38 + }, + "hiddenSeries": false, + "id": 20, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "Rialto_to_Millau_MessageLane_00000000_process_memory_usage_bytes / 1024 / 1024", + "interval": "", + "legendFormat": 
"Process memory, MB", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory used by relay process", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Rialto to Millau Message Sync Dashboard", + "uid": "relay-rialto-to-millau-messages", + "version": 2 +} diff --git a/polkadot/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json b/polkadot/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json new file mode 100644 index 00000000000..61ff281cc2a --- /dev/null +++ b/polkadot/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json @@ -0,0 +1,454 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 9, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + 
}, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "Rialto_to_Millau_MessageLane_00000000_rialto_storage_proof_overhead", + "interval": "", + "legendFormat": "Actual overhead", + "refId": "A" + }, + { + "exemplar": true, + "expr": "1024", + "hide": false, + "interval": "", + "legendFormat": "At runtime (hardcoded)", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rialto: storage proof overhead", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:111", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:112", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, 
+ "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "Westend_to_Millau_Sync_kusama_to_base_conversion_rate / Westend_to_Millau_Sync_polkadot_to_base_conversion_rate", + "interval": "", + "legendFormat": "Outside of runtime (actually Polkadot -> Kusama)", + "refId": "A" + }, + { + "exemplar": true, + "expr": "Rialto_to_Millau_MessageLane_00000000_rialto_millau_to_rialto_conversion_rate", + "hide": false, + "interval": "", + "legendFormat": "At runtime", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rialto: Millau -> Rialto conversion rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:49", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:50", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + 
"nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "Millau_to_Rialto_MessageLane_00000000_millau_storage_proof_overhead", + "interval": "", + "legendFormat": "Actual overhead", + "refId": "A" + }, + { + "exemplar": true, + "expr": "1024", + "hide": false, + "interval": "", + "legendFormat": "At runtime (hardcoded)", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Millau: storage proof overhead", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:111", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:112", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.5.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + 
"stack": false, + "steppedLine": false, + "targets": [ + { + "exemplar": true, + "expr": "Westend_to_Millau_Sync_polkadot_to_base_conversion_rate / Westend_to_Millau_Sync_kusama_to_base_conversion_rate", + "interval": "", + "legendFormat": "Outside of runtime (actually Kusama -> Polkadot)", + "refId": "A" + }, + { + "exemplar": true, + "expr": "Millau_to_Rialto_MessageLane_00000000_millau_rialto_to_millau_conversion_rate", + "hide": false, + "interval": "", + "legendFormat": "At runtime", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Millau: Rialto -> Millau conversion rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:49", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:50", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "10s", + "schemaVersion": 27, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Rialto+Millau maintenance dashboard", + "uid": "7AuyrjlMz", + "version": 2 +} diff --git a/polkadot/deployments/bridges/rialto-millau/dashboard/prometheus/targets.yml b/polkadot/deployments/bridges/rialto-millau/dashboard/prometheus/targets.yml new file mode 100644 index 00000000000..16b798b5a25 --- /dev/null +++ b/polkadot/deployments/bridges/rialto-millau/dashboard/prometheus/targets.yml @@ -0,0 +1,4 @@ +- targets: + - relay-millau-rialto:9616 + - relay-messages-millau-to-rialto-lane-00000001:9616 + - relay-messages-rialto-to-millau-lane-00000001:9616 diff --git 
a/polkadot/deployments/bridges/rialto-millau/docker-compose.yml b/polkadot/deployments/bridges/rialto-millau/docker-compose.yml new file mode 100644 index 00000000000..5f00e449c3b --- /dev/null +++ b/polkadot/deployments/bridges/rialto-millau/docker-compose.yml @@ -0,0 +1,95 @@ +# Exposed ports: 10016, 10116, 10216, 10316, 10416 + +version: '3.5' +services: + # We provide overrides for these particular nodes since they are public facing + # nodes which we use to connect from things like Polkadot JS Apps. + rialto-node-charlie: + environment: + VIRTUAL_HOST: wss.rialto.brucke.link + VIRTUAL_PORT: 9944 + LETSENCRYPT_HOST: wss.rialto.brucke.link + LETSENCRYPT_EMAIL: admin@parity.io + + millau-node-charlie: + environment: + VIRTUAL_HOST: wss.millau.brucke.link + VIRTUAL_PORT: 9944 + LETSENCRYPT_HOST: wss.millau.brucke.link + LETSENCRYPT_EMAIL: admin@parity.io + + relay-millau-rialto: &sub-bridge-relay + image: paritytech/substrate-relay + entrypoint: /entrypoints/relay-millau-rialto-entrypoint.sh + volumes: + - ./bridges/rialto-millau/entrypoints:/entrypoints + environment: + RUST_LOG: rpc=trace,bridge=trace + ports: + - "10016:9616" + depends_on: &all-nodes + - millau-node-alice + - millau-node-bob + - millau-node-charlie + - millau-node-dave + - millau-node-eve + - rialto-node-alice + - rialto-node-bob + - rialto-node-charlie + - rialto-node-dave + - rialto-node-eve + + relay-messages-millau-to-rialto-lane-00000001: + <<: *sub-bridge-relay + environment: + MSG_EXCHANGE_GEN_LANE: "00000001" + entrypoint: /entrypoints/relay-messages-millau-to-rialto-entrypoint.sh + ports: + - "10116:9616" + depends_on: + - relay-millau-rialto + + relay-messages-millau-to-rialto-generator: + <<: *sub-bridge-relay + environment: + MSG_EXCHANGE_GEN_SECONDARY_LANE: "00000001" + entrypoint: /entrypoints/relay-messages-to-rialto-generator-entrypoint.sh + ports: + - "10216:9616" + depends_on: + - relay-millau-rialto + + relay-messages-rialto-to-millau-lane-00000001: + <<: *sub-bridge-relay + 
environment: + MSG_EXCHANGE_GEN_LANE: "00000001" + entrypoint: /entrypoints/relay-messages-rialto-to-millau-entrypoint.sh + ports: + - "10316:9616" + depends_on: + - relay-millau-rialto + + relay-messages-rialto-to-millau-generator: + <<: *sub-bridge-relay + environment: + MSG_EXCHANGE_GEN_SECONDARY_LANE: "00000001" + entrypoint: /entrypoints/relay-messages-to-millau-generator-entrypoint.sh + ports: + - "10416:9616" + depends_on: + - relay-millau-rialto + + # Note: These are being overridden from the top level `monitoring` compose file. + grafana-dashboard: + environment: + VIRTUAL_HOST: grafana.millau.brucke.link,grafana.rialto.brucke.link + VIRTUAL_PORT: 3000 + LETSENCRYPT_HOST: grafana.millau.brucke.link,grafana.rialto.brucke.link + LETSENCRYPT_EMAIL: admin@parity.io + volumes: + - ./bridges/rialto-millau/dashboard/grafana:/etc/grafana/dashboards/rialto-millau:ro + + prometheus-metrics: + volumes: + - ./bridges/rialto-millau/dashboard/prometheus/targets.yml:/etc/prometheus/targets-rialto-millau.yml + depends_on: *all-nodes diff --git a/polkadot/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh b/polkadot/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh new file mode 100755 index 00000000000..48e5a281799 --- /dev/null +++ b/polkadot/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh @@ -0,0 +1,18 @@ +#!/bin/bash +set -xeu + +sleep 3 +curl -v http://millau-node-bob:9933/health +curl -v http://rialto-node-bob:9933/health + +MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} + +/home/user/substrate-relay relay-messages MillauToRialto \ + --lane $MESSAGE_LANE \ + --source-host millau-node-bob \ + --source-port 9944 \ + --source-signer //Eve \ + --target-host rialto-node-bob \ + --target-port 9944 \ + --target-signer //Eve \ + --prometheus-host=0.0.0.0 diff --git 
a/polkadot/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh b/polkadot/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh new file mode 100755 index 00000000000..378aeedd9f9 --- /dev/null +++ b/polkadot/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh @@ -0,0 +1,18 @@ +#!/bin/bash +set -xeu + +sleep 3 +curl -v http://millau-node-bob:9933/health +curl -v http://rialto-node-bob:9933/health + +MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} + +/home/user/substrate-relay relay-messages RialtoToMillau \ + --lane $MESSAGE_LANE \ + --source-host rialto-node-bob \ + --source-port 9944 \ + --source-signer //Ferdie \ + --target-host millau-node-bob \ + --target-port 9944 \ + --target-signer //Ferdie \ + --prometheus-host=0.0.0.0 diff --git a/polkadot/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh b/polkadot/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh new file mode 100755 index 00000000000..96676bad85b --- /dev/null +++ b/polkadot/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh @@ -0,0 +1,121 @@ +#!/bin/bash + +# THIS SCRIPT IS NOT INTENDED FOR USE IN PRODUCTION ENVIRONMENT +# +# This scripts periodically calls the Substrate relay binary to generate messages. These messages +# are sent from the Rialto network to the Millau network. 
+ +set -eu + +# Max delay before submitting transactions (s) +MAX_SUBMIT_DELAY_S=${MSG_EXCHANGE_GEN_MAX_SUBMIT_DELAY_S:-30} +MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} +SECONDARY_MESSAGE_LANE=${MSG_EXCHANGE_GEN_SECONDARY_LANE} +MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE=1024 +FERDIE_ADDR=5oSLwptwgySxh5vz1HdvznQJjbQVgwYSvHEpYYeTXu1Ei8j7 + +SHARED_CMD="/home/user/substrate-relay send-message RialtoToMillau" +SHARED_HOST="--source-host rialto-node-bob --source-port 9944" +DAVE_SIGNER="--source-signer //Dave --target-signer //Dave" + +SEND_MESSAGE="$SHARED_CMD $SHARED_HOST $DAVE_SIGNER" + +# Sleep a bit between messages +rand_sleep() { + SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1` + echo "Sleeping $SUBMIT_DELAY_S seconds..." + sleep $SUBMIT_DELAY_S +} + +# start sending large messages immediately +LARGE_MESSAGES_TIME=0 +# start sending message packs in a hour +BUNCH_OF_MESSAGES_TIME=3600 + +while true +do + rand_sleep + echo "Sending Remark from Rialto to Millau using Target Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + remark + + if [ ! 
-z $SECONDARY_MESSAGE_LANE ]; then + echo "Sending Remark from Rialto to Millau using Target Origin using secondary lane: $SECONDARY_MESSAGE_LANE" + $SEND_MESSAGE \ + --lane $SECONDARY_MESSAGE_LANE \ + --origin Target \ + remark + fi + + rand_sleep + echo "Sending Transfer from Rialto to Millau using Target Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + transfer \ + --amount 1000000000 \ + --recipient $FERDIE_ADDR + + rand_sleep + echo "Sending Remark from Rialto to Millau using Source Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Source \ + remark + + rand_sleep + echo "Sending Transfer from Rialto to Millau using Source Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Source \ + transfer \ + --amount 1000000000 \ + --recipient $FERDIE_ADDR + + # every other hour we're sending 3 large (size, weight, size+weight) messages + if [ $SECONDS -ge $LARGE_MESSAGES_TIME ]; then + LARGE_MESSAGES_TIME=$((SECONDS + 7200)) + + rand_sleep + echo "Sending Maximal Size Remark from Rialto to Millau using Target Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + remark \ + --remark-size=max + + rand_sleep + echo "Sending Maximal Dispatch Weight Remark from Rialto to Millau using Target Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + --dispatch-weight=max \ + remark + + rand_sleep + echo "Sending Maximal Size and Dispatch Weight Remark from Rialto to Millau using Target Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + --dispatch-weight=max \ + remark \ + --remark-size=max + + fi + + # every other hour we're sending a bunch of small messages + if [ $SECONDS -ge $BUNCH_OF_MESSAGES_TIME ]; then + BUNCH_OF_MESSAGES_TIME=$((SECONDS + 7200)) + + for i in $(seq 1 $MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE); + do + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + remark + done + + fi +done diff --git 
a/polkadot/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh b/polkadot/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh new file mode 100755 index 00000000000..c24ec8ea7f4 --- /dev/null +++ b/polkadot/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh @@ -0,0 +1,121 @@ +#!/bin/bash + +# THIS SCRIPT IS NOT INTENDED FOR USE IN PRODUCTION ENVIRONMENT +# +# This scripts periodically calls the Substrate relay binary to generate messages. These messages +# are sent from the Millau network to the Rialto network. + +set -eu + +# Max delay before submitting transactions (s) +MAX_SUBMIT_DELAY_S=${MSG_EXCHANGE_GEN_MAX_SUBMIT_DELAY_S:-30} +MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} +SECONDARY_MESSAGE_LANE=${MSG_EXCHANGE_GEN_SECONDARY_LANE} +MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE=128 +FERDIE_ADDR=6ztG3jPnJTwgZnnYsgCDXbbQVR82M96hBZtPvkN56A9668ZC + +SHARED_CMD=" /home/user/substrate-relay send-message MillauToRialto" +SHARED_HOST="--source-host millau-node-bob --source-port 9944" +DAVE_SIGNER="--target-signer //Dave --source-signer //Dave" + +SEND_MESSAGE="$SHARED_CMD $SHARED_HOST $DAVE_SIGNER" + +# Sleep a bit between messages +rand_sleep() { + SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1` + echo "Sleeping $SUBMIT_DELAY_S seconds..." + sleep $SUBMIT_DELAY_S +} + +# start sending large messages immediately +LARGE_MESSAGES_TIME=0 +# start sending message packs in a hour +BUNCH_OF_MESSAGES_TIME=3600 + +while true +do + rand_sleep + echo "Sending Remark from Millau to Rialto using Target Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + remark + + if [ ! 
-z $SECONDARY_MESSAGE_LANE ]; then + echo "Sending Remark from Millau to Rialto using Target Origin using secondary lane: $SECONDARY_MESSAGE_LANE" + $SEND_MESSAGE \ + --lane $SECONDARY_MESSAGE_LANE \ + --origin Target \ + remark + fi + + rand_sleep + echo "Sending Transfer from Millau to Rialto using Target Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + transfer \ + --amount 1000000000 \ + --recipient $FERDIE_ADDR + + rand_sleep + echo "Sending Remark from Millau to Rialto using Source Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Source \ + remark + + rand_sleep + echo "Sending Transfer from Millau to Rialto using Source Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Source \ + transfer \ + --amount 1000000000 \ + --recipient $FERDIE_ADDR + + # every other hour we're sending 3 large (size, weight, size+weight) messages + if [ $SECONDS -ge $LARGE_MESSAGES_TIME ]; then + LARGE_MESSAGES_TIME=$((SECONDS + 7200)) + + rand_sleep + echo "Sending Maximal Size Remark from Millau to Rialto using Target Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + remark \ + --remark-size=max + + rand_sleep + echo "Sending Maximal Dispatch Weight Remark from Millau to Rialto using Target Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + --dispatch-weight=max \ + remark + + rand_sleep + echo "Sending Maximal Size and Dispatch Weight Remark from Millau to Rialto using Target Origin" + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + --dispatch-weight=max \ + remark \ + --remark-size=max + + fi + + # every other hour we're sending a bunch of small messages + if [ $SECONDS -ge $BUNCH_OF_MESSAGES_TIME ]; then + BUNCH_OF_MESSAGES_TIME=$((SECONDS + 7200)) + + for i in $(seq 1 $MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE); + do + $SEND_MESSAGE \ + --lane $MESSAGE_LANE \ + --origin Target \ + remark + done + + fi +done diff --git 
a/polkadot/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh b/polkadot/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh new file mode 100755 index 00000000000..d8d3290428f --- /dev/null +++ b/polkadot/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh @@ -0,0 +1,33 @@ +#!/bin/bash +set -xeu + +sleep 3 +curl -v http://millau-node-alice:9933/health +curl -v http://rialto-node-alice:9933/health + +/home/user/substrate-relay init-bridge MillauToRialto \ + --source-host millau-node-alice \ + --source-port 9944 \ + --target-host rialto-node-alice \ + --target-port 9944 \ + --target-signer //Alice + +/home/user/substrate-relay init-bridge RialtoToMillau \ + --source-host rialto-node-alice \ + --source-port 9944 \ + --target-host millau-node-alice \ + --target-port 9944 \ + --target-signer //Alice + +# Give chain a little bit of time to process initialization transaction +sleep 6 + +/home/user/substrate-relay relay-headers-and-messages millau-rialto \ + --millau-host millau-node-alice \ + --millau-port 9944 \ + --millau-signer //Charlie \ + --rialto-host rialto-node-alice \ + --rialto-port 9944 \ + --rialto-signer //Charlie \ + --lane=00000000 \ + --prometheus-host=0.0.0.0 diff --git a/polkadot/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json b/polkadot/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json new file mode 100644 index 00000000000..e73ddea40f1 --- /dev/null +++ b/polkadot/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json @@ -0,0 +1,694 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + 
"links": [], + "panels": [ + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 5 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "5m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "5m", + "frequency": "5m", + "handler": 1, + "message": "", + "name": "Synced Header Difference is Over 5 (Westend to Millau)", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "Shows how many headers behind the target chain is from the source chain.", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max(Westend_to_Millau_Sync_best_block_numbers{node=\"source\"}) - max(Westend_to_Millau_Sync_best_block_numbers{node=\"target\"})", + "format": "table", + "instant": false, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 5 + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Difference Between Source and Target Headers", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, 
+ "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 5 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "2m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "3m", + "frequency": "5m", + "handler": 1, + "name": "No New Headers (Westend to Millau)", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "How many headers has the relay synced from the source node in the last 2 mins?", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "hiddenSeries": false, + "id": 16, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max_over_time(Westend_to_Millau_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Westend_to_Millau_Sync_best_block_numbers{node=\"source\"}[2m])", + "interval": "", + "legendFormat": "Number of new Headers on Westend (Last 2 Mins)", + "refId": "A" + } + ], + "thresholds": [ + { + 
"colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 5 + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Headers Synced on Millau (Last 2 Mins)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": { + "align": null + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 2, + "interval": "5s", + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "Westend_to_Millau_Sync_best_block_numbers", + "format": "time_series", + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Best Known Header on {{node}} Node", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Best Blocks according to Relay", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 8 + }, + 
"hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "Westend_to_Millau_Sync_system_average_load", + "interval": "", + "legendFormat": "Average system load in last {{over}}", + "refId": "A" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": null + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average System Load", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 8 + }, + "id": 12, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "avg_over_time(Westend_to_Millau_Sync_process_cpu_usage_percentage[1m])", + 
"instant": true, + "interval": "", + "legendFormat": "1 CPU = 100", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Relay Process CPU Usage ", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 14 + }, + "id": 4, + "options": { + "displayMode": "gradient", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showUnfilled": true + }, + "pluginVersion": "7.1.3", + "targets": [ + { + "expr": "Westend_to_Millau_Sync_blocks_in_state", + "instant": true, + "interval": "", + "legendFormat": "{{state}}", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Queued Headers in Relay", + "type": "bargauge" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "hiddenSeries": false, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "Westend_to_Millau_Sync_process_memory_usage_bytes / 1024 / 1024", + "interval": "", + "legendFormat": "Process memory, MB", + "refId": "A" + } + ], + "thresholds": [], + 
"timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Usage for Relay Process", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Westend to Millau Header Sync Dashboard", + "uid": "relay-westend-to-millau-headers", + "version": 1 +} diff --git a/polkadot/deployments/bridges/westend-millau/dashboard/prometheus/targets.yml b/polkadot/deployments/bridges/westend-millau/dashboard/prometheus/targets.yml new file mode 100644 index 00000000000..5d49e112744 --- /dev/null +++ b/polkadot/deployments/bridges/westend-millau/dashboard/prometheus/targets.yml @@ -0,0 +1,2 @@ +- targets: + - relay-headers-westend-to-millau:9616 diff --git a/polkadot/deployments/bridges/westend-millau/docker-compose.yml b/polkadot/deployments/bridges/westend-millau/docker-compose.yml new file mode 100644 index 00000000000..8caa17ffb82 --- /dev/null +++ b/polkadot/deployments/bridges/westend-millau/docker-compose.yml @@ -0,0 +1,31 @@ +# Exposed ports: 10616 + +version: '3.5' +services: + relay-headers-westend-to-millau: + image: paritytech/substrate-relay + entrypoint: /entrypoints/relay-headers-westend-to-millau-entrypoint.sh + volumes: + - ./bridges/westend-millau/entrypoints:/entrypoints + environment: + RUST_LOG: 
rpc=trace,bridge=trace + ports: + - "10616:9616" + depends_on: + - millau-node-alice + + # Note: These are being overridden from the top level `monitoring` compose file. + grafana-dashboard: + environment: + VIRTUAL_HOST: grafana.millau.brucke.link,grafana.rialto.brucke.link + VIRTUAL_PORT: 3000 + LETSENCRYPT_HOST: grafana.millau.brucke.link,grafana.rialto.brucke.link + LETSENCRYPT_EMAIL: admin@parity.io + volumes: + - ./bridges/westend-millau/dashboard/grafana:/etc/grafana/dashboards/westend-millau:ro + + prometheus-metrics: + volumes: + - ./bridges/westend-millau/dashboard/prometheus/targets.yml:/etc/prometheus/targets-westend-millau.yml + depends_on: + - relay-headers-westend-to-millau diff --git a/polkadot/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh b/polkadot/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh new file mode 100755 index 00000000000..740a9a97396 --- /dev/null +++ b/polkadot/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -xeu + +sleep 3 +curl -v http://millau-node-alice:9933/health +curl -v https://westend-rpc.polkadot.io:443/health + +/home/user/substrate-relay init-bridge WestendToMillau \ + --source-host westend-rpc.polkadot.io \ + --source-port 443 \ + --source-secure \ + --target-host millau-node-alice \ + --target-port 9944 \ + --target-signer //George + +# Give chain a little bit of time to process initialization transaction +sleep 6 +/home/user/substrate-relay relay-headers WestendToMillau \ + --source-host westend-rpc.polkadot.io \ + --source-port 443 \ + --source-secure \ + --target-host millau-node-alice \ + --target-port 9944 \ + --target-signer //George \ + --prometheus-host=0.0.0.0 diff --git a/polkadot/deployments/dev/poa-config/keys/BridgePoa/address_book.json b/polkadot/deployments/dev/poa-config/keys/BridgePoa/address_book.json new file mode 100644 index 
00000000000..9e26dfeeb6e --- /dev/null +++ b/polkadot/deployments/dev/poa-config/keys/BridgePoa/address_book.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/polkadot/deployments/dev/poa-config/keys/BridgePoa/arthur.json b/polkadot/deployments/dev/poa-config/keys/BridgePoa/arthur.json new file mode 100644 index 00000000000..fa59a46480c --- /dev/null +++ b/polkadot/deployments/dev/poa-config/keys/BridgePoa/arthur.json @@ -0,0 +1 @@ +{"id":"dd04f316-bc9d-2deb-4a34-51014cd5f34f","version":3,"crypto":{"cipher":"aes-128-ctr","cipherparams":{"iv":"aa91e6f0e6cf48208be4a1bcf15c6f30"},"ciphertext":"6e057599b13a87e8181bb39a40e14848fdc97958d493ddfa6bb1260350f69328","kdf":"pbkdf2","kdfparams":{"c":10240,"dklen":32,"prf":"hmac-sha256","salt":"79dd8c09c5c066b830179a2558a51efca6d97c0db2c4128090a01835786823c5"},"mac":"8f8b8e2c9de29ec8eefc54a60055e30ae7ff4dd4a367eaf38880edb887da771e"},"address":"005e714f896a8b7cede9d38688c1a81de72a58e4","name":"","meta":"{}"} \ No newline at end of file diff --git a/polkadot/deployments/dev/poa-config/pass b/polkadot/deployments/dev/poa-config/pass new file mode 100644 index 00000000000..f3097ab1308 --- /dev/null +++ b/polkadot/deployments/dev/poa-config/pass @@ -0,0 +1 @@ +password diff --git a/polkadot/deployments/dev/poa-config/poa-node-config b/polkadot/deployments/dev/poa-config/poa-node-config new file mode 100644 index 00000000000..146bbac17cf --- /dev/null +++ b/polkadot/deployments/dev/poa-config/poa-node-config @@ -0,0 +1,17 @@ +[parity] +chain = "./deployments/dev/poa-config/poa.json" +keys_path = "./deployments/dev/poa-config/keys" +no_persistent_txqueue = true + +[account] +password = ["./deployments/dev/poa-config/pass"] + +[rpc] +apis = ["all"] +cors = ["moz-extension://*", "chrome-extension://*"] + +[mining] +force_sealing = true + +[misc] +unsafe_expose = true diff --git a/polkadot/deployments/dev/poa-config/poa.json b/polkadot/deployments/dev/poa-config/poa.json new file mode 100644 index 00000000000..ecc21766b03 
--- /dev/null +++ b/polkadot/deployments/dev/poa-config/poa.json @@ -0,0 +1,178 @@ +{ + "name": "BridgePoa", + "engine": { + "authorityRound": { + "params": { + "stepDuration": 10, + "validators": { + "list": [ + "0x005e714f896a8b7cede9d38688c1a81de72a58e4" + ] + }, + "validateScoreTransition": 0, + "validateStepTransition": 0, + "maximumUncleCountTransition": 0, + "maximumUncleCount": 0, + "emptyStepsTransition": "0xfffffffff", + "maximumEmptySteps": 1 + } + } + }, + "params": { + "accountStartNonce": "0x0", + "eip1014Transition": "0x0", + "eip1052Transition": "0x0", + "eip140Transition": "0x0", + "eip145Transition": "0x0", + "eip150Transition": "0x0", + "eip155Transition": "0x0", + "eip160Transition": "0x0", + "eip161abcTransition": "0x0", + "eip161dTransition": "0x0", + "eip211Transition": "0x0", + "eip214Transition": "0x0", + "eip658Transition": "0x0", + "eip98Transition": "0x7fffffffffffff", + "gasLimitBoundDivisor": "0x0400", + "maxCodeSize": 24576, + "maxCodeSizeTransition": "0x0", + "maximumExtraDataSize": "0x20", + "minGasLimit": "0x1388", + "networkID" : "0x69", + "validateChainIdTransition": "0x0", + "validateReceiptsTransition": "0x0" + }, + "genesis": { + "seal": { + "authorityRound": { + "step": "0x0", + "signature": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } + }, + "difficulty": "0x20000", + "author": "0x0000000000000000000000000000000000000000", + "timestamp": "0x00", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData": "0x", + "gasLimit": "0x222222" + }, + "accounts": { + "0000000000000000000000000000000000000001": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, + "0000000000000000000000000000000000000002": { "balance": "1", "nonce": "1048576", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } 
} }, + "0000000000000000000000000000000000000003": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, + "0000000000000000000000000000000000000004": { "balance": "1", "nonce": "1048576", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, + "0000000000000000000000000000000000000005": { "balance": "1", "builtin": { "name": "modexp", "activate_at": 0, "pricing": { "modexp": { "divisor": 20 } } } }, + "0000000000000000000000000000000000000006": { + "balance": "1", + "builtin": { + "name": "alt_bn128_add", + "pricing": { + "0": { + "price": { "alt_bn128_const_operations": { "price": 500 }} + }, + "0x7fffffffffffff": { + "info": "EIP 1108 transition", + "price": { "alt_bn128_const_operations": { "price": 150 }} + } + } + } + }, + "0000000000000000000000000000000000000007": { + "balance": "1", + "builtin": { + "name": "alt_bn128_mul", + "pricing": { + "0": { + "price": { "alt_bn128_const_operations": { "price": 40000 }} + }, + "0x7fffffffffffff": { + "info": "EIP 1108 transition", + "price": { "alt_bn128_const_operations": { "price": 6000 }} + } + } + } + }, + "0000000000000000000000000000000000000008": { + "balance": "1", + "builtin": { + "name": "alt_bn128_pairing", + "pricing": { + "0": { + "price": { "alt_bn128_pairing": { "base": 100000, "pair": 80000 }} + }, + "0x7fffffffffffff": { + "info": "EIP 1108 transition", + "price": { "alt_bn128_pairing": { "base": 45000, "pair": 34000 }} + } + } + } + }, + "0x0000000000000000000000000000000000000009": { + "builtin": { + "name": "blake2_f", + "activate_at": "0xd751a5", + "pricing": { + "blake2_f": { + "gas_per_round": 1 + } + } + } + }, + "0x0000000000000000000000000000000000000010": { + "builtin": { + "name": "parse_substrate_header", + "pricing": { + "linear": { + "base": 3000, + "word": 0 + } + } + } + }, + "0x0000000000000000000000000000000000000011": { + "builtin": { + "name": 
"get_substrate_header_signal", + "pricing": { + "linear": { + "base": 3000, + "word": 0 + } + } + } + }, + "0x0000000000000000000000000000000000000012": { + "builtin": { + "name": "verify_substrate_finality_proof", + "pricing": { + "linear": { + "base": 3000, + "word": 0 + } + } + } + }, + "0x0000000000000000000000000000000000000013": { + "builtin": { + "name": "my_test", + "pricing": { + "linear": { + "base": 3000, + "word": 0 + } + } + } + }, + "0x005e714f896a8b7cede9d38688c1a81de72a58e4": { + "balance": "1606938044258990275541962092341162602522202993782792835301376", + "nonce": "0x1" + }, + "0x007594304039c2937a12220338aab821d819f5a4": { + "balance": "1606938044258990275541962092341162602522202993782792835301376", + "nonce": "0x1" + }, + "0x004e7a39907f090e19b0b80a277e77b72b22e269": { + "balance": "1606938044258990275541962092341162602522202993782792835301376", + "nonce": "0x1" + } + } +} diff --git a/polkadot/deployments/local-scripts/bridge-entrypoint.sh b/polkadot/deployments/local-scripts/bridge-entrypoint.sh new file mode 100755 index 00000000000..5c1b6e90ec2 --- /dev/null +++ b/polkadot/deployments/local-scripts/bridge-entrypoint.sh @@ -0,0 +1,7 @@ +#!/bin/bash +set -xeu + +# This will allow us to run whichever binary the user wanted +# with arguments passed through `docker run` +# e.g `docker run -it rialto-bridge-node-dev --dev --tmp` +/home/user/$PROJECT $@ diff --git a/polkadot/deployments/local-scripts/relay-headers-rococo-to-westend.sh b/polkadot/deployments/local-scripts/relay-headers-rococo-to-westend.sh new file mode 100755 index 00000000000..d54d16f7e34 --- /dev/null +++ b/polkadot/deployments/local-scripts/relay-headers-rococo-to-westend.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# +# Run an instance of the Rococo -> Westend header sync. +# +# Right now this relies on local Westend and Rococo networks +# running (which include `pallet-bridge-grandpa` in their +# runtimes), but in the future it could use use public RPC nodes. 
+ +set -xeu + +RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge RococoToWestend \ + --source-host 127.0.0.1 \ + --source-port 9955 \ + --target-host 127.0.0.1 \ + --target-port 9944 \ + --target-signer //Eve + +RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers RococoToWestend \ + --source-host 127.0.0.1 \ + --source-port 9955 \ + --target-host 127.0.0.1 \ + --target-port 9944 \ + --target-signer //Bob \ + --prometheus-host=0.0.0.0 \ diff --git a/polkadot/deployments/local-scripts/relay-headers-westend-to-rococo.sh b/polkadot/deployments/local-scripts/relay-headers-westend-to-rococo.sh new file mode 100755 index 00000000000..e718656a9d1 --- /dev/null +++ b/polkadot/deployments/local-scripts/relay-headers-westend-to-rococo.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# +# Run an instance of the Westend -> Rococo header sync. +# +# Right now this relies on local Westend and Rococo networks +# running (which include `pallet-bridge-grandpa` in their +# runtimes), but in the future it could use public RPC nodes. + +set -xeu + +RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge WestendToRococo \ + --source-host 127.0.0.1 \ + --source-port 9944 \ + --target-host 127.0.0.1 \ + --target-port 9955 \ + --target-signer //Dave + +RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers WestendToRococo \ + --source-host 127.0.0.1 \ + --source-port 9944 \ + --target-host 127.0.0.1 \ + --target-port 9955 \ + --target-signer //Charlie \ + --prometheus-host=0.0.0.0 \ diff --git a/polkadot/deployments/local-scripts/relay-messages-millau-to-rialto.sh b/polkadot/deployments/local-scripts/relay-messages-millau-to-rialto.sh new file mode 100755 index 00000000000..5b298a149f8 --- /dev/null +++ b/polkadot/deployments/local-scripts/relay-messages-millau-to-rialto.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# A script for relaying Millau messages to the Rialto chain. 
+# +# Will not work unless both the Rialto and Millau are running (see `run-rialto-node.sh` +# and `run-millau-node.sh`). +set -xeu + +MILLAU_PORT="${MILLAU_PORT:-9945}" +RIALTO_PORT="${RIALTO_PORT:-9944}" + +RUST_LOG=bridge=debug \ +./target/debug/substrate-relay relay-messages MillauToRialto \ + --lane 00000000 \ + --source-host localhost \ + --source-port $MILLAU_PORT \ + --source-signer //Bob \ + --target-host localhost \ + --target-port $RIALTO_PORT \ + --target-signer //Bob \ + --prometheus-host=0.0.0.0 diff --git a/polkadot/deployments/local-scripts/relay-messages-rialto-to-millau.sh b/polkadot/deployments/local-scripts/relay-messages-rialto-to-millau.sh new file mode 100755 index 00000000000..616697192b9 --- /dev/null +++ b/polkadot/deployments/local-scripts/relay-messages-rialto-to-millau.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# A script for relaying Rialto messages to the Millau chain. +# +# Will not work unless both the Rialto and Millau are running (see `run-rialto-node.sh` +# and `run-millau-node.sh`). +set -xeu + +MILLAU_PORT="${MILLAU_PORT:-9945}" +RIALTO_PORT="${RIALTO_PORT:-9944}" + +RUST_LOG=bridge=debug \ +./target/debug/substrate-relay relay-messages RialtoToMillau \ + --lane 00000000 \ + --source-host localhost \ + --source-port $RIALTO_PORT \ + --source-signer //Bob \ + --target-host localhost \ + --target-port $MILLAU_PORT \ + --target-signer //Bob \ + --prometheus-host=0.0.0.0 diff --git a/polkadot/deployments/local-scripts/relay-millau-to-rialto.sh b/polkadot/deployments/local-scripts/relay-millau-to-rialto.sh new file mode 100755 index 00000000000..59c75de3899 --- /dev/null +++ b/polkadot/deployments/local-scripts/relay-millau-to-rialto.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +# A script for relaying Millau headers to the Rialto chain. +# +# Will not work unless both the Rialto and Millau are running (see `run-rialto-node.sh` +# and `run-millau-node.sh`). 
+ +MILLAU_PORT="${MILLAU_PORT:-9945}" +RIALTO_PORT="${RIALTO_PORT:-9944}" + +RUST_LOG=bridge=debug \ +./target/debug/substrate-relay init-bridge MillauToRialto \ + --source-host localhost \ + --source-port $MILLAU_PORT \ + --target-host localhost \ + --target-port $RIALTO_PORT \ + --target-signer //Alice \ + +sleep 5 +RUST_LOG=bridge=debug \ +./target/debug/substrate-relay relay-headers MillauToRialto \ + --source-host localhost \ + --source-port $MILLAU_PORT \ + --target-host localhost \ + --target-port $RIALTO_PORT \ + --target-signer //Alice \ + --prometheus-host=0.0.0.0 diff --git a/polkadot/deployments/local-scripts/relay-rialto-to-millau.sh b/polkadot/deployments/local-scripts/relay-rialto-to-millau.sh new file mode 100755 index 00000000000..6382cdca823 --- /dev/null +++ b/polkadot/deployments/local-scripts/relay-rialto-to-millau.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +# A script for relaying Rialto headers to the Millau chain. +# +# Will not work unless both the Rialto and Millau are running (see `run-rialto-node.sh` +# and `run-millau-node.sh`). + +MILLAU_PORT="${MILLAU_PORT:-9945}" +RIALTO_PORT="${RIALTO_PORT:-9944}" + +RUST_LOG=bridge=debug \ +./target/debug/substrate-relay init-bridge RialtoToMillau \ + --target-host localhost \ + --target-port $MILLAU_PORT \ + --source-host localhost \ + --source-port $RIALTO_PORT \ + --target-signer //Alice \ + +sleep 5 +RUST_LOG=bridge=debug \ +./target/debug/substrate-relay relay-headers RialtoToMillau \ + --target-host localhost \ + --target-port $MILLAU_PORT \ + --source-host localhost \ + --source-port $RIALTO_PORT \ + --target-signer //Alice \ + --prometheus-host=0.0.0.0 diff --git a/polkadot/deployments/local-scripts/run-millau-node.sh b/polkadot/deployments/local-scripts/run-millau-node.sh new file mode 100755 index 00000000000..916f876c536 --- /dev/null +++ b/polkadot/deployments/local-scripts/run-millau-node.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# Run a development instance of the Millau Substrate bridge node. 
+# To override the default port just export MILLAU_PORT=9945 + +MILLAU_PORT="${MILLAU_PORT:-9945}" + +RUST_LOG=runtime=trace \ +./target/debug/millau-bridge-node --dev --tmp \ + --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ + --port 33044 --rpc-port 9934 --ws-port $MILLAU_PORT \ diff --git a/polkadot/deployments/local-scripts/run-rialto-node.sh b/polkadot/deployments/local-scripts/run-rialto-node.sh new file mode 100755 index 00000000000..e7987e2af36 --- /dev/null +++ b/polkadot/deployments/local-scripts/run-rialto-node.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# Run a development instance of the Rialto Substrate bridge node. +# To override the default port just export RIALTO_PORT=9944 + +RIALTO_PORT="${RIALTO_PORT:-9944}" + +RUST_LOG=runtime=trace \ + ./target/debug/rialto-bridge-node --dev --tmp \ + --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ + --port 33033 --rpc-port 9933 --ws-port $RIALTO_PORT \ diff --git a/polkadot/deployments/local-scripts/run-rococo-bob-node.sh b/polkadot/deployments/local-scripts/run-rococo-bob-node.sh new file mode 100755 index 00000000000..550d8cf7553 --- /dev/null +++ b/polkadot/deployments/local-scripts/run-rococo-bob-node.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Run a development instance of the Rococo Substrate bridge node. +# To override the default port just export ROCOCO_BOB_PORT=9966 +# +# Note: This script will not work out of the box with the bridges +# repo since it relies on a Polkadot binary. 
+ +ROCOCO_BOB_PORT="${ROCOCO_BOB_PORT:-9966}" + +RUST_LOG=runtime=trace,runtime::bridge=trace \ +./target/debug/polkadot --chain=rococo-local --bob --tmp \ + --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ + --port 33055 --rpc-port 9935 --ws-port $ROCOCO_BOB_PORT \ diff --git a/polkadot/deployments/local-scripts/run-rococo-node.sh b/polkadot/deployments/local-scripts/run-rococo-node.sh new file mode 100755 index 00000000000..073d39a3eaf --- /dev/null +++ b/polkadot/deployments/local-scripts/run-rococo-node.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Run a development instance of the Rococo Substrate bridge node. +# To override the default port just export ROCOCO_PORT=9955 +# +# Note: This script will not work out of the box with the bridges +# repo since it relies on a Polkadot binary. + +ROCOCO_PORT="${ROCOCO_PORT:-9955}" + +RUST_LOG=runtime=trace,runtime::bridge=trace \ +./target/debug/polkadot --chain=rococo-local --alice --tmp \ + --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ + --port 33044 --rpc-port 9934 --ws-port $ROCOCO_PORT \ diff --git a/polkadot/deployments/local-scripts/run-westend-node.sh b/polkadot/deployments/local-scripts/run-westend-node.sh new file mode 100755 index 00000000000..1bb490fc1a8 --- /dev/null +++ b/polkadot/deployments/local-scripts/run-westend-node.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +# Run a development instance of the Westend Substrate bridge node. +# To override the default port just export WESTEND_PORT=9944 +# +# Note: This script will not work out of the box with the bridges +# repo since it relies on a Polkadot binary. 
+ +WESTEND_PORT="${WESTEND_PORT:-9944}" + +RUST_LOG=runtime=trace,runtime::bridge=trace \ +./target/debug/polkadot --chain=westend-dev --alice --tmp \ + --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ + --port 33033 --rpc-port 9933 --ws-port $WESTEND_PORT \ diff --git a/polkadot/deployments/monitoring/GrafanaMatrix.Dockerfile b/polkadot/deployments/monitoring/GrafanaMatrix.Dockerfile new file mode 100644 index 00000000000..420e134716a --- /dev/null +++ b/polkadot/deployments/monitoring/GrafanaMatrix.Dockerfile @@ -0,0 +1,18 @@ +FROM ruby:alpine + +RUN apk add --no-cache git + +ENV APP_HOME /app +ENV RACK_ENV production +RUN mkdir $APP_HOME +WORKDIR $APP_HOME + +# The latest master has some changes in how the application is run. We don't +# want to update just yet so we're pinning to an old commit. +RUN git clone https://github.com/ananace/ruby-grafana-matrix.git $APP_HOME +RUN git checkout 0d662b29633d16176291d11a2d85ba5107cf7de3 +RUN bundle install --without development + +RUN mkdir /config && touch /config/config.yml && ln -s /config/config.yml ./config.yml + +CMD ["bundle", "exec", "bin/server"] diff --git a/polkadot/deployments/monitoring/disabled.yml b/polkadot/deployments/monitoring/disabled.yml new file mode 100644 index 00000000000..a0b4ed3aad0 --- /dev/null +++ b/polkadot/deployments/monitoring/disabled.yml @@ -0,0 +1,15 @@ +# A disabled version of monitoring. +# +# We replace each service with a no-op container. We can't simply not include this file, +# cause the bridge-specific compose files might have overrides. 
+version: '3.5' +services: + prometheus-metrics: + image: alpine + + grafana-dashboard: + image: alpine + + grafana-matrix-notifier: + image: alpine + diff --git a/polkadot/deployments/monitoring/docker-compose.yml b/polkadot/deployments/monitoring/docker-compose.yml new file mode 100644 index 00000000000..5456cb76dc7 --- /dev/null +++ b/polkadot/deployments/monitoring/docker-compose.yml @@ -0,0 +1,32 @@ +version: '3.5' +services: + prometheus-metrics: + image: prom/prometheus:v2.20.1 + volumes: + - ./monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml + ports: + - "9090:9090" + + grafana-dashboard: + image: grafana/grafana:7.1.3 + environment: + GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASS:-admin} + GF_SERVER_ROOT_URL: ${GRAFANA_SERVER_ROOT_URL} + GF_SERVER_DOMAIN: ${GRAFANA_SERVER_DOMAIN} + volumes: + - ./monitoring/grafana/provisioning/:/etc/grafana/provisioning/:ro + ports: + - "3000:3000" + depends_on: + - prometheus-metrics + + grafana-matrix-notifier: + build: + context: . 
+ dockerfile: ./monitoring/GrafanaMatrix.Dockerfile + volumes: + - ./monitoring/grafana-matrix:/config + ports: + - "4567:4567" + depends_on: + - grafana-dashboard diff --git a/polkadot/deployments/monitoring/grafana-matrix/config.yml b/polkadot/deployments/monitoring/grafana-matrix/config.yml new file mode 100644 index 00000000000..645ee708fef --- /dev/null +++ b/polkadot/deployments/monitoring/grafana-matrix/config.yml @@ -0,0 +1,49 @@ +--- +# Webhook server configuration +# Or use the launch options `-o '::' -p 4567` +#bind: '::' +#port: 4567 + +# Set up your HS connections +matrix: +- name: matrix-parity-io + url: https://matrix.parity.io + # Create a user - log that user in using a post request + # curl -XPOST -d '{"type": "m.login.password", + # "user":"grafana", + # "password":"dummy-password"}' + # "https://my-matrix-server/_matrix/client/r0/login" + # Fill that access token in here + access_token: "" + #device_id: # Optional + +# The default message type for messages, should be either m.text or m.notice, +# defaults to m.text +msgtype: m.text + +# Set up notification ingress rules +rules: +- name: bridge # Name of the rule + room: "#bridges-workers:matrix.parity.io" # Room or ID + matrix: matrix-parity-io # The Matrix HS to use - defaults to first one + msgtype: m.notice + # The following values are optional: + image: true # Attach image to the notification? + embed_image: true # Upload and embed the image into the message? 
+ #templates: + # Templates to use when rendering the notification, available placeholders: + # %TEMPLATES% - lib/grafana_matrix/templates + # $ - Environment variables + #html: "%TEMPLATES%/html.erb" # Path to HTML template + #plain: "%TEMPLATES%/plain.erb" # Path to plaintext template + #auth: + #user: example + #pass: any HTTP encodable string +#- name: other-hq +# room: "#hq:private.matrix.org +# matrix: matrix-priv + +# To use the webhook, you need to configure it into Grafana as: +# +# Url: http://:/hook?rule= +# Http Method: POST diff --git a/polkadot/deployments/monitoring/grafana/provisioning/dashboards/grafana-dashboard.yaml b/polkadot/deployments/monitoring/grafana/provisioning/dashboards/grafana-dashboard.yaml new file mode 100644 index 00000000000..d14ed2637d5 --- /dev/null +++ b/polkadot/deployments/monitoring/grafana/provisioning/dashboards/grafana-dashboard.yaml @@ -0,0 +1,6 @@ +- name: 'default' + orgId: 1 + folder: '' + type: file + options: + path: '/etc/grafana/dashboards' \ No newline at end of file diff --git a/polkadot/deployments/monitoring/grafana/provisioning/datasources/grafana-datasource.yaml b/polkadot/deployments/monitoring/grafana/provisioning/datasources/grafana-datasource.yaml new file mode 100644 index 00000000000..b85cf06e2bd --- /dev/null +++ b/polkadot/deployments/monitoring/grafana/provisioning/datasources/grafana-datasource.yaml @@ -0,0 +1,16 @@ +# list of datasources to insert/update depending +# whats available in the database +datasources: + # name of the datasource. Required +- name: Prometheus + # datasource type. Required + type: prometheus + # access mode. direct or proxy. Required + access: proxy + # org id. will default to orgId 1 if not specified + orgId: 1 + # url + url: http://prometheus-metrics:9090 + # mark as default datasource. 
Max one per org + isDefault: true + version: 1 diff --git a/polkadot/deployments/monitoring/grafana/provisioning/notifiers/grafana-notifier.yaml b/polkadot/deployments/monitoring/grafana/provisioning/notifiers/grafana-notifier.yaml new file mode 100644 index 00000000000..4eb6ea3863e --- /dev/null +++ b/polkadot/deployments/monitoring/grafana/provisioning/notifiers/grafana-notifier.yaml @@ -0,0 +1,15 @@ +notifiers: + - name: Matrix + type: webhook + uid: notifier1 + is_default: true + send_reminder: true + frequency: 1h + disable_resolve_message: false + settings: + url: http://grafana-matrix-notifier:4567/hook?rule=bridge + http_method: POST + +delete_notifiers: + - name: Matrix + uid: notifier1 diff --git a/polkadot/deployments/monitoring/prometheus/prometheus.yml b/polkadot/deployments/monitoring/prometheus/prometheus.yml new file mode 100644 index 00000000000..7092bd27314 --- /dev/null +++ b/polkadot/deployments/monitoring/prometheus/prometheus.yml @@ -0,0 +1,7 @@ +global: + scrape_interval: 15s +scrape_configs: + - job_name: dummy + file_sd_configs: + - files: + - /etc/prometheus/targets-*.yml diff --git a/polkadot/deployments/networks/OpenEthereum.Dockerfile b/polkadot/deployments/networks/OpenEthereum.Dockerfile new file mode 100644 index 00000000000..d47708ca29b --- /dev/null +++ b/polkadot/deployments/networks/OpenEthereum.Dockerfile @@ -0,0 +1,91 @@ +FROM ubuntu:xenial AS builder + +# show backtraces +ENV RUST_BACKTRACE 1 + +ENV LAST_DEPS_UPDATE 2020-06-19 + +# install tools and dependencies +RUN set -eux; \ + apt-get update && \ + apt-get install -y file curl jq ca-certificates && \ + apt-get install -y cmake pkg-config libssl-dev git clang libclang-dev + +ENV LAST_CERTS_UPDATE 2020-06-19 + +RUN update-ca-certificates && \ + curl https://sh.rustup.rs -sSf | sh -s -- -y + +ENV PATH="/root/.cargo/bin:${PATH}" +ENV LAST_RUST_UPDATE="2020-09-09" +RUN rustup update stable && \ + rustup install nightly && \ + rustup target add wasm32-unknown-unknown --toolchain 
nightly + +RUN rustc -vV && \ + cargo -V && \ + gcc -v && \ + g++ -v && \ + cmake --version + +WORKDIR /openethereum + +### Build from the repo +ARG ETHEREUM_REPO=https://github.com/paritytech/openethereum.git +ARG ETHEREUM_HASH=344991dbba2bc8657b00916f0e4b029c66f159e8 +RUN git clone $ETHEREUM_REPO /openethereum && git checkout $ETHEREUM_HASH + +### Build locally. Make sure to set the CONTEXT to main directory of the repo. +# ADD openethereum /openethereum + +WORKDIR /parity-bridges-common + +### Build from the repo +# Build using `master` initially. +ARG BRIDGE_REPO=https://github.com/paritytech/parity-bridges-common +RUN git clone $BRIDGE_REPO /parity-bridges-common && git checkout master + +WORKDIR /openethereum +RUN cargo build --release --verbose || true + +# Then rebuild by switching to a different branch to only incrementally +# build the changes. +WORKDIR /parity-bridges-common +ARG BRIDGE_HASH=master +RUN git checkout . && git fetch && git checkout $BRIDGE_HASH +### Build locally. Make sure to set the CONTEXT to main directory of the repo. +# ADD . 
/parity-bridges-common + +WORKDIR /openethereum +RUN cargo build --release --verbose +RUN strip ./target/release/openethereum + +FROM ubuntu:xenial + +# show backtraces +ENV RUST_BACKTRACE 1 + +RUN set -eux; \ + apt-get update && \ + apt-get install -y curl + +RUN groupadd -g 1000 openethereum \ + && useradd -u 1000 -g openethereum -s /bin/sh -m openethereum + +# switch to user openethereum here +USER openethereum + +WORKDIR /home/openethereum + +COPY --chown=openethereum:openethereum --from=builder /openethereum/target/release/openethereum ./ +# Solve issues with custom --keys-path +RUN mkdir -p ~/.local/share/io.parity.ethereum/keys/ +# check if executable works in this container +RUN ./openethereum --version + +EXPOSE 8545 8546 30303/tcp 30303/udp + +HEALTHCHECK --interval=2m --timeout=5s \ + CMD curl -f http://localhost:8545/api/health || exit 1 + +ENTRYPOINT ["/home/openethereum/openethereum"] diff --git a/polkadot/deployments/networks/eth-poa.yml b/polkadot/deployments/networks/eth-poa.yml new file mode 100644 index 00000000000..7291a2ccfd7 --- /dev/null +++ b/polkadot/deployments/networks/eth-poa.yml @@ -0,0 +1,46 @@ +# Compose file for quickly spinning up a local instance of an Ethereum PoA network. +# +# Note that this PoA network is only used for testing, so the configuration settings you see here +# are *not* recommended for a production environment. +# +# For example, do *not* keep your account key in version control, and unless you're _really_ sure +# you want to provide public access to your nodes do *not* publicly expose RPC methods. 
+version: '3.5' +services: + poa-node-arthur: &poa-node + image: hcastano/openethereum-bridge-builtins + entrypoint: + - /home/openethereum/openethereum + - --config=/config/poa-node-config + - --node-key=arthur + - --engine-signer=0x005e714f896a8b7cede9d38688c1a81de72a58e4 + environment: + RUST_LOG: rpc=trace,txqueue=trace,bridge-builtin=trace + ports: + - "8545:8545" + - "8546:8546" + - "30303:30303" + + poa-node-bertha: + <<: *poa-node + entrypoint: + - /home/openethereum/openethereum + - --config=/config/poa-node-config + - --node-key=bertha + - --engine-signer=0x007594304039c2937a12220338aab821d819f5a4 + ports: + - "8645:8545" + - "8646:8546" + - "31303:30303" + + poa-node-carlos: + <<: *poa-node + entrypoint: + - /home/openethereum/openethereum + - --config=/config/poa-node-config + - --node-key=carlos + - --engine-signer=0x004e7a39907f090e19b0b80a277e77b72b22e269 + ports: + - "8745:8545" + - "8746:8546" + - "32303:30303" diff --git a/polkadot/deployments/networks/millau.yml b/polkadot/deployments/networks/millau.yml new file mode 100644 index 00000000000..54790579f1c --- /dev/null +++ b/polkadot/deployments/networks/millau.yml @@ -0,0 +1,87 @@ +# Compose file for quickly spinning up a local instance of the Millau Substrate network. +# +# Note that the Millau network is only used for testing, so the configuration settings you see here +# are *not* recommended for a production environment. +# +# For example, do *not* keep your `node-key` in version control, and unless you're _really_ sure you +# want to provide public access to your nodes do *not* publicly expose RPC methods. 
+version: '3.5' +services: + millau-node-alice: &millau-bridge-node + image: paritytech/millau-bridge-node + entrypoint: + - /home/user/millau-bridge-node + - --execution=Native + - --chain=local + - --bootnodes=/dns4/millau-node-bob/tcp/30333/p2p/12D3KooWM5LFR5ne4yTQ4sBSXJ75M4bDo2MAhAW2GhL3i8fe5aRb + - --alice + - --node-key=0f900c89f4e626f4a217302ab8c7d213737d00627115f318ad6fb169717ac8e0 + - --rpc-cors=all + - --unsafe-rpc-external + - --unsafe-ws-external + environment: + RUST_LOG: runtime=trace,rpc=debug,txpool=trace,runtime::bridge=trace + ports: + - "19933:9933" + - "19944:9944" + + millau-node-bob: + <<: *millau-bridge-node + entrypoint: + - /home/user/millau-bridge-node + - --execution=Native + - --chain=local + - --bootnodes=/dns4/millau-node-alice/tcp/30333/p2p/12D3KooWFqiV73ipQ1jpfVmCfLqBCp8G9PLH3zPkY9EhmdrSGA4H + - --bob + - --node-key=db383639ff2905d79f8e936fd5dc4416ef46b514b2f83823ec3c42753d7557bb + - --rpc-cors=all + - --unsafe-rpc-external + - --unsafe-ws-external + ports: + - "20033:9933" + - "20044:9944" + + millau-node-charlie: + <<: *millau-bridge-node + entrypoint: + - /home/user/millau-bridge-node + - --execution=Native + - --chain=local + - --bootnodes=/dns4/millau-node-alice/tcp/30333/p2p/12D3KooWFqiV73ipQ1jpfVmCfLqBCp8G9PLH3zPkY9EhmdrSGA4H + - --charlie + - --rpc-cors=all + - --unsafe-rpc-external + - --unsafe-ws-external + ports: + - "20133:9933" + - "20144:9944" + + millau-node-dave: + <<: *millau-bridge-node + entrypoint: + - /home/user/millau-bridge-node + - --execution=Native + - --chain=local + - --bootnodes=/dns4/millau-node-alice/tcp/30333/p2p/12D3KooWFqiV73ipQ1jpfVmCfLqBCp8G9PLH3zPkY9EhmdrSGA4H + - --dave + - --rpc-cors=all + - --unsafe-rpc-external + - --unsafe-ws-external + ports: + - "20233:9933" + - "20244:9944" + + millau-node-eve: + <<: *millau-bridge-node + entrypoint: + - /home/user/millau-bridge-node + - --execution=Native + - --chain=local + - 
--bootnodes=/dns4/millau-node-alice/tcp/30333/p2p/12D3KooWFqiV73ipQ1jpfVmCfLqBCp8G9PLH3zPkY9EhmdrSGA4H + - --eve + - --rpc-cors=all + - --unsafe-rpc-external + - --unsafe-ws-external + ports: + - "20333:9933" + - "20344:9944" diff --git a/polkadot/deployments/networks/rialto.yml b/polkadot/deployments/networks/rialto.yml new file mode 100644 index 00000000000..3039d7c33bc --- /dev/null +++ b/polkadot/deployments/networks/rialto.yml @@ -0,0 +1,87 @@ +# Compose file for quickly spinning up a local instance of the Rialto Substrate network. +# +# Note that the Rialto network is only used for testing, so the configuration settings you see here +# are *not* recommended for a production environment. +# +# For example, do *not* keep your `node-key` in version control, and unless you're _really_ sure you +# want to provide public access to your nodes do *not* publicly expose RPC methods. +version: '3.5' +services: + rialto-node-alice: &rialto-bridge-node + image: paritytech/rialto-bridge-node + entrypoint: + - /home/user/rialto-bridge-node + - --execution=Native + - --chain=local + - --bootnodes=/dns4/rialto-node-bob/tcp/30333/p2p/12D3KooWSEpHJj29HEzgPFcRYVc5X3sEuP3KgiUoqJNCet51NiMX + - --alice + - --node-key=79cf382988364291a7968ae7825c01f68c50d679796a8983237d07fe0ccf363b + - --rpc-cors=all + - --unsafe-rpc-external + - --unsafe-ws-external + environment: + RUST_LOG: runtime=trace,rpc=debug,txpool=trace,runtime::bridge=trace + ports: + - "9933:9933" + - "9944:9944" + + rialto-node-bob: + <<: *rialto-bridge-node + entrypoint: + - /home/user/rialto-bridge-node + - --execution=Native + - --chain=local + - --bootnodes=/dns4/rialto-node-alice/tcp/30333/p2p/12D3KooWMF6JvV319a7kJn5pqkKbhR3fcM2cvK5vCbYZHeQhYzFE + - --bob + - --node-key=4f9d0146dd9b7b3bf5a8089e3880023d1df92057f89e96e07bb4d8c2ead75bbd + - --rpc-cors=all + - --unsafe-rpc-external + - --unsafe-ws-external + ports: + - "10033:9933" + - "10044:9944" + + rialto-node-charlie: + <<: *rialto-bridge-node + entrypoint: + - 
/home/user/rialto-bridge-node + - --execution=Native + - --chain=local + - --bootnodes=/dns4/rialto-node-alice/tcp/30333/p2p/12D3KooWMF6JvV319a7kJn5pqkKbhR3fcM2cvK5vCbYZHeQhYzFE + - --charlie + - --rpc-cors=all + - --unsafe-rpc-external + - --unsafe-ws-external + ports: + - "10133:9933" + - "10144:9944" + + rialto-node-dave: + <<: *rialto-bridge-node + entrypoint: + - /home/user/rialto-bridge-node + - --execution=Native + - --chain=local + - --bootnodes=/dns4/rialto-node-alice/tcp/30333/p2p/12D3KooWMF6JvV319a7kJn5pqkKbhR3fcM2cvK5vCbYZHeQhYzFE + - --dave + - --rpc-cors=all + - --unsafe-rpc-external + - --unsafe-ws-external + ports: + - "10233:9933" + - "10244:9944" + + rialto-node-eve: + <<: *rialto-bridge-node + entrypoint: + - /home/user/rialto-bridge-node + - --execution=Native + - --chain=local + - --bootnodes=/dns4/rialto-node-alice/tcp/30333/p2p/12D3KooWMF6JvV319a7kJn5pqkKbhR3fcM2cvK5vCbYZHeQhYzFE + - --eve + - --rpc-cors=all + - --unsafe-rpc-external + - --unsafe-ws-external + ports: + - "10333:9933" + - "10344:9944" diff --git a/polkadot/deployments/reverse-proxy/README.md b/polkadot/deployments/reverse-proxy/README.md new file mode 100644 index 00000000000..ded81f80a1b --- /dev/null +++ b/polkadot/deployments/reverse-proxy/README.md @@ -0,0 +1,15 @@ +# nginx-proxy + +This is a nginx reverse proxy configuration with Let's encrypt companion. +Main purpose is to be able to use `https://polkadot.js.org/apps` to connect to +a running network. + +## How to? + +In current directory: +```bash +docker-compose up -d +``` + +Then start `rialto` network with the same command (one folder up). `nginx` should +pick up new containers being created and automatically create a proxy setup for `Charlie`. 
diff --git a/polkadot/deployments/reverse-proxy/docker-compose.yml b/polkadot/deployments/reverse-proxy/docker-compose.yml new file mode 100644 index 00000000000..61c9505ae56 --- /dev/null +++ b/polkadot/deployments/reverse-proxy/docker-compose.yml @@ -0,0 +1,42 @@ +version: '2' +services: + nginx-proxy: + image: jwilder/nginx-proxy + container_name: nginx-proxy + networks: + - nginx-proxy + - deployments_default + ports: + - "80:80" + - "443:443" + volumes: + - conf:/etc/nginx/conf.d + - vhost:/etc/nginx/vhost.d + - html:/usr/share/nginx/html + - dhparam:/etc/nginx/dhparam + - certs:/etc/nginx/certs:ro + - /var/run/docker.sock:/tmp/docker.sock:ro + + letsencrypt: + image: jrcs/letsencrypt-nginx-proxy-companion + container_name: nginx-proxy-le + networks: + - nginx-proxy + volumes_from: + - nginx-proxy + volumes: + - certs:/etc/nginx/certs:rw + - /var/run/docker.sock:/var/run/docker.sock:ro + +volumes: + conf: + vhost: + html: + dhparam: + certs: + +networks: + nginx-proxy: + driver: bridge + deployments_default: + external: true diff --git a/polkadot/deployments/run.sh b/polkadot/deployments/run.sh new file mode 100755 index 00000000000..a79638352a3 --- /dev/null +++ b/polkadot/deployments/run.sh @@ -0,0 +1,162 @@ +#!/bin/bash + +# Script used for running and updating bridge deployments. +# +# To deploy a network you can run this script with the name of the bridge (or multiple bridges) you want to run. +# +# `./run.sh poa-rialto rialto-millau` +# +# To update a deployment to use the latest images available from the Docker Hub add the `update` +# argument after the bridge name. +# +# `./run.sh rialto-millau update` +# +# Once you've stopped having fun with your deployment you can take it down with: +# +# `./run.sh rialto-millau stop` +# +# Stopping the bridge will also bring down all networks that it uses. 
So if you have started multiple bridges +# that are using the same network (like Millau in rialto-millau and westend-millau bridges), then stopping one +# of these bridges will cause the other bridge to break. + +set -xeu + +# Since the Compose commands are using relative paths we need to `cd` into the `deployments` folder. +cd "$( dirname "${BASH_SOURCE[0]}" )" + +function show_help () { + set +x + echo " " + echo Error: $1 + echo " " + echo "Usage:" + echo " ./run.sh poa-rialto [stop|update] Run PoA <> Rialto Networks & Bridge" + echo " ./run.sh rialto-millau [stop|update] Run Rialto <> Millau Networks & Bridge" + echo " ./run.sh westend-millau [stop|update] Run Westend -> Millau Networks & Bridge" + echo " " + echo "Options:" + echo " --no-monitoring Disable monitoring" + echo " " + echo "You can start multiple bridges at once by passing several bridge names:" + echo " ./run.sh poa-rialto rialto-millau westend-millau [stop|update]" + exit 1 +} + +RIALTO=' -f ./networks/rialto.yml' +MILLAU=' -f ./networks/millau.yml' +ETH_POA=' -f ./networks/eth-poa.yml' +MONITORING=' -f ./monitoring/docker-compose.yml' + +BRIDGES=() +NETWORKS='' +SUB_COMMAND='start' +for i in "$@" +do + case $i in + --no-monitoring) + MONITORING=" -f ./monitoring/disabled.yml" + shift + continue + ;; + poa-rialto) + BRIDGES+=($i) + NETWORKS+=${RIALTO} + RIALTO='' + NETWORKS+=${ETH_POA} + ETH_POA='' + shift + ;; + rialto-millau) + BRIDGES+=($i) + NETWORKS+=${RIALTO} + RIALTO='' + NETWORKS+=${MILLAU} + MILLAU='' + shift + ;; + westend-millau) + BRIDGES+=($i) + NETWORKS+=${MILLAU} + MILLAU='' + shift + ;; + start|stop|update) + SUB_COMMAND=$i + shift + ;; + *) + show_help "Unknown option: $i" + ;; + esac +done + +if [ ${#BRIDGES[@]} -eq 0 ]; then + show_help "Missing bridge name." +fi + +COMPOSE_FILES=$NETWORKS$MONITORING + +# Compose looks for .env files in the the current directory by default, we don't want that +COMPOSE_ARGS="--project-directory ." +# Path to env file that we want to use. 
Compose only accepts a single `--env-file` argument, +# so we'll be using the last .env file we find.
-z ${WITH_PROXY+x} ]; then + cd ./reverse-proxy + docker-compose up -d +fi diff --git a/polkadot/deployments/types-millau.json b/polkadot/deployments/types-millau.json new file mode 100644 index 00000000000..2414620733f --- /dev/null +++ b/polkadot/deployments/types-millau.json @@ -0,0 +1,172 @@ +{ + "--1": "Millau Types", + "MillauBalance": "u64", + "MillauBlockHash": "H512", + "MillauBlockNumber": "u64", + "MillauHeader": { + "parent_Hash": "MillauBlockHash", + "number": "Compact", + "state_root": "MillauBlockHash", + "extrinsics_root": "MillauBlockHash", + "digest": "MillauDigest" + }, + "MillauDigest": { + "logs": "Vec" + }, + "MillauDigestItem": { + "_enum": { + "Other": "Vec", + "AuthoritiesChange": "Vec", + "ChangesTrieRoot": "MillauBlockHash", + "SealV0": "SealV0", + "Consensus": "Consensus", + "Seal": "Seal", + "PreRuntime": "PreRuntime" + } + }, + "--2": "Rialto Types", + "RialtoBalance": "u128", + "RialtoBlockHash": "H256", + "RialtoBlockNumber": "u32", + "RialtoHeader": { + "parent_Hash": "RialtoBlockHash", + "number": "Compact", + "state_root": "RialtoBlockHash", + "extrinsics_root": "RialtoBlockHash", + "digest": "RialtoDigest" + }, + "RialtoDigest": { + "logs": "Vec" + }, + "RialtoDigestItem": { + "_enum": { + "Other": "Vec", + "AuthoritiesChange": "Vec", + "ChangesTrieRoot": "RialtoBlockHash", + "SealV0": "SealV0", + "Consensus": "Consensus", + "Seal": "Seal", + "PreRuntime": "PreRuntime" + } + }, + "--3": "Common types", + "Address": "AccountId", + "LookupSource": "AccountId", + "AccountSigner": "MultiSigner", + "SpecVersion": "u32", + "RelayerId": "AccountId", + "SourceAccountId": "AccountId", + "ImportedHeader": { + "header": "BridgedHeader", + "requires_justification": "bool", + "is_finalized": "bool", + "signal_hash": "Option" + }, + "AuthoritySet": { + "authorities": "AuthorityList", + "set_id": "SetId" + }, + "Id": "[u8; 4]", + "InstanceId": "Id", + "LaneId": "Id", + "MessageNonce": "u64", + "MessageId": "(Id, u64)", + "MessageKey": { + 
"lane_id": "LaneId", + "nonce:": "MessageNonce" + }, + "InboundRelayer": "AccountId", + "InboundLaneData": { + "relayers": "Vec<(MessageNonce, MessageNonce, RelayerId)>", + "last_confirmed_nonce": "MessageNonce" + }, + "OutboundLaneData": { + "latest_generated_nonce": "MessageNonce", + "latest_received_nonce": "MessageNonce", + "oldest_unpruned_nonce": "MessageNonce" + }, + "MessageData": { + "payload": "MessagePayload", + "fee": "Fee" + }, + "MessagePayload": "Vec", + "BridgedOpaqueCall": "Vec", + "OutboundMessageFee": "Fee", + "OutboundPayload": { + "spec_version": "SpecVersion", + "weight": "Weight", + "origin": "CallOrigin", + "call": "BridgedOpaqueCall" + }, + "CallOrigin": { + "_enum": { + "SourceRoot": "()", + "TargetAccount": "(SourceAccountId, MultiSigner, MultiSignature)", + "SourceAccount": "SourceAccountId" + } + }, + "MultiSigner": { + "_enum": { + "Ed25519": "H256", + "Sr25519": "H256", + "Ecdsa": "[u8;33]" + } + }, + "MessagesProofOf": { + "bridged_header_hash": "BridgedBlockHash", + "storage_proof": "Vec", + "lane": "LaneId", + "nonces_start": "MessageNonce", + "nonces_end": "MessageNonce" + }, + "StorageProofItem": "Vec", + "MessagesDeliveryProofOf": { + "bridged_header_hash": "BridgedBlockHash", + "storage_proof": "Vec", + "lane": "LaneId" + }, + "UnrewardedRelayersState": { + "unrewarded_relayer_entries": "MessageNonce", + "messages_in_oldest_entry": "MessageNonce", + "total_messages": "MessageNonce" + }, + "AncestryProof": "()", + "MessageFeeData": { + "lane_id": "LaneId", + "payload": "OutboundPayload" + }, + "Precommit": { + "target_hash": "BridgedBlockHash", + "target_number": "BridgedBlockNumber" + }, + "AuthoritySignature": "[u8;64]", + "AuthorityId": "[u8;32]", + "SignedPrecommit": { + "precommit": "Precommit", + "signature": "AuthoritySignature", + "id": "AuthorityId" + }, + "Commit": { + "target_hash": "BridgedBlockHash", + "target_number": "BridgedBlockNumber", + "precommits": "Vec" + }, + "GrandpaJustification": { + "round": "u64", + 
"commit": "Commit", + "votes_ancestries": "Vec" + }, + "Fee": "MillauBalance", + "Balance": "MillauBalance", + "Hash": "MillauBlockHash", + "BlockHash": "MillauBlockHash", + "BlockNumber": "MillauBlockNumber", + "BridgedBlockHash": "RialtoBlockHash", + "BridgedBlockNumber": "RialtoBlockNumber", + "BridgedHeader": "RialtoHeader", + "Parameter": { + "_enum": { + "MillauToRialtoConversionRate": "u128" + } + } +} diff --git a/polkadot/deployments/types-rialto.json b/polkadot/deployments/types-rialto.json new file mode 100644 index 00000000000..bd746e003ea --- /dev/null +++ b/polkadot/deployments/types-rialto.json @@ -0,0 +1,171 @@ +{ + "--1": "Millau Types", + "MillauBalance": "u64", + "MillauBlockHash": "H512", + "MillauBlockNumber": "u64", + "MillauHeader": { + "parent_Hash": "MillauBlockHash", + "number": "Compact", + "state_root": "MillauBlockHash", + "extrinsics_root": "MillauBlockHash", + "digest": "MillauDigest" + }, + "MillauDigest": { + "logs": "Vec" + }, + "MillauDigestItem": { + "_enum": { + "Other": "Vec", + "AuthoritiesChange": "Vec", + "ChangesTrieRoot": "MillauBlockHash", + "SealV0": "SealV0", + "Consensus": "Consensus", + "Seal": "Seal", + "PreRuntime": "PreRuntime" + } + }, + "--2": "Rialto Types", + "RialtoBalance": "u128", + "RialtoBlockHash": "H256", + "RialtoBlockNumber": "u32", + "RialtoHeader": { + "parent_Hash": "RialtoBlockHash", + "number": "Compact", + "state_root": "RialtoBlockHash", + "extrinsics_root": "RialtoBlockHash", + "digest": "RialtoDigest" + }, + "RialtoDigest": { + "logs": "Vec" + }, + "RialtoDigestItem": { + "_enum": { + "Other": "Vec", + "AuthoritiesChange": "Vec", + "ChangesTrieRoot": "RialtoBlockHash", + "SealV0": "SealV0", + "Consensus": "Consensus", + "Seal": "Seal", + "PreRuntime": "PreRuntime" + } + }, + "--3": "Common types", + "Address": "AccountId", + "LookupSource": "AccountId", + "AccountSigner": "MultiSigner", + "SpecVersion": "u32", + "RelayerId": "AccountId", + "SourceAccountId": "AccountId", + "ImportedHeader": { 
+ "header": "BridgedHeader", + "requires_justification": "bool", + "is_finalized": "bool", + "signal_hash": "Option" + }, + "AuthoritySet": { + "authorities": "AuthorityList", + "set_id": "SetId" + }, + "Id": "[u8; 4]", + "InstanceId": "Id", + "LaneId": "Id", + "MessageNonce": "u64", + "MessageId": "(Id, u64)", + "MessageKey": { + "lane_id": "LaneId", + "nonce:": "MessageNonce" + }, + "InboundRelayer": "AccountId", + "InboundLaneData": { + "relayers": "Vec<(MessageNonce, MessageNonce, RelayerId)>", + "last_confirmed_nonce": "MessageNonce" + }, + "OutboundLaneData": { + "latest_generated_nonce": "MessageNonce", + "latest_received_nonce": "MessageNonce", + "oldest_unpruned_nonce": "MessageNonce" + }, + "MessageData": { + "payload": "MessagePayload", + "fee": "Fee" + }, + "MessagePayload": "Vec", + "BridgedOpaqueCall": "Vec", + "OutboundMessageFee": "Fee", + "OutboundPayload": { + "spec_version": "SpecVersion", + "weight": "Weight", + "origin": "CallOrigin", + "call": "BridgedOpaqueCall" + }, + "CallOrigin": { + "_enum": { + "SourceRoot": "()", + "TargetAccount": "(SourceAccountId, MultiSigner, MultiSignature)", + "SourceAccount": "SourceAccountId" + } + }, + "MultiSigner": { + "_enum": { + "Ed25519": "H256", + "Sr25519": "H256", + "Ecdsa": "[u8;33]" + } + }, + "MessagesProofOf": { + "bridged_header_hash": "BridgedBlockHash", + "storage_proof": "Vec", + "lane": "LaneId", + "nonces_start": "MessageNonce", + "nonces_end": "MessageNonce" + }, + "StorageProofItem": "Vec", + "MessagesDeliveryProofOf": { + "bridged_header_hash": "BridgedBlockHash", + "storage_proof": "Vec", + "lane": "LaneId" + }, + "UnrewardedRelayersState": { + "unrewarded_relayer_entries": "MessageNonce", + "messages_in_oldest_entry": "MessageNonce", + "total_messages": "MessageNonce" + }, + "AncestryProof": "()", + "MessageFeeData": { + "lane_id": "LaneId", + "payload": "OutboundPayload" + }, + "Precommit": { + "target_hash": "BridgedBlockHash", + "target_number": "BridgedBlockNumber" + }, + 
"AuthoritySignature": "[u8;64]", + "AuthorityId": "[u8;32]", + "SignedPrecommit": { + "precommit": "Precommit", + "signature": "AuthoritySignature", + "id": "AuthorityId" + }, + "Commit": { + "target_hash": "BridgedBlockHash", + "target_number": "BridgedBlockNumber", + "precommits": "Vec" + }, + "GrandpaJustification": { + "round": "u64", + "commit": "Commit", + "votes_ancestries": "Vec" + }, + "Fee": "RialtoBalance", + "Balance": "RialtoBalance", + "BlockHash": "RialtoBlockHash", + "BlockNumber": "RialtoBlockNumber", + "BridgedBlockHash": "MillauBlockHash", + "BridgedBlockNumber": "MillauBlockNumber", + "BridgedHeader": "MillauHeader", + "Parameter": { + "_enum": { + "RialtoToMillauConversionRate": "u128" + } + } +} diff --git a/polkadot/deployments/types/build.sh b/polkadot/deployments/types/build.sh new file mode 100755 index 00000000000..52605e7e4da --- /dev/null +++ b/polkadot/deployments/types/build.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +# The script generates JSON type definition files in `./deployment` directory to be used for +# JS clients. +# Both networks have a lot of common types, so to avoid duplication we merge `common.json` file with +# chain-specific definitions in `rialto|millau.json`. + +set -exu + +# Make sure we are in the right dir. +cd $(dirname $(realpath $0)) + +# Create rialto and millau types. 
+jq -s '.[0] * .[1]' common.json rialto.json > ../types-rialto.json +jq -s '.[0] * .[1]' common.json millau.json > ../types-millau.json diff --git a/polkadot/deployments/types/common.json b/polkadot/deployments/types/common.json new file mode 100644 index 00000000000..cf881288694 --- /dev/null +++ b/polkadot/deployments/types/common.json @@ -0,0 +1,159 @@ +{ + "--1": "Millau Types", + "MillauBalance": "u64", + "MillauBlockHash": "H512", + "MillauBlockNumber": "u64", + "MillauHeader": { + "parent_Hash": "MillauBlockHash", + "number": "Compact", + "state_root": "MillauBlockHash", + "extrinsics_root": "MillauBlockHash", + "digest": "MillauDigest" + }, + "MillauDigest": { + "logs": "Vec" + }, + "MillauDigestItem": { + "_enum": { + "Other": "Vec", + "AuthoritiesChange": "Vec", + "ChangesTrieRoot": "MillauBlockHash", + "SealV0": "SealV0", + "Consensus": "Consensus", + "Seal": "Seal", + "PreRuntime": "PreRuntime" + } + }, + "--2": "Rialto Types", + "RialtoBalance": "u128", + "RialtoBlockHash": "H256", + "RialtoBlockNumber": "u32", + "RialtoHeader": { + "parent_Hash": "RialtoBlockHash", + "number": "Compact", + "state_root": "RialtoBlockHash", + "extrinsics_root": "RialtoBlockHash", + "digest": "RialtoDigest" + }, + "RialtoDigest": { + "logs": "Vec" + }, + "RialtoDigestItem": { + "_enum": { + "Other": "Vec", + "AuthoritiesChange": "Vec", + "ChangesTrieRoot": "RialtoBlockHash", + "SealV0": "SealV0", + "Consensus": "Consensus", + "Seal": "Seal", + "PreRuntime": "PreRuntime" + } + }, + "--3": "Common types", + "Address": "AccountId", + "LookupSource": "AccountId", + "AccountSigner": "MultiSigner", + "SpecVersion": "u32", + "RelayerId": "AccountId", + "SourceAccountId": "AccountId", + "ImportedHeader": { + "header": "BridgedHeader", + "requires_justification": "bool", + "is_finalized": "bool", + "signal_hash": "Option" + }, + "AuthoritySet": { + "authorities": "AuthorityList", + "set_id": "SetId" + }, + "Id": "[u8; 4]", + "InstanceId": "Id", + "LaneId": "Id", + "MessageNonce": 
"u64", + "MessageId": "(Id, u64)", + "MessageKey": { + "lane_id": "LaneId", + "nonce:": "MessageNonce" + }, + "InboundRelayer": "AccountId", + "InboundLaneData": { + "relayers": "Vec<(MessageNonce, MessageNonce, RelayerId)>", + "last_confirmed_nonce": "MessageNonce" + }, + "OutboundLaneData": { + "latest_generated_nonce": "MessageNonce", + "latest_received_nonce": "MessageNonce", + "oldest_unpruned_nonce": "MessageNonce" + }, + "MessageData": { + "payload": "MessagePayload", + "fee": "Fee" + }, + "MessagePayload": "Vec", + "BridgedOpaqueCall": "Vec", + "OutboundMessageFee": "Fee", + "OutboundPayload": { + "spec_version": "SpecVersion", + "weight": "Weight", + "origin": "CallOrigin", + "call": "BridgedOpaqueCall" + }, + "CallOrigin": { + "_enum": { + "SourceRoot": "()", + "TargetAccount": "(SourceAccountId, MultiSigner, MultiSignature)", + "SourceAccount": "SourceAccountId" + } + }, + "MultiSigner": { + "_enum": { + "Ed25519": "H256", + "Sr25519": "H256", + "Ecdsa": "[u8;33]" + } + }, + "MessagesProofOf": { + "bridged_header_hash": "BridgedBlockHash", + "storage_proof": "Vec", + "lane": "LaneId", + "nonces_start": "MessageNonce", + "nonces_end": "MessageNonce" + }, + "StorageProofItem": "Vec", + "MessagesDeliveryProofOf": { + "bridged_header_hash": "BridgedBlockHash", + "storage_proof": "Vec", + "lane": "LaneId" + }, + "UnrewardedRelayersState": { + "unrewarded_relayer_entries": "MessageNonce", + "messages_in_oldest_entry": "MessageNonce", + "total_messages": "MessageNonce" + }, + "AncestryProof": "()", + "MessageFeeData": { + "lane_id": "LaneId", + "payload": "OutboundPayload" + }, + "Precommit": { + "target_hash": "BridgedBlockHash", + "target_number": "BridgedBlockNumber" + }, + "AuthoritySignature": "[u8;64]", + "AuthorityId": "[u8;32]", + "SignedPrecommit": { + "precommit": "Precommit", + "signature": "AuthoritySignature", + "id": "AuthorityId" + }, + "Commit": { + "target_hash": "BridgedBlockHash", + "target_number": "BridgedBlockNumber", + "precommits": "Vec" 
+ }, + "GrandpaJustification": { + "round": "u64", + "commit": "Commit", + "votes_ancestries": "Vec" + } +} diff --git a/polkadot/deployments/types/millau.json b/polkadot/deployments/types/millau.json new file mode 100644 index 00000000000..bfc86491a52 --- /dev/null +++ b/polkadot/deployments/types/millau.json @@ -0,0 +1,16 @@ +{ + "Fee": "MillauBalance", + "Balance": "MillauBalance", + "Hash": "MillauBlockHash", + "BlockHash": "MillauBlockHash", + "BlockNumber": "MillauBlockNumber", + "BridgedBlockHash": "RialtoBlockHash", + "BridgedBlockNumber": "RialtoBlockNumber", + "BridgedHeader": "RialtoHeader", + "Parameter": { + "_enum": { + "MillauToRialtoConversionRate": "u128" + } + } + +} diff --git a/polkadot/deployments/types/rialto.json b/polkadot/deployments/types/rialto.json new file mode 100644 index 00000000000..fe1ba31e8aa --- /dev/null +++ b/polkadot/deployments/types/rialto.json @@ -0,0 +1,14 @@ +{ + "Fee": "RialtoBalance", + "Balance": "RialtoBalance", + "BlockHash": "RialtoBlockHash", + "BlockNumber": "RialtoBlockNumber", + "BridgedBlockHash": "MillauBlockHash", + "BridgedBlockNumber": "MillauBlockNumber", + "BridgedHeader": "MillauHeader", + "Parameter": { + "_enum": { + "RialtoToMillauConversionRate": "u128" + } + } +} diff --git a/polkadot/diagrams/ARCHITECTURE.md b/polkadot/diagrams/ARCHITECTURE.md new file mode 100644 index 00000000000..6da88c448c9 --- /dev/null +++ b/polkadot/diagrams/ARCHITECTURE.md @@ -0,0 +1,13 @@ +# Bridge Architecture Diagrams + +## Bridge Relay +![General Overview](general-overview.svg) +![Bridge Relay Node](bridge-relay.svg) + +## Runtime Modules +![Ethereum Pallet](ethereum-pallet.svg) +![Currency Exchange Pallet](currency-exchange-pallet.svg) + +## Usage +![Cross Chain Fund Transfer](cross-chain-fund-transfer.svg) +![Parachain](parachain.svg) diff --git a/polkadot/diagrams/bridge-architecture-diagrams.drawio b/polkadot/diagrams/bridge-architecture-diagrams.drawio new file mode 100644 index 00000000000..bf073129c29 --- 
/dev/null +++ b/polkadot/diagrams/bridge-architecture-diagrams.drawio @@ -0,0 +1 @@ +5VjZctowFP0aHul4wYAfCVm70DR0JklfOootbE1kiQg52P36XmF5lSctaWkmDQ9gHclXvuccpGsN3HmSnQm0jj/xENOBY4XZwD0eOI499X34UUheII7tjQokEiTUo2pgSX5gDVoaTUmIN62BknMqyboNBpwxHMgWhoTg2/awFaftWdcowgawDBA10WsSyrhAp55V4+eYRHE5s23pnjsU3EeCp0zPxzjDRU+CyjB66CZGId82IPdk4M4F57K4SrI5porXkjH/4b1k2SKJtxfn4/lksTr3vgyLYKf73FIlJzCTzw6dffh0tZiFF6v01sZe9PCwHrLhpAj9iGiqmdS5yrykFtJeq8sgp4SFWAzco21MJF6uUaDwLVgKsFgmFFo2XN4pOnH48a4CKpI/pxKiYI1vpOD3lWBA6NFvZqoZecRC4qzhAJ35GeYJliKHIbp3OC2dmpfAVAPb2i/ORGNxwytuCSLt0aiKXrMNF5rwPcgfvxXy3ZclvzcLxyD/CFY2WGUc6wpTlBtSiIJYzd4vVDDJXRFK55xysYvmrnafQ5LuWU6bdNvyTNZtzzNZt+1Dse4arC85JSGR6smXCRISfuecSYFgl3jtEkymHd9PehRwrB7fe39BgO/Xs+D92bfTW9j6bvJ08fVKXvbYHkoCFq4RgEcpoXJ4wf4/3seTf8h771pvGbyfyBgLnCaAXvIZfC+w3HJxb7APWcs+iktKdyVLm2UNIUoipnYPIHi3dygOCVRNM92RkDBU0/RqWqtuHVSsUUesUc/mYLs9YjmHEss2xFqmd0A6kmp7WED5/NZEcgyR/JcWyVzJGv+oq5RJkii14GUnpaZer21Jc/zuVjLuWdL6BBgfSoCevbzxL/n/FRj3FbH/VIGRocA8FZB4oJ77JAtixKI3ooX10lpMDS0MljELZ+qsQy32FG02JNixCUWvCTf4BvJEfqNX+F3jVjXeeWXzOGt2HudlKyPyph4JrdtGT32TapT37KfYhqciwE+Qoo8XIMUIPxVPc4XD1imPqX9DXe+J6k3AC5wkj+2zoT7F9QyXnDDZsFdVLlbvq37HNkXm+r7moYsRyu2E8rslZkGOEQosoV5Cq2FrNWDz5EN7nZl8r+PqImbt8YrZ59veP7zt7f1sv5+FX5s17e6rzfOtaXdruoNZ03joP7QmNOtj12J4fa7tnvwE7Vtdd5s4EP01Pmf3oTlI4suPsZOm3U27aewmbd8wKIYNRi6I2O6vX2EEBklZY5/g4Lh5iTSIQbp3GM2McA8NZ8ur2Jn7n4iHwx7UvGUPXfQgBJreZ/8yyaqQGHoumcaBx2UbwSj4hYuBXJoGHk5qAykhIQ3mdaFLogi7tCZz4pgs6sMeSFh/6tyZYkkwcp1Qlt4HHvVzqQ2tjfwDDqZ+8WRg8hVPHPdxGpM04s+LSITzKzOnUMPXmPiORxYVEbrsoWFMCM1bs+UQhxmuBWL3H1f34fWjefXXl+Sn83Xw9/jz3btc2ftdbikXF+OI7q/aH7vjxXl4i6O7wSNBqO8u+S3akxOmHEm+VroqoF2DgzMlWg8NFn5A8WjuuNnVBbMmJvPpLGQ9wJoJjcljSQGDaNBw7nyNTzimeFnhlK/lCpMZpvGKDSmuIj53brJQ5/3Fhn+jz2V+hXtkcaHDbW5a6t6gxxocwB3ARBKYI5LGLpYgZZY0z5ruKgwYtvF2XCc5CdeTUlDa7T8pZVowl7cGd78Ot23JcKvQBqAttHUJ7bETTzF9C2gboGtoG7JtryKXSa4JmUuQs2VSlWMYkpAw/C/WXhYNHoIwFEROGEyjjCyG6JqqDLSAOfpzfmEWeF72GCWHdV/VFjum4HkMmRxTQQ5sixtruxPHkXeebbMZsqGTJIFbp2c3qLBX241loKo+WIFEIYtx6NDgqb6Hq+D
hT7ghAZve5i0x9DOjRoUpYpysHTC/r7o5blVlia8SXXsXSdWasXLp+5NonyiJEG1FvimJsirJHlomsb+dxI2LAvuEU1WX2YPoYf3X5r5v9WuIIk12drqlMA+9LW8H2o9ZDw6yXjdbRSgLVK8g0FoDGbw5kA1D6xrIUAK5SBi0YRjgSI5ljwxzYNcxt5ACdKAAHbWGuZw2nOo+a5svts9aYk7d8j4L5HTkNFhUxKki9PuHvJJBtM2inLjc3gwlIt92Mgmsuou0gSLAOmQ2CeRM5PRYMeyusdIgtdjm8fAyoN8y4Nh7n/e+V65cLDmm685qL4Bz39MgeO+IOxWrmhJ/jbdEKCgSA5iWXSlsEK8fg3l0a7dFonmIrDY1DxMar2se8tFEnEZ/lHwk6aQHmWotnweTYOr/eWI+X6+TDRQ1d6Aqurfm86Gcq9RYYxwJrDEeT401EzWgzbIOSZsqOTHDjAMveGLNadYcpRNGg0OzZL92lJKPZE+uDD4xToFQqrEVlB40+oJmE0ovqY9jnM5+MyrH02bXGG1waHbspfq+Imc5aKkeNjjUOnKQ9dcHWc4Mb2Li4iRhwk8sRiAea9ziJA1pcmJuBwnBAVKk8Yf1O8WEKmx9nXt5GFB8v8JZOz2yrDPxHExBFzIOSZecV5d0lWc1v+nim7ySLnh2UMLg9i2njUIIAzdeVW7Kut+r1za3rXstFVD4XlAmhNvCn44WWox9Cy26kMTrtqDomUILswdnVRk2zwYkkjW+QCkGyaWYgxjoCxtaxwxIL851C96NfQ1IVNTvnAG9wAn2EXu4IqfY6uL6nTZQY99jW90UFDX8xO15A204YVv733mZwonZruOLT2afP67ebbw4f2E8a+SI7Ptuxv7UuvkBJ++CMfn3Uvvxzf14p/g5RRnTv8YHPVIw1/Q9fL6Oo9VZaPODHtbd/LgmZ2jz6yV0+R8=7Vxbd6I6FP41PtoFhOuj2jr1zFjbOr3oy1mIUahIHMCqfTi//SQQlJsa26qo0zWrJRsIw97f/vbOTkIJ1MbzH64+MZuoD+2SwPXnJXBdEgSeEzX8h0gWkUQSQ8nQtfpUthK0rQ8YXUilU6sPvcSFPkK2b02SQgM5DjT8hEx3XTRLXjZAdvKpE30IM4K2odtZ6YvV981QqkrcSn4LraEZPZnn6JmeboyGLpo69HkOcmB4ZqxH3dBLPVPvo1lMBG5KoOYi5IdH43kN2kSvkcY6wsNtxX2ZP92Zzdn0X69lvw3LYWf1XW5ZvpwLHf/TXT8/NH/fPFbE22Zt6nTrlvHDmtNbuHfdnlJN0nf1F5FqA+VA0glXAtWZafmwPdENcnaG0YRlpj+2cYvHhwPLtmvIRi5uB6oEVfoE6PpwnjLWlvfhl0rGwIVoDH13ge+jvQiRCRcpY89iAFCozIwbPzKoTkE3XPa9Uh8+oBrM12bj58/RU/WpbjxKgLt2TH5SeY0MtUmb0OlXCN5xy7B1z7OMpALh3PJfY8cdovcribau59QMQWNBG7uoE/ul7g6hv+EtlPA62E+4XNYOMT1LOWqOZC60dd96TzpqnurpE+6RhV9jaWZeSpoZiCnreWjqGpDeFcd/qqOlH0cdSamOQsVkOgqQsHztL4BDYkAH1nmbNpHrm2iIHN2+WUmrSW9cXfMLoQmFzRv0/QWlaH3qoyTAdgNLqNwNbwUYQSUygooZLV9yVJAxRWM8wcrEsrY1dLB+Be4W6n3obqBDfjsd7qbsDEeu5z5VSmCZl7Pct6S5uFPKe6M++RzRLTKiWyoUusX16C4yqEUtSdAFALVyjqCWGEEtFwrU2ehZw3olmH7WbYsQtu6Q3w3nnbZDrHvFBLusFg7sWiHAjvXsLl5XWS9pdqKslzRWOXDQ+lwSvNVJZEYn4fnv9pL87FXjk3hR2JJXPNbQF7HLJuQCb8NjuFRiwaUGlensXFA23oAPwv/Ct6bSYhaqdygXrL/0HrSTAMPsMHTwsYGBgkMhqBKntQz
drtATY6vfD7EMPetD7wX9EYhR5eHOpWpJus4F3WbXyg6CaUGEPqUUryzk0QZ3xalASWi8TN30swCLLkGDgQf3MvIRGKoMpxdGmRmCK1QcFdWMMTowGySL4zxL9HzZecrcFR45yUV3l+xI6jrIZ2YQ/xpYGPIE4STfWQS5wZosx0Tj3tQ7XoajiqkMB+RkOOCg9TnhLEswCiMRgWLxUBbmBeehCDzfwkOprKnonASyFbOSINtYX9W+9Y4Ph+SQJGGhED8kJj+Gk0WF9FXxvBM7k19IL8jYg9Wh1e926PwcX9SUqyRcgXbY0nm2DNPATLEs1RYt8oHUIApw2cin5gQ+dV9xD5xlHUtldBPtQG6SqslL3GGdJJvXL6u+T453irMaonLsmpgoHiV0rQlD/GHDkMbqX6wTxl+dCVZSM/6ZqeA9e1i26nSS8yrHd6vI9ucVj6JCzxFqxl8r/GfLc49wprskXLSnvbHl+0WFNs+liwxHn0aJoHxm2GZdPMQLhcK2kJ0LP25ETwwrV+H9MBGd3YrfXitaN30EUiGdZwvpu85rZeapwCHmqfhsnIuVcl1oQKK64Hwgjmq5oJ6l22MPajGLpmpWefVc8aD1XPHIznzUKlHEtMXNzzNBdt8rLLOh97erGyMs+oHQKaQzIl+0dEYElxwvWVe3HszF5FS+Cw5cZOKz8wD3U8fyTCyr6qfoYsIeR8NPHnRbvTey4UXg7HAWi9z6DF1rsEiVDeypYfUNU3f98JYGtYNvdpvC3POvX7yH8MrNtXI59v7UpDEjxFScZwH8b4Acnzomz9N2tKWjRABHfrDcw3dazhBLAWn5LhrBFpH5RLM8t7s/7lApBHLCiKoCMkbU5KwNFWFnG+JmzIwbzVozYRBsBlhXAleZYtpziTawU5BZOa5m6s4wmuNcZ+4yNvft80ipdM1XFnMrF2BuoIgFNPdmO/6H7fgxrvm1udBnsaPKasd45OTVHAvm2ybJvrpr0FuCILo1ons4rvjRxqVoYxeJvsvdTD0bYfAHorplL2k8tqPpiuMAL8RnY1dzs/eYEbFpyFR+MgEoc7HbchKB9I3fnAEorKu8lE/smlpuQkyMliLhbqnC+gExewrwCaqzBviXA2fLwWtptUSJTNGSfZypNUrrHAa+dZu/P0Yv/zy3OiwOo10A8UnRkOaEiM/GdpSu1cZb76HMYEeVY7XjX+I7JPGxLkJRWKfXz4P4GJP24ajbNLtQaz5UZBYnEL5CZusQLnGnQnOpndzROoAtLBctZT4Gy4nYwI7VgvPH4Q2LgcGFsJysiaIIBFVTT4vvVNZqqsK6Le48+C53hcM6pzDsbtPSfk0rr9OfLE4hHiiFC66jHfNbuS64A2M8K4n1PQh+9lmoUlKkyOdNt4hZXEWlkf2wYsPBJ+J44DD8EClx+MglH2nZCBAPdZuPd3B0bzoeC0CkC8jxZYk/uRy/iu340pOa7WlbYLEjc03yxKPfacW8KHXfvtCc9fsG5xHzmro7KsX2j5PnClydljX6m12jNek2u8q8/CJe1Vlc4xLqt4qqnRzFPWM7jv80fjVuJn9Y7Pi3fltEilNZP3ahXlb99t6dku/cca2gPnvLUp/VsEM8d64mPX94x+IQl1Cf1YTkXvxTIDbjT7fZK5uPVqs1Z7Cj9rc+W0hiY94keFn12bBeEWx1qgaGx+ymB8tTSkI1mcWx0V4Hu4v8Xgei+MQyH6/xrO5ylpVcnudSKzNOoJbLYxPXXzv3//hig8XEzMX6y2TEZQlY40T1SOzIvHeNdSvOebDjXTBTv4cJLI15fuMsaU9OrcqMvoZ5VNK7cbaUKna18ZdKFSdvY16W0itHD2llLFp9ajwkhdW33MHN/w==5VpdV6M8EP41vXRPAqUtl9rWj3P86K67x1dv9qSQQjQQDMG2/vo3gSCf1lqrrWe9kUyGgTzzzGQmtGMOg8UJR5F/wVxMOwZwFx1z1DEMCLq2/Kcky1xidTOJx4mrZYX
gmjzjXFFLE+LiuKIoGKOCRFWhw8IQO6IiQ5yzeVVtxmj1qRHycENw7SDalN4QV/iZdGCBQn6KiefnT4ZAz0yR8+BxloT6eSELcTYToNyMVo195LJ5SWSOO+aQMyayq2AxxFThmiN2PT4fJ/dD93Hi9/2jx8mjMWEHmbHj99zysjiOQ7Gx6cNxd+CeUfE4t9njYhksEXIODL00scyRxK4EVg8ZFz7zWIjouJAepWhhZRXIUaFzzlgkhVAK77EQS80SlAgmRb4IqJ6Vq+DL//T96eBWDX5Y+XC0KE+Olnq0Jgwarpgl3NGLurzD4Z+Zc3N7B/4KByx/To2hhgsIxD0sVmCk7SlgSmTTIJ9gFmD5klKBY4oEeaoyEmliey96hYvkhfZSOvwTY341vVfhYQCKpphmt/7mKIyRIwgLM5s0cYjr+IiLTP1Me9h7uLvw77B98fOwl2nmvn99/VWulHhQ8tfcJwJfRyhFcy4zSIUESgNxR3vbUo6asVDoMYR6PGSU8dS0CdI/KY+lTRJ6UmqqkeDsAV8pmVAuh+D9Tn/CXODFSjfls3lY65RnQo3ZvMgfthb5pdTRHbzbsXJY8u07orO3i+jcPMpa17BulFlfFGXrIa/f+gnRRD9pgngsdwbQiMaSd6pB0RY2G+O8PrFNWCU27IEGsV/IX2Z2rrd1MM3dbjLFvnJbntv6JrNq83iT/mCv6N9t0P9WVndtXjzP9qky8ogSL5TXjkQQy5R/pLhLZK12qCcC4rqZk3FMntE0taewjxgJRboW66hjjVq9sYphjSB5KTj1QyqVW1vwHIAfEPT7lQDKi6S1odfWJ2o1JRU2m8WSA3XfvLzE5u6CO9kovlmEwa3Xcemth5yjZUlBc7hJgzw996vpuduvle01fQgGg1U3yIvsFbbLqH4jA1yyPU4AeQRsJQPYwLYrkO9/Amh46yzOYFPPZzP1EqqtJjK+ZTi7qvGlzHnomMfNIsZnwTSJd1jAAKuK/sBqFjBmSwED4WdVMHDHJcxX9cmrKvPvkmCtbq2xA/YX5Evrm1VMcJsl06BnGd8rYTbddYqeVHs3kzGrUieiHCNXLXOKcahQoYgEKpr3MGN27VrL121p+UBbxgSfljFBA6bvd3bRXzPz2dtOfB+Cvlm6jXDEYqIiS8YXS1/EIRHBWbA5joRdNFm9D4cZg+6+HWbAnRyZb5nY9s629A9hbzeY/QvPEVfl7HUyDYhQu+de8hgCo9rFfSaRX/uQMA7dzNb7PyCsYMe/+wUB1JJTW6PyhZ8Q7Mu+Efds/3QijJuDoTGZLY5bDrIPFRicOGm1g9NecM4SmvaEShZKWSqdSbRljvBJs46NfRSpS1miUpaIt+MqwlxWT2lxm980KUSf5qJe7bClv2bQDbawe+C/1IPPEb8ySASdE352FrHdfG/dfPNoXcO6X3T6u9o7Vr11KRCGLFRMUkznLMgIj1W/JYv9hEuwnGVRMVEmaVuZ2cdtps73Ly2XWmFvnky9co74z56lrGLrm7FT9OCGNr1pz52fntRbSbvGjGyJ+q6CHA1Djcq9bijDYIWhj5wCyGHxw5xMvfjlkzn+Hw==5VhdU6MwFP01POoAgaqPtlZ3R3d0ts7sc4BbyBhIJ4R++Os3KeEzONZVnLr2BXJJbsg599x7i4Vm6faG41Xyi0VALdeOtha6slzXsb0LeVGWXWXxvdIScxJpW2NYkGeoJmprQSLIOxMFY1SQVdcYsiyDUHRsmHO26U5bMtrddYVjMAyLEFPT+odEIimt577d2H8AiZNqZ8fWTwIcPsWcFZneL2MZlE9SXLnRU/MER2zTMqG5hWacMVHepdsZUIVrhdht+pM4cjMes+y68Cfp+vH2pHR2/ZYl9eE4ZOJjXbul6zWmhUZyLhLgUKT6zGJXQSyPv1K34Y6SLAJuoekmIQIWKxwq+0aGlrQlIqVy5MjbQMEK0V1QG2qw7wshvYC2H3g+jcMauIBti3d93htgKQi+k1P
0Uxdp6nRYu+d6vGlipI6EpB0f1UKs4zKufTcIyxsN8hsARwbgiyLIBccC/gfEvcnRIe4ZiF+GgvEX0S5SWk5AU3VuIpPMHQ6APrCcCMIyOSVgQrBUTqDqwbTGeMaoWie9oeX+1/JxSUms1grW44yVxMzqzGiPyI/jd/nxfZMfNEDPZCx2JgY7BjGQRZeqRCglUJznJOwC+DasIOpUEhOpFhL+ABKVjQPFgqy79WcIHr3DAyPy9RoiLnpEuD2Ec1bwEPSqdlrvOXK9riOv70hgHoMwHO3Zqo/97wRWHUCLQedUjheSNnm5Y+GTvDxuTVolEXthddnEWiehZBP4gIBSEkXKx5RDTp5xsPenNLNSR9wf2p9a/tXhkVHFYF9FdbuiN+nU/SF12ae263Xz3/uCpZrClsscxqHPMehzFX3zlKgXmK9Bh81QqsyYgAOK0gs1KML86V6uIkKBJbHzP7EVqPXXLkzOgN7Px8p8jtl7TWVvLbOTa/+W8bIzYOdlddfwvYL6WECiXr7x7QEg3c8sIY7ZU32PGoLcV1L/oTUE9boCrx/0Y9cQs0dDp3sZ4OjFJHQ89aMOwA8oIM7ki9UP/5uKz+uLr6+ZQ8XnnfUc9f8JjS0+swX3Wg2c6tzsB87Y8pgVWEXhuxV4IiXYbRSOXoFnBn++4u+G4/0bXMumITe4+4ItXP/bwpgtnBw2H/ZKppovp2j+Fw==3ZnLcpswFIafxstmEEIOLGPHSWbaTtx60aY7GVRgAsgjhI3z9BVG4ibqOB5fszL6EQfrO7+kI3sAx3H+yPAi+E49Eg1Mw8sH8H5gmsCwHPFRKGulIKtUfBZ6UquFWfhGVEepZqFH0lZHTmnEw0VbdGmSEJe3NMwYXbW7/aVR+60L7BNNmLk40tVfoceDUrWRUetPJPQD9WZgyDtz7L76jGaJfF9CE1LeibEKI7umAfboqiHByQCOGaW8vIrzMYkKrorY16c0c//APGEPy2ecvC3Wyx9fymAPH3mkGhwjCT9saLMMvcRRJklOafQqxsnlmPlaIRbDXxSX7joKE4+wARytgpCT2QK7hb4S1hJawONItIC4nBdYifdtXgkV7OeMiyhE6juOT3JYEsZJ3si7HO8joTHhbC26yLu2zJx0tXmLyvaqtogJZZ+gYQ+oRCxt6Veha8DiQjL+AG+o8f4MnC0TtUEr8E3QqA+0eSzQlgZ6wgPCSBYL9WeW8DAW89wQS2EWES0HrCQqsb2D/2hUrTZVC/VQBX1UjwUVaVBn2TzlDHNytVQRODfVoUZ1nDExbLf41pPcDXDiXy9f6Jybr/3+mksS766oQoplN8JpGrqCUMox47q8N8OUZswl72/G4rU+2RZPbiLEa9VEekYaxPsWX6UxEmEeLtuVVF8W5BumNBTDrRIOUXuXRUYnkeW45VPNAqUTyDLagar1TgUqwWiBNqaohr2/T5ytq9sIp2JqmcY4wGEiPu8+w8Ztd5Cb+lwFdo91zKPt22pl2DELo8+QBWA47TRA6/x5AFoeGgXUlN7VSbj+BHSnwbCn0jo1f/1kNhIn77IQEAv2Or3M7d8GHSv3HAXAsAclsI6GcodD154FAMlD/ltcGzeOM5Ttl00bQUu27wsyhmqsG40pYaKe48UU2WgHLifgjuWEstqF1BPDzmJoWc4N2q+i6M7rnlBHrimAfg49mPcS8dVK8yHVfGneq423aTWd13hMOXarWw/sTLW5XJs17Y41h90zy87G7ARCJy51gX6SP7ctD22xXQ9TzmU5DP2nEvyow5xuZe+c2GH6rxqH3nRBe8M96QK2q7tUNXQh9gJGpzyzbvf0FzCGnUjdUnhvg4lm/adK2b3+1wpO/gE= \ No newline at end of file diff --git a/polkadot/diagrams/bridge-relay.svg b/polkadot/diagrams/bridge-relay.svg new file mode 100644 
index 00000000000..2907a7c7fce --- /dev/null +++ b/polkadot/diagrams/bridge-relay.svg @@ -0,0 +1,3 @@ + + +
Source
Source
Target
Target
Sync Loop
Sync Loop
Source Client
Source Client
RPC
RPC
RPC
RPC
run(source=sub, target=eth)
run(source=sub, target=eth)
run(source=eth, target=sub)
run(source=eth, target=sub)
Substrate Sync Loop
Substrate Sync Loop
Ethereum Sync Loop
Ethereum Sync Loop
Process Method Results
Process Method Results
Update Target Methods
Update Target Methods
Update Source Methods
Update Source Methods
Target Client
Target Client
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/diagrams/cross-chain-fund-transfer.svg b/polkadot/diagrams/cross-chain-fund-transfer.svg new file mode 100644 index 00000000000..5fd9ced1d43 --- /dev/null +++ b/polkadot/diagrams/cross-chain-fund-transfer.svg @@ -0,0 +1,3 @@ + + +
Ethereum
Ethereum
Substrate
Substrate
Actor
Actor
1. Send Lock Tx
1. Send Lock Tx
2. Emit Event
2. Emit Event
Bridge Relay
Bridge Relay
3. Read Event
3. Read Event
4. Send Tx Proof
4. Send Tx Proof
5. Grant Funds
5. Grant Funds
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/diagrams/currency-exchange-pallet.svg b/polkadot/diagrams/currency-exchange-pallet.svg new file mode 100644 index 00000000000..1f1b2ef7b5c --- /dev/null +++ b/polkadot/diagrams/currency-exchange-pallet.svg @@ -0,0 +1,3 @@ + + +
Transaction
Transaction
Parse Transaction
Parse Transaction
Yes
Yes
No
No
Is part of a finalized block?
Is part of a finalize...
Yes
Yes
Have funds already been claimed?
Have funds alrea...
Deposit into recipient account
Deposit into recipie...
Reward Submitter
Reward Submitter
End
End
A price feed would be needed for this
A price feed would b...
Convert from foreign currency into local currency
Convert from foreign...
No
No
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/diagrams/ethereum-pallet.svg b/polkadot/diagrams/ethereum-pallet.svg new file mode 100644 index 00000000000..934255be226 --- /dev/null +++ b/polkadot/diagrams/ethereum-pallet.svg @@ -0,0 +1,3 @@ + + +
Import Signed Header
Import Signed Header
Import Header
Import Header
Count Valid and Invalid Headers
Count Valid and Inva...
No
No
Yes
Yes
Did we finalize any headers
Did we finalize any h...
Yes
Yes
No
No
Is Signed
Is Signed
Import Unsigned Header
Import Unsigned Head...
Import Header
Import Header
Reward Submitter
Reward Submitter
Did we receive valid headers?
Did we receive valid he...
Track Good Submitter
Track Good Submitter
Punish Bad Submitter
Punish Bad Submitter
Verify Header
Verify Header
Check for Authority Set Changes
Check for Authori...
Check if new header finalizes old headers
Check if new head...
Header
Header
Import Header
Import Header
Insert Header into Storage
Insert Header int...
Mark Headers as Finalized
Mark Headers as F...
Prune Old Headers
Prune Old Headers
Imported Block Hash + Finalized Headers
Imported Block Ha...
New Header
New Header
End
End
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/diagrams/general-overview.svg b/polkadot/diagrams/general-overview.svg new file mode 100644 index 00000000000..d7706893ab9 --- /dev/null +++ b/polkadot/diagrams/general-overview.svg @@ -0,0 +1,3 @@ + + +
Bridge Relay
Bridge Relay
Solidity Smart Contract
Solidity Smart Contract
Grandpa Built-In
Grandpa Built-In
Ethereum PoA Network
Ethereum PoA Network
Substrate Node
Substrate Node
Ethereum Runtime Module
Ethereum Runtime Module
Substrate Runtime Module
Substrate Runtime Module
Currency Exchange Runtime Module
Currency Exchange Runtime Module
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/diagrams/parachain.svg b/polkadot/diagrams/parachain.svg new file mode 100644 index 00000000000..a1a15f172cf --- /dev/null +++ b/polkadot/diagrams/parachain.svg @@ -0,0 +1,3 @@ + + +
Polkadot
Polkadot
Ethereum Runtime Module
Ethereum Runtime Module
Substrate Runtime Module
Substrate Runtime Module
Currency Exchange Runtime Module
Currency Exchange Runtime Module
Substrate Based Chain A
Substrate Based Chain A
Substrate Based Chain B
Substrate Based Chain B
Ethereum PoA Chain
Ethereum PoA Chain
Bridge Relays
Bridge Relays
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/polkadot/docs/high-level-overview.md b/polkadot/docs/high-level-overview.md new file mode 100644 index 00000000000..14b1eee6d45 --- /dev/null +++ b/polkadot/docs/high-level-overview.md @@ -0,0 +1,177 @@ +# High-Level Bridge Documentation + +## Purpose + +Trustless connecting between two Substrate-based chains using GRANDPA finality. + +## Overview + +Even though we support two-way bridging, the documentation will generally talk about a one-sided +interaction. That's to say, we will only talk about syncing headers and messages from a _source_ +chain to a _target_ chain. This is because the two-sided interaction is really just the one-sided +interaction with the source and target chains switched. + +To understand the full interaction with the bridge, take a look at the +[testing scenarios](./testing-scenarios.md) document. It describes potential use cases and describes +how each of the layers outlined below is involved. + +The bridge is built from various components. Here is a quick overview of the important ones. + +### Header Sync + +A light client of the source chain built into the target chain's runtime. It is a single FRAME +pallet. It provides a "source of truth" about the source chain headers which have been finalized. +This is useful for higher level applications. + +### Headers Relayer + +A standalone application connected to both chains. It submits every source chain header it sees to +the target chain through RPC. + +### Message Delivery + +A FRAME pallet built on top of the header sync pallet. It allows users to submit messages to the +source chain, which are to be delivered to the target chain. The delivery protocol doesn't care +about the payload more than it has to. Handles replay protection and message ordering. + +### Message Dispatch + +A FRAME pallet responsible for interpreting the payload of delivered messages. 
+ +### Message Relayer + +A standalone application handling delivery of the messages from source chain to the target chain. + +## Processes + +High level sequence charts of the process can be found in [a separate document](./high-level.html). + +### Substrate (GRANDPA) Header Sync + +The header sync pallet (`pallet-substrate-bridge`) is an on-chain light client for chains which use +GRANDPA finality. It is part of the target chain's runtime, and accepts headers from the source +chain. Its main goals are to accept valid headers, track GRANDPA finality set changes, and verify +GRANDPA finality proofs (a.k.a justifications). + +The pallet does not care about what block production mechanism is used for the source chain +(e.g Aura or BABE) as long as it uses the GRANDPA finality gadget. Due to this it is possible for +the pallet to import (but not necessarily finalize) headers which are _not_ valid according to the +source chain's block production mechanism. + +The pallet has support for tracking forks and uses the longest chain rule to determine what the +canonical chain is. The pallet allows headers to be imported on a different fork from the canonical +one as long as the headers being imported don't conflict with already finalized headers (for +example, it will not allow importing a header at a lower height than the best finalized header). + +When tracking authority set changes, the pallet - unlike the full GRANDPA protocol - does not +support tracking multiple authority set changes across forks. Each fork can have at most one pending +authority set change. This is done to prevent DoS attacks if GRANDPA on the source chain were to +stall for a long time (the pallet would have to do a lot of expensive ancestry checks to catch up). + +Refer to the [pallet documentation](../modules/substrate/src/lib.rs) for more details. + +#### Header Relayer strategy + +There is currently no reward strategy for the relayers at all.
They also are not required to be +staked or registered on-chain, unlike in other bridge designs. We consider the header sync to be +an essential part of the bridge and the incentivisation should be happening on the higher layers. + +At the moment, signed transactions are the only way to submit headers to the header sync pallet. +However, in the future we would like to use unsigned transactions for headers delivery. This will +allow transaction de-duplication to be done at the transaction pool level and also remove the cost +for message relayers to run header relayers. + +### Message Passing + +Once header sync is maintained, the target side of the bridge can receive and verify proofs about +events happening on the source chain, or its internal state. On top of this, we built a message +passing protocol which consists of two parts described in the following sections: message delivery and +message dispatch. + +#### Message Lanes Delivery + +The [Message delivery pallet](../modules/messages/src/lib.rs) is responsible for queueing up +messages and delivering them in order on the target chain. It also dispatches messages, but we will +cover that in the next section. + +The pallet supports multiple lanes (channels) where messages can be added. Every lane can be +considered completely independent from others, which allows them to make progress in parallel. +Different lanes can be configured to validate messages differently (e.g higher rewards, specific +types of payload, etc.) and may be associated with a particular "user application" built on top of +the bridge. Note that messages in the same lane MUST be delivered _in the same order_ they were +queued up. + +The message delivery protocol does not care about the payload it transports and can be coupled +with an arbitrary message dispatch mechanism that will interpret and execute the payload if delivery +conditions are met. Each delivery on the target chain is confirmed back to the source chain by the +relayer.
This is so that she can collect the reward for delivering these messages. + +Users of the pallet add their messages to an "outbound lane" on the source chain. When a block is +finalized message relayers are responsible for reading the current queue of messages and submitting +some (or all) of them to the "inbound lane" of the target chain. Each message has a `nonce` +associated with it, which serves as the ordering of messages. The inbound lane stores the last +delivered nonce to prevent replaying messages. To successfully deliver the message to the inbound lane +on the target chain the relayer has to present a storage proof which shows that the message was +part of the outbound lane on the source chain. + +During delivery of messages they are immediately dispatched on the target chain and the relayer is +required to declare the correct `weight` to cater for all messages dispatch and pay all required +fees of the target chain. To make sure the relayer is incentivised to do so, on the source chain: +- the user provides a declared dispatch weight of the payload +- the pallet calculates the expected fee on the target chain based on the declared weight +- the pallet converts the target fee into source tokens (based on a price oracle) and reserves + enough tokens to cover for the delivery, dispatch, confirmation and additional relayers reward. + +If the declared weight turns out to be too low on the target chain the message is delivered but +it immediately fails to dispatch. The fee and reward are collected by the relayer upon confirmation +of delivery. + +Due to the fact that message lanes require delivery confirmation transactions, they also strictly +require bi-directional header sync (i.e. you can't use message delivery with one-way header sync). + +#### Dispatching Messages + +The [Message dispatch pallet](../modules/dispatch/src/lib.rs) is used to perform the actions +specified by messages which have come over the bridge.
For Substrate-based chains this means +interpreting the source chain's message as a `Call` on the target chain. + +An example `Call` of the target chain would look something like this: + +```rust +target_runtime::Call::Balances(target_runtime::pallet_balances::Call::transfer(recipient, amount)) +``` + +When sending a `Call` it must first be SCALE encoded and then sent to the source chain. The `Call` +is then delivered by the message lane delivery mechanism from the source chain to the target chain. +When a message is received the inbound message lane on the target chain will try and decode the +message payload into a `Call` enum. If it's successful it will be dispatched after we check that the +weight of the call does not exceed the weight declared by the sender. The relayer pays fees for +executing the transaction on the target chain, but her costs should be covered by the sender on the +source chain. + +When dispatching messages there are three Origins which can be used by the target chain: +1. Root Origin +2. Source Origin +3. Target Origin + +Senders of a message can indicate which one of the three origins they would like to dispatch their +message with. However, there are restrictions on who/what is allowed to dispatch messages with a +particular origin. + +The Root origin represents the source chain's Root account on the target chain. This origin can +only be dispatched on the target chain if the "send message" request was made by the Root origin of +the source chain - otherwise the message will fail to be dispatched. + +The Source origin represents an account without a private key on the target chain. This account will +be generated/derived using the account ID of the sender on the source chain. We don't necessarily +require the source account id to be associated with a private key on the source chain either. This +is useful for representing things such as source chain proxies or pallets.
+ +The Target origin represents an account with a private key on the target chain. The sender on the +source chain needs to prove ownership of this account by using their target chain private key to +sign: `(Call, SourceChainAccountId).encode()`. This will be included in the message payload and +verified by the target chain before dispatch. + +See [`CallOrigin` documentation](../modules/dispatch/src/lib.rs) for more details. + +#### Message Relayers Strategy diff --git a/polkadot/docs/high-level.html b/polkadot/docs/high-level.html new file mode 100644 index 00000000000..3c4c6178c95 --- /dev/null +++ b/polkadot/docs/high-level.html @@ -0,0 +1,55 @@ + + + + + + High Level Bridge Components + + +

Header Sync

+

Header pallet on the target chain, keeps track of the forks, but requires finality for blocks that perform authority set changes. That means it won't sync a fork with an authority set change unless that change is finalized.

+
+ sequenceDiagram + participant Source Chain + participant Relayer + participant Target Chain + Note right of Target Chain: Best: 0, Finalized: 0 + Source Chain ->> Source Chain: Import Block 1 + Source Chain ->> Source Chain: Import Block 2 + Relayer ->> Target Chain: Submit Block 1 + Note right of Target Chain: Best: 1, Finalized: 0 + Relayer ->> Target Chain: Submit Block 2 + Note right of Target Chain: Best: 2, Finalized: 0 + Source Chain ->> Source Chain: Import Block 2' + Relayer ->> Target Chain: Submit Block 2' + Note right of Target Chain: Best: 2 or 2', Finalized: 0 + Source Chain ->> Source Chain: Finalize Block 2' + Relayer ->> Target Chain: Submit Finality of Block 2' + Note right of Target Chain: Best: 2', Finalized: 2' +
+

Message Delivery (single lane)

+

Pending messages are stored on-chain (source) so the relayer code is completely stateless - it can read all the details from the chain.

+

Delivering pending messages requires finality first.

+
+ sequenceDiagram + participant Source Chain + participant Relayer + participant Target Chain + Source Chain ->> Source Chain: Queue Message 1 + Source Chain ->> Source Chain: Queue Message 2 + Source Chain ->> Source Chain: Queue Message 3 + Note left of Source Chain: Queued Messages: [1, 2, 3, ] + Note left of Source Chain: Reward for [1, 2, 3, ] reserved + Relayer ->> Target Chain: Deliver Messages 1..2 + Note right of Target Chain: Target chain dispatches the messages.
To Confirm: {1..2 => relayer_1} + Relayer ->> Source Chain: Delivery Confirmation of 1..2 + Note left of Source Chain: Queued Messages: [3, ] + Note left of Source Chain: Reward payout for [1, 2, ] + Relayer -->> Target Chain: Confirmed Messages 1..2 + Note right of Target Chain: To Confirm: {} + Note over Relayer, Target Chain: (this is not a separate transaction,
it's bundled with the "Deliver Messages" proof) +
+ + + + diff --git a/polkadot/docs/plan.md b/polkadot/docs/plan.md new file mode 100644 index 00000000000..9c4106d9ade --- /dev/null +++ b/polkadot/docs/plan.md @@ -0,0 +1,22 @@ +Plan for the Internal Audit: +1. High-level overview (describing layers, maybe with pictures) + - what have we done already. + [Tomek to present] + [Hernando to help with diagrams today] + +2. Demo? How to play with the network. + [Hernando] + +3. Demo of token transfer on Millau. + [Hernando] + +4. Go through the scenario description and let people ask questions in the meantime. + Jump to the code on demand. + [Tomek, Hernando, Slava] + + ... + +5. The roadmap + - outstanding issues. + [Tomek] + diff --git a/polkadot/docs/poa-eth.md b/polkadot/docs/poa-eth.md new file mode 100644 index 00000000000..43b30f8bb73 --- /dev/null +++ b/polkadot/docs/poa-eth.md @@ -0,0 +1,71 @@ +# PoA Ethereum High Level Documentation + +NOTE: This is from the old README + +### Ethereum Bridge Runtime Module +The main job of this runtime module is to keep track of useful information an Ethereum PoA chain +which has been submitted by a bridge relayer. This includes: + + - Ethereum headers and their status (e.g are they the best header, are they finalized, etc.) + - Current validator set, and upcoming validator sets + +This runtime module has more responsibilties than simply storing headers and validator sets. It is +able to perform checks on the incoming headers to verify their general integrity, as well as whether +or not they've been finalized by the authorities on the PoA chain. 
+ +This module is laid out like so: + +``` +├── ethereum +│ └── src +│ ├── error.rs // Runtime error handling +│ ├── finality.rs // Manage finality operations +│ ├── import.rs // Import new Ethereum headers +│ ├── lib.rs // Store headers and validator set info +│ ├── validators.rs // Track current and future PoA validator sets +│ └── verification.rs // Verify validity of incoming Ethereum headers +``` + +### Currency Exchange Runtime Module +The currency exchange module is used to facilitate cross-chain funds transfers. It works by accepting +a transaction which proves that funds were locked on one chain, and releases a corresponding amount +of funds on the receiving chain. + +For example: Alice would like to send funds from chain A to chain B. What she would do is send a +transaction to chain A indicating that she would like to send funds to an address on chain B. This +transaction would contain the amount of funds she would like to send, as well as the address of the +recipient on chain B. These funds would now be locked on chain A. Once the block containing this +"locked-funds" transaction is finalized it can be relayed to chain B. Chain B will verify that this +transaction was included in a finalized block on chain A, and if successful deposit funds into the +recipient account on chain B. + +Chain B would need a way to convert from a foreign currency to its local currency. How this is done +is left to the runtime developer for chain B. + +This module is one example of how an on-chain light client can be used to prove a particular action +was taken on a foreign chain. In particular it enables transfers of the foreign chain's native +currency, but more sophisticated modules such as ERC20 token transfers or arbitrary message transfers +are being worked on as well. + +## Ethereum Node +On the Ethereum side of things, we require two things.
First, a Solidity smart contract to track the +Substrate headers which have been submitted to the bridge (by the relay), and a built-in contract to +be able to verify that headers have been finalized by the GRANDPA finality gadget. Together this +allows the Ethereum PoA chain to verify the integrity and finality of incoming Substrate headers. + +The Solidity smart contract is not part of this repo, but can be found +[here](https://github.com/svyatonik/substrate-bridge-sol/blob/master/substrate-bridge.sol) if you're +curious. We have the contract ABI in the `ethereum/relays/res` directory. + +## Rialto Runtime +The node runtime consists of several runtime modules, however not all of them are used at the same +time. When running an Ethereum PoA to Substrate bridge the modules required are the Ethereum module +and the currency exchange module. When running a Substrate to Substrate bridge the Substrate and +currency exchange modules are required. + +Below is a brief description of each of the runtime modules. + +## Bridge Relay +The bridge relay is responsible for syncing the chains which are being bridged, and passing messages +between them. The current implementation of the relay supports syncing and interacting with +Ethereum PoA and Substrate chains. diff --git a/polkadot/docs/scenario1.html new file mode 100644 index 00000000000..808a0c34f0d --- /dev/null +++ b/polkadot/docs/scenario1.html @@ -0,0 +1,47 @@ + + + + + + Flow Chart of Millau to Rialto Transfer + + +

Scenario: mDave sending RLT to rEve

+
+ sequenceDiagram + participant mDave + participant Millau + participant Bridge Relayer + participant Rialto + participant rEve + Rialto->>Rialto: Endow r(mDave) with RLT. + mDave->>Millau: send_message(transfer, 5 RLT, rEve) + Millau->>Millau: Locks fee & reward for the relayer and queues the message. + rect rgb(205, 226, 244) + Bridge Relayer->>+Millau: What's your best header? + Millau-->>-Bridge Relayer: It's header 5. + Bridge Relayer->>+Rialto: What's the best Millau header you know about? + Rialto-->>-Bridge Relayer: I only know about 4. + Bridge Relayer->>Rialto: Cool, here is Millau header 5 [`submit_signed_header()`]. + Bridge Relayer->>+Rialto: What's the best finalized Millau header you know about? + Rialto-->>-Bridge Relayer: I only know about 3. + Bridge Relayer->>+Millau: Do you have a finality proof for 4..5? + Millau-->>-Bridge Relayer: Yes I do, here it is. + Bridge Relayer->>Rialto: Here is the finality proof for 5 [`finalize_header()`]. + end + rect rgb(218, 195, 244) + Bridge Relayer->>+Millau: Do you have any messages for me to deliver (at 5)? + Millau-->>-Bridge Relayer: Yes, here they are. + Bridge Relayer->>+Rialto: I have some new messages for you [`receive_messages_proof()`]. + Rialto->>Rialto: Validate and Dispatch Message. + Rialto->>rEve: Transfer(5 RLT) from r(mDave). + Rialto-->>-Bridge Relayer: Event(Message Succesfully Dispatched). + Bridge Relayer->>Millau: I sent your message, can I get paid now [`receive_messages_delivery_proof`]? + Millau-->>Bridge Relayer: Yes, here you go $$$. + Bridge Relayer ->>Rialto: These messages are confirmed now, feel free to clean up. + end +
+ + + + diff --git a/polkadot/docs/send-message.md b/polkadot/docs/send-message.md new file mode 100644 index 00000000000..91d3bfd976b --- /dev/null +++ b/polkadot/docs/send-message.md @@ -0,0 +1,131 @@ +# How to send messages + +The Substrate-to-Substrate relay comes with a command line interface (CLI) which is implemented +by the `substrate-relay` binary. + +``` +Substrate-to-Substrate relay + +USAGE: + substrate-relay + +FLAGS: + -h, --help + Prints help information + + -V, --version + Prints version information + + +SUBCOMMANDS: + help Prints this message or the help of the given subcommand(s) + init-bridge Initialize on-chain bridge pallet with current header data + relay-headers Start headers relay between two chains + relay-messages Start messages relay between two chains + send-message Send custom message over the bridge +``` +The relay related commands `relay-headers` and `relay-messages` are basically continously running a +sync loop between the `Millau` and `Rialto` chains. The `init-bridge` command submitts initialization +transactions. An initialization transaction brings an initial header and authorities set from a source +chain to a target chain. The header synchronization then starts from that header. + +For sending custom messages over an avialable bridge, the `send-message` command is used. + +``` +Send custom message over the bridge. + +Allows interacting with the bridge by sending messages over `Messages` component. The message is being sent to the +source chain, delivered to the target chain and dispatched there. + +USAGE: + substrate-relay send-message + +FLAGS: + -h, --help Prints help information + -V, --version Prints version information + +SUBCOMMANDS: + help Prints this message or the help of the given subcommand(s) + MillauToRialto Submit message to given Millau -> Rialto lane + RialtoToMillau Submit message to given Rialto -> Millau lane + +``` +Messages are send from a source chain to a target chain using a so called `message lane`. 
Message lanes handle +both, message transport and message dispatch. There is one command for submitting a message to each of the two +available bridges, namely `MillauToRialto` and `RialtoToMillau`. + +Submitting a message requires a number of arguments to be provided. Those arguments are essentially the same +for both submit message commands, hence only the output for `MillauToRialto` is shown below. + +``` +Submit message to given Millau -> Rialto lane + +USAGE: + substrate-relay send-message MillauToRialto [OPTIONS] --lane --source-host --source-port --source-signer --origin --target-signer + +FLAGS: + -h, --help Prints help information + -V, --version Prints version information + +OPTIONS: + --fee + Delivery and dispatch fee. If not passed, determined automatically + + --lane Hex-encoded lane id + --source-host Connect to Source node at given host + --source-port Connect to Source node websocket server at given port + --source-signer + The SURI of secret key to use when transactions are submitted to the Source node + + --source-signer-password + The password for the SURI of secret key to use when transactions are submitted to the Source node + + --origin + The origin to use when dispatching the message on the target chain [possible values: Target, Source] + + --target-signer + The SURI of secret key to use when transactions are submitted to the Target node + + --target-signer-password + The password for the SURI of secret key to use when transactions are submitted to the Target node + + +SUBCOMMANDS: + help Prints this message or the help of the given subcommand(s) + remark Make an on-chain remark (comment) + transfer Transfer the specified `amount` of native tokens to a particular `recipient` + +``` +As can be seen from the output, there are two types of messages available: `remark` and `transfer`. +A remark is some opaque message which will be placed on-chain. For basic testing, a remark is +the easiest to go with. 
+ +Usage of the arguments is best explained with an example. Below you can see how a remark +would look: + +``` +substrate-relay send-message MillauToRialto \ + --source-host=127.0.0.1 \ + --source-port=10946 \ + --source-signer=//Dave \ + --target-signer=//Dave \ + --lane=00000000 \ + --origin Target \ + remark +``` +Messages are basically regular transactions. That means, they have to be signed. In order +to send a message, you have to control an account private key on both, the source and +the target chain. Those accounts are specified using the `--source-signer` and `--target-signer` +arguments in the example above. + +Message delivery and dispatch requires a fee to be paid. In the example above, we have not +specified the `--fee` argument. Hence, the fee will be estimated automatically. Note that +in order to pay the fee, the message sender account has to have sufficient funds available. + +The `--origin` argument allows to denote under which authority the message will be dispatched +on the target chain. Accepted values are `Target` and `Source`. + +Although not strictly necessary, it is recommended to use one of the well-known development +accounts (`Alice`, `Bob`, `Charlie`, `Dave`, `Eve`) for message sending. Those accounts are +endowed with funds for fee payment. In addition, the development `Seed URI` syntax +(like `//Dave`) for the signer can be used, which will remove the need for a password. diff --git a/polkadot/docs/testing-scenarios.md new file mode 100644 index 00000000000..343720524ec --- /dev/null +++ b/polkadot/docs/testing-scenarios.md @@ -0,0 +1,221 @@ +# Testing Scenarios + +In the scenarios, for simplicity, we call the chains Kusama (KSM token) and Polkadot (DOT token), +but they should be applicable to any other chains. The first scenario has a detailed description of +the entire process (also see the [sequence diagram](./scenario1.html)).
Other scenarios only contain +a simplified interaction focusing on things that are unique for that particular scenario. + +Notation: +- kX - user X interacting with Kusama chain. +- `k(kX)` - Kusama account id of user kX (native account id; usable on Kusama) +- `p(kX)` - Polkadot account id of user kX (account id derived from `k(kX)` usable on Polkadot) +- [Kusama] ... - Interaction happens on Kusama (e.g. the user interacts with Kusama chain) +- [Polkadot] ... - Interaction happens on Polkadot + +Basic Scenarios +=========================== + +Scenario 1: Kusama's Alice receiving & spending DOTs +--------------------------- + +Kusama's Alice (kAlice) receives 5 DOTs from Polkadot's Bob (pBob) and sends half of them to +kCharlie. + +1. Generate kAlice's DOT address (`p(kAlice)`). + See function: + + ```rust + bp_runtime::derive_account_id(b"pdot", kAlice) + ``` + + or: + + ```rust + let hash = bp_polkadot::derive_kusama_account_id(kAlice); + let p_kAlice = bp_polkadot::AccountIdConverter::convert(hash); + ``` + +2. [Polkadot] pBob transfers 5 DOTs to `p(kAlice)` + 1. Creates & Signs a transaction with `Call::Transfer(..)` + 1. It is included in block. + 1. kAlice observers Polkadot chain to see her balance at `p(kAlice)` updated. + +3. [Kusama] kAlice sends 2.5 DOTs to `p(kCharlie)` + 1. kAlice prepares: + ```rust + let call = polkadot::Call::Balances(polkadot::Balances::Transfer(p(kCharlie), 2.5DOT)).encode(); + let weight = call.get_dispatch_info().weight; + ``` + + 1. 
kAlice prepares Kusama transaction: + ```rust + kusama::Call::Messages::::send_message( + // dot-transfer-lane (truncated to 4bytes) + lane_id, + payload: MessagePayload { + // Get from current polkadot runtime (kind of hardcoded) + spec_version: 1, + // kAlice should know the exact dispatch weight of the call on the target + // source verifies: at least to cover call.length() and below max weight + weight, + // simply bytes, we don't know anything about that on the source chain + call, + // origin that should be used during dispatch on the target chain + origin: CallOrigin::SourceAccount(kAlice), + }, + delivery_and_dispatch_fee: { + (single_message_delivery_weight + // source weight = X * target weight + + convert_target_weight_to_source_weight(weight) + + confirmation_transaction_weight + ) + // This uses an on-chain oracle to convert weights of the target chain to source fee + * weight_to_fee + // additional reward for the relayer (pallet parameter) + + relayers_fee + }, + ) + ``` + + 1. [Kusama] kAlice sends Kusama transaction with the above `Call` and pays regular fees. The + dispatch additionally reservers target-chain delivery and dispatch fees (including relayer's + reward). + +4. [Kusama] kAlice's transaction is included in block `B1` + +### Syncing headers loop + +5. Relayer sees that `B1` has not yet been delivered to the target chain. + [Sync loop code](https://github.com/paritytech/parity-bridges-common/blob/8b327a94595c4a6fae6d7866e24ecf2390501e32/relays/headers-relay/src/sync_loop.rs#L199). + +1. Relayer prepares transaction which delivers `B1` and with all of the missing + ancestors to the target chain (one header per transaction). + +1. After the transaction is succesfully dispatched the Polkadot on-chain light client of the Kusama + chain learns about block `B1` - it is stored in the on-chain storage. + +### Syncing finality loop + +8. Relayer is subscribed to finality events on Kusama. Relayer gets a finality notification for + block `B3`. + +1. 
The header sync informs the target chain about `B1..B3` blocks (see point 6). + +1. Relayer learns about missing finalization of `B1..B3` on the target chain, see + [finality maintenance code](https://github.com/paritytech/parity-bridges-common/blob/8b327a94595c4a6fae6d7866e24ecf2390501e32/relays/substrate/src/headers_maintain.rs#L107). + +1. Relayer submits justification for `B3` to the target chain (`finalize_header`). + See [#421](https://github.com/paritytech/parity-bridges-common/issues/421) for multiple + authority set changes support in Relayer (i.e. what block the target chain expects, not only + what I have). + + Relayer is doing two things: + - syncing on demand (what blocks miss finality) + - and syncing as notifications are received (recently finalized on-chain) + +1. Eventually Polkadot on-chain light client of Kusama learns about finality of `B1`. + +### Syncing messages loop + +13. The relayer checks the on-chain storage (last finalized header on the source, best header on the + target): + - Kusama outbound lane + - Polkadot inbound lane + Lanes contains `latest_generated_nonce` and `latest_received_nonce` respectively. The relayer + syncs messages between that range. + +1. The relayer gets a proof for every message in that range (using the RPC of messages module) + +1. The relayer creates a message delivery transaction (but it has weight, size, and count limits). + The count limit is there to make the loop of delivery code bounded. + ```rust + receive_message_proof( + relayer_id, // account id of the source chain + proof, // messages + proofs (hash of source block `B1`, nonces, lane_id + storage proof) + dispatch_weight // relayer declares how much it will take to dispatch all messages in that transaction, + ) + ``` + The `proof` can also contain an update of outbound lane state of source chain, which indicates + the delivery confirmation of these messages and reward payment, so that the target chain can + truncate its unpayed rewards vector. 
+ + The target chain stores `relayer_ids` that delivered messages because the relayer can generate + a storage proof to show that they did indeed deliver those messages. The reward is paid on the + source chain and we inform the target chain about that fact so it can prune these `relayer_ids`. + + It's totally fine if there are no messages, and we only include the reward payment proof + when calling that function. + +1. 🥳 the message is now delivered and dispatched on the target chain! + +1. The relayer now needs to confirm the delivery to claim her payment and reward on the source + chain. + +1. The relayer creates a transaction on the source chain with call: + + ```rust + receive_messages_delivery_proof( + proof, // hash of the finalized target chain block, lane_id, storage proof + ) + ``` + +### UI challenges + +- The UI should warn before (or prevent) sending to `k(kCharlie)`! + + +Scenario 2: Kusama's Alice nominating validators with her DOTs +--------------------------- + +kAlice receives 10 DOTs from pBob and nominates `p(pCharlie)` and `p(pDave)`. + +1. Generate kAlice's DOT address (`p(kAlice)`) +2. [Polkadot] pBob transfers 5 DOTs to `p(kAlice)` +3. [Kusama] kAlice sends a batch transaction: + - `staking::Bond` transaction to create stash account choosing `p(kAlice)` as the controller account. + - `staking::Nominate(vec![p(pCharlie)])` to nominate pCharlie using the controller account. + + +Scenario 3: Kusama Treasury receiving & spending DOTs +--------------------------- + +pBob sends 15 DOTs to Kusama Treasury which Kusama Governance decides to transfer to kCharlie. + +1. Generate source account for the treasury (`kTreasury`). +2. [Polkadot] pBob tarnsfers 15 DOTs to `p(kTreasury)`. +2. [Kusama] Send a governance proposal to send a bridge message which transfers funds to `p(kCharlie)`. +3. [Kusama] Dispatch the governance proposal using `kTreasury` account id. 
+ +Extra scenarios +=========================== + +Scenario 4: Kusama's Alice setting up 1-of-2 multi-sig to spend from either Kusama or Polkadot +--------------------------- + +Assuming `p(pAlice)` has at least 7 DOTs already. + +1. Generate multisig account id: `pMultiSig = multi_account_id(&[p(kAlice), p(pAlice)], 1)`. +2. [Kusama] Transfer 7 DOTs to `pMultiSig` using `TargetAccount` origin of `pAlice`. +3. [Kusama] Transfer 2 DOTs to `p(kAlice)` from the multisig: + - Send `multisig::as_multi_threshold_1(vec![p(pAlice)], balances::Transfer(p(kAlice), 2))` + +Scenario 5: Kusama Treasury staking & nominating validators with DOTs +--------------------------- + +Scenario 6: Kusama Treasury voting in Polkadot's democracy proposal +--------------------------- + +Potentially interesting scenarios +=========================== + +Scenario 7: Polkadot's Bob spending his DOTs by using Kusama chain +--------------------------- + +We can assume he holds KSM. Problem: he can pay fees, but can't really send (sign) a transaction? +Shall we support some kind of dispatcher? + +Scenario 8: Kusama Governance taking over Kusama's Alice DOT holdings +--------------------------- + +We use `SourceRoot` call to transfer her's DOTs to Kusama treasury. Source chain root +should also be able to send messages as `CallOrigin::SourceAccount(Alice)` though. diff --git a/polkadot/fuzz/storage-proof/Cargo.lock b/polkadot/fuzz/storage-proof/Cargo.lock new file mode 100644 index 00000000000..e303f3a887b --- /dev/null +++ b/polkadot/fuzz/storage-proof/Cargo.lock @@ -0,0 +1,2362 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" +dependencies = [ + "lazy_static", + "regex", +] + +[[package]] +name = "addr2line" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" + +[[package]] +name = "ahash" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" + +[[package]] +name = "aho-corasick" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" +dependencies = [ + "memchr", +] + +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + +[[package]] +name = "anyhow" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1" + +[[package]] +name = "arbitrary" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db55d72333851e17d572bec876e390cd3b11eb1ef53ae821dd9f3b653d2b4569" + +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + +[[package]] +name = "arrayvec" +version = "0.4.12" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" +dependencies = [ + "nodrop", +] + +[[package]] +name = "arrayvec" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" + +[[package]] +name = "async-trait" +version = "0.1.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "autocfg" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" + +[[package]] +name = "backtrace" +version = "0.3.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" +dependencies = [ + "addr2line", + "cfg-if 1.0.0", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base58" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "bitvec" +version = "0.17.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" +dependencies = [ + "either", + "radium", +] + +[[package]] +name = "blake2-rfc" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" +dependencies = [ + 
"arrayvec 0.4.12", + "constant_time_eq", +] + +[[package]] +name = "block-buffer" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +dependencies = [ + "block-padding", + "byte-tools", + "byteorder", + "generic-array 0.12.3", +] + +[[package]] +name = "block-buffer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "generic-array 0.14.4", +] + +[[package]] +name = "block-padding" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +dependencies = [ + "byte-tools", +] + +[[package]] +name = "bp-header-chain" +version = "0.1.0" +dependencies = [ + "finality-grandpa", + "frame-support", + "parity-scale-codec", + "serde", + "sp-core", + "sp-finality-grandpa", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "bp-runtime" +version = "0.1.0" +dependencies = [ + "frame-support", + "num-traits", + "parity-scale-codec", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "bp-test-utils" +version = "0.1.0" +dependencies = [ + "bp-header-chain", + "finality-grandpa", + "sp-finality-grandpa", + "sp-keyring", + "sp-runtime", +] + +[[package]] +name = "byte-slice-cast" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" + +[[package]] +name = "byte-tools" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" + +[[package]] +name = "byteorder" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +dependencies = [ + "libc", + "num-integer", + "num-traits", + "time", + "winapi", +] + +[[package]] +name = "cloudabi" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +dependencies = [ + "bitflags", +] + +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" + +[[package]] +name = "cpuid-bool" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-mac" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" +dependencies = [ + "generic-array 0.12.3", + "subtle 1.0.0", +] + +[[package]] +name = "crypto-mac" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" +dependencies = [ + "generic-array 0.14.4", + "subtle 2.4.0", +] + +[[package]] +name = "curve25519-dalek" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "434e1720189a637d44fe464f4df1e6eb900b4835255b14354497c78af37d9bb8" +dependencies = [ + "byteorder", + "digest 0.8.1", + "rand_core 0.5.1", + "subtle 2.4.0", + "zeroize", +] + +[[package]] +name = "curve25519-dalek" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f627126b946c25a4638eec0ea634fc52506dea98db118aae985118ce7c3d723f" +dependencies = [ + "byteorder", + "digest 0.9.0", + "rand_core 0.5.1", + "subtle 2.4.0", + "zeroize", +] + +[[package]] +name = "derive_more" +version = "0.99.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "digest" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +dependencies = [ + "generic-array 0.12.3", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array 0.14.4", +] + +[[package]] +name = "dyn-clonable" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" +dependencies = [ + "dyn-clonable-impl", + "dyn-clone", +] + +[[package]] +name = "dyn-clonable-impl" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" +dependencies = [ + 
"proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "dyn-clone" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" + +[[package]] +name = "ed25519" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37c66a534cbb46ab4ea03477eae19d5c22c01da8258030280b7bd9d8433fb6ef" +dependencies = [ + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +dependencies = [ + "curve25519-dalek 3.0.2", + "ed25519", + "rand 0.7.3", + "serde", + "sha2 0.9.2", + "zeroize", +] + +[[package]] +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + +[[package]] +name = "environmental" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6576a1755ddffd988788025e75bce9e74b018f7cc226198fe931d077911c6d7e" + +[[package]] +name = "fake-simd" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" + +[[package]] +name = "finality-grandpa" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8feb87a63249689640ac9c011742c33139204e3c134293d3054022276869133b" +dependencies = [ + "either", + "futures", + "futures-timer", + "log", + "num-traits", + "parity-scale-codec", + "parking_lot 0.9.0", +] + +[[package]] +name = "fixed-hash" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" +dependencies = [ + "byteorder", + "rand 0.8.2", + "rustc-hex", + 
"static_assertions", +] + +[[package]] +name = "frame-metadata" +version = "12.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "parity-scale-codec", + "serde", + "sp-core", + "sp-std", +] + +[[package]] +name = "frame-support" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "bitflags", + "frame-metadata", + "frame-support-procedural", + "impl-trait-for-tuples", + "log", + "once_cell", + "parity-scale-codec", + "paste", + "serde", + "smallvec 1.6.1", + "sp-arithmetic", + "sp-core", + "sp-inherents", + "sp-io", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-tracing", +] + +[[package]] +name = "frame-support-procedural" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "Inflector", + "frame-support-procedural-tools", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural-tools" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "frame-support-procedural-tools-derive", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural-tools-derive" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-system" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "frame-support", + "impl-trait-for-tuples", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-version", +] + +[[package]] +name = "futures" 
+version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65" + +[[package]] +name = "futures-executor" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", + "num_cpus", +] + +[[package]] +name = "futures-io" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" + +[[package]] +name = "futures-macro" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" +dependencies = [ + "proc-macro-hack", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6" + +[[package]] +name = "futures-task" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86" +dependencies = [ + 
"once_cell", +] + +[[package]] +name = "futures-timer" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1de7508b218029b0f01662ed8f61b1c964b3ae99d6f25462d0f55a595109df6" + +[[package]] +name = "futures-util" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "proc-macro-hack", + "proc-macro-nested", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +dependencies = [ + "typenum", +] + +[[package]] +name = "generic-array" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.10.1+wasi-snapshot-preview1", +] + +[[package]] +name = "gimli" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" + +[[package]] +name = "hash-db" +version = "0.15.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" + +[[package]] +name = "hash256-std-hasher" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" +dependencies = [ + "crunchy", +] + +[[package]] +name = "hashbrown" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" +dependencies = [ + "ahash", +] + +[[package]] +name = "heck" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "hermit-abi" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +dependencies = [ + "libc", +] + +[[package]] +name = "hex" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" + +[[package]] +name = "hmac" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" +dependencies = [ + "crypto-mac 0.7.0", + "digest 0.8.1", +] + +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac 0.8.0", + "digest 0.9.0", +] + +[[package]] +name = "hmac-drbg" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" +dependencies = 
[ + "digest 0.8.1", + "generic-array 0.12.3", + "hmac 0.7.1", +] + +[[package]] +name = "honggfuzz" +version = "0.5.52" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ead88897bcad1c396806d6ccba260a0363e11da997472e9e19ab9889969083a2" +dependencies = [ + "arbitrary", + "lazy_static", + "memmap", +] + +[[package]] +name = "impl-codec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1be51a921b067b0eaca2fad532d9400041561aa922221cc65f95a85641c6bf53" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-serde" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b47ca4d2b6931707a55fce5cf66aff80e2178c8b63bbb4ecb5695cbc870ddf6f" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f65a8ecf74feeacdab8d38cb129e550ca871cccaa7d1921d8636ecd75534903" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "instant" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "integer-sqrt" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" +dependencies = [ + "num-traits", +] + +[[package]] +name = "itoa" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" + +[[package]] +name = "keccak" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" + +[[package]] +name = "lazy_static" +version = "1.4.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" + +[[package]] +name = "libsecp256k1" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" +dependencies = [ + "arrayref", + "crunchy", + "digest 0.8.1", + "hmac-drbg", + "rand 0.7.3", + "sha2 0.8.2", + "subtle 2.4.0", + "typenum", +] + +[[package]] +name = "lock_api" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "lock_api" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" +dependencies = [ + "cfg-if 0.1.10", +] + +[[package]] +name = "matchers" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" + +[[package]] +name = "memchr" +version = "2.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" + +[[package]] +name = "memmap" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "memory-db" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6cbd2a22f201c03cc1706a727842490abfea17b7b53260358239828208daba3c" +dependencies = [ + "hash-db", + "hashbrown", + "parity-util-mem", +] + +[[package]] +name = "memory_units" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" + +[[package]] +name = "merlin" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.5.1", + "zeroize", +] + +[[package]] +name = "miniz_oxide" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" +dependencies = [ + "adler", + "autocfg", +] + +[[package]] +name = "nodrop" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" + +[[package]] +name = "num-bigint" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +dependencies = [ + "autocfg", 
+ "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" +dependencies = [ + "autocfg", + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "object" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" + +[[package]] +name = "once_cell" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" +dependencies = [ + "parking_lot 0.11.1", +] + +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + +[[package]] +name = "pallet-substrate-bridge" +version = "0.1.0" +dependencies = [ + "bp-header-chain", + "bp-runtime", + "finality-grandpa", + "frame-support", + "frame-system", + "hash-db", + "parity-scale-codec", + "serde", + "sp-finality-grandpa", + "sp-runtime", + "sp-std", + "sp-trie", +] + +[[package]] +name = "parity-scale-codec" +version = "1.3.6" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79602888a81ace83e3d1d4b2873286c1f5f906c84db667594e8db8da3506c383" +dependencies = [ + "arrayvec 0.5.2", + "bitvec", + "byte-slice-cast", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "198db82bb1c18fc00176004462dd809b2a6d851669550aa17af6dacd21ae0c14" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "parity-util-mem" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f17f15cb05897127bf36a240085a1f0bbef7bce3024849eccf7f93f6171bc27" +dependencies = [ + "cfg-if 1.0.0", + "hashbrown", + "impl-trait-for-tuples", + "parity-util-mem-derive", + "parking_lot 0.11.1", + "primitive-types", + "winapi", +] + +[[package]] +name = "parity-util-mem-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" +dependencies = [ + "proc-macro2", + "syn", + "synstructure", +] + +[[package]] +name = "parity-wasm" +version = "0.41.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" + +[[package]] +name = "parking_lot" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" +dependencies = [ + "lock_api 0.3.4", + "parking_lot_core 0.6.2", + "rustc_version", +] + +[[package]] +name = "parking_lot" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +dependencies = [ + "instant", + "lock_api 0.4.2", + "parking_lot_core 0.8.2", +] + +[[package]] +name = 
"parking_lot_core" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" +dependencies = [ + "cfg-if 0.1.10", + "cloudabi", + "libc", + "redox_syscall", + "rustc_version", + "smallvec 0.6.14", + "winapi", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" +dependencies = [ + "cfg-if 1.0.0", + "instant", + "libc", + "redox_syscall", + "smallvec 1.6.1", + "winapi", +] + +[[package]] +name = "paste" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" +dependencies = [ + "paste-impl", + "proc-macro-hack", +] + +[[package]] +name = "paste-impl" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" +dependencies = [ + "proc-macro-hack", +] + +[[package]] +name = "pbkdf2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" +dependencies = [ + "byteorder", + "crypto-mac 0.7.0", +] + +[[package]] +name = "pbkdf2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" +dependencies = [ + "crypto-mac 0.8.0", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "ppv-lite86" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" + +[[package]] +name = "primitive-types" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3824ae2c5e27160113b9e029a10ec9e3f0237bad8029f69c7724393c9fdefd8" +dependencies = [ + "fixed-hash", + "impl-codec", + "impl-serde", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml", +] + +[[package]] +name = "proc-macro-hack" +version = "0.5.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" + +[[package]] +name = "proc-macro-nested" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" + +[[package]] +name = "proc-macro2" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "radium" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom 0.1.16", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc", + "rand_pcg", +] + +[[package]] +name = "rand" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18519b42a40024d661e1714153e9ad0c3de27cd495760ceb09710920f1098b1e" +dependencies = [ + "libc", + "rand_chacha 0.3.0", + "rand_core 0.6.1", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_chacha" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.1", +] + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom 0.1.16", +] + +[[package]] +name = "rand_core" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" +dependencies = [ + "getrandom 0.2.2", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", +] + +[[package]] +name = "rand_pcg" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" +dependencies = [ + "rand_core 0.5.1", +] + +[[package]] +name = "redox_syscall" +version = "0.1.57" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" + +[[package]] +name = "ref-cast" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "regex" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", + "thread_local", +] + +[[package]] +name = "regex-automata" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" +dependencies = [ + "byteorder", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.6.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" + +[[package]] +name = "rustc-demangle" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + 
+[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver", +] + +[[package]] +name = "ryu" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" + +[[package]] +name = "schnorrkel" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" +dependencies = [ + "arrayref", + "arrayvec 0.5.2", + "curve25519-dalek 2.1.2", + "getrandom 0.1.16", + "merlin", + "rand 0.7.3", + "rand_core 0.5.1", + "serde", + "sha2 0.8.2", + "subtle 2.4.0", + "zeroize", +] + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "secrecy" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0673d6a6449f5e7d12a1caf424fd9363e2af3a4953023ed455e3c4beef4597c0" +dependencies = [ + "zeroize", +] + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + +[[package]] +name = "serde" +version = "1.0.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "166b2349061381baf54a58e4b13c89369feb0ef2eaa57198899e2312aac30aab" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = 
"1.0.120" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca2a8cb5805ce9e3b95435e3765b7b553cecc762d938d409434338386cb5775" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha2" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" +dependencies = [ + "block-buffer 0.7.3", + "digest 0.8.1", + "fake-simd", + "opaque-debug 0.2.3", +] + +[[package]] +name = "sha2" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e7aab86fe2149bad8c507606bdb3f4ef5e7b2380eb92350f56122cca72a42a8" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpuid-bool", + "digest 0.9.0", + "opaque-debug 0.3.0", +] + +[[package]] +name = "sharded-slab" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "signature" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f0242b8e50dd9accdd56170e94ca1ebd223b098eb9c83539a6e367d0f36ae68" + +[[package]] +name = "slab" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" + +[[package]] +name = "smallvec" +version = "0.6.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" +dependencies = [ + "maybe-uninit", +] + +[[package]] +name = "smallvec" +version = "1.6.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" + +[[package]] +name = "sp-api" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "hash-db", + "parity-scale-codec", + "sp-api-proc-macro", + "sp-core", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-version", + "thiserror", +] + +[[package]] +name = "sp-api-proc-macro" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "blake2-rfc", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-application-crypto" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-std", +] + +[[package]] +name = "sp-arithmetic" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "integer-sqrt", + "num-traits", + "parity-scale-codec", + "serde", + "sp-debug-derive", + "sp-std", +] + +[[package]] +name = "sp-core" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "base58", + "blake2-rfc", + "byteorder", + "dyn-clonable", + "ed25519-dalek", + "futures", + "hash-db", + "hash256-std-hasher", + "hex", + "impl-serde", + "lazy_static", + "libsecp256k1", + "log", + "merlin", + "num-traits", + "parity-scale-codec", + "parity-util-mem", + "parking_lot 0.11.1", + "primitive-types", + "rand 0.7.3", + "regex", + "schnorrkel", + "secrecy", + "serde", + "sha2 0.9.2", + "sp-debug-derive", + "sp-externalities", + "sp-runtime-interface", + "sp-std", + "sp-storage", + 
"substrate-bip39", + "thiserror", + "tiny-bip39", + "tiny-keccak", + "twox-hash", + "wasmi", + "zeroize", +] + +[[package]] +name = "sp-debug-derive" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-externalities" +version = "0.8.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "environmental", + "parity-scale-codec", + "sp-std", + "sp-storage", +] + +[[package]] +name = "sp-finality-grandpa" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "finality-grandpa", + "log", + "parity-scale-codec", + "serde", + "sp-api", + "sp-application-crypto", + "sp-core", + "sp-keystore", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-inherents" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "parity-scale-codec", + "parking_lot 0.11.1", + "sp-core", + "sp-std", + "thiserror", +] + +[[package]] +name = "sp-io" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "futures", + "hash-db", + "libsecp256k1", + "log", + "parity-scale-codec", + "parking_lot 0.11.1", + "sp-core", + "sp-externalities", + "sp-keystore", + "sp-runtime-interface", + "sp-state-machine", + "sp-std", + "sp-tracing", + "sp-trie", + "sp-wasm-interface", + "tracing", + "tracing-core", +] + +[[package]] +name = "sp-keyring" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "lazy_static", + "sp-core", + "sp-runtime", + "strum", +] + +[[package]] +name = "sp-keystore" 
+version = "0.8.0" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "async-trait", + "derive_more", + "futures", + "merlin", + "parity-scale-codec", + "parking_lot 0.11.1", + "schnorrkel", + "sp-core", + "sp-externalities", +] + +[[package]] +name = "sp-panic-handler" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "backtrace", +] + +[[package]] +name = "sp-runtime" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "either", + "hash256-std-hasher", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "parity-util-mem", + "paste", + "rand 0.7.3", + "serde", + "sp-application-crypto", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-std", +] + +[[package]] +name = "sp-runtime-interface" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec", + "primitive-types", + "sp-externalities", + "sp-runtime-interface-proc-macro", + "sp-std", + "sp-storage", + "sp-tracing", + "sp-wasm-interface", + "static_assertions", +] + +[[package]] +name = "sp-runtime-interface-proc-macro" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "Inflector", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-state-machine" +version = "0.8.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "hash-db", + "log", + "num-traits", + "parity-scale-codec", + "parking_lot 0.11.1", + "rand 0.7.3", + "smallvec 1.6.1", + "sp-core", + "sp-externalities", + 
"sp-panic-handler", + "sp-std", + "sp-trie", + "thiserror", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-std" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" + +[[package]] +name = "sp-storage" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "impl-serde", + "parity-scale-codec", + "ref-cast", + "serde", + "sp-debug-derive", + "sp-std", +] + +[[package]] +name = "sp-tracing" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "log", + "parity-scale-codec", + "sp-std", + "tracing", + "tracing-core", + "tracing-subscriber", +] + +[[package]] +name = "sp-trie" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "hash-db", + "memory-db", + "parity-scale-codec", + "sp-core", + "sp-std", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-version" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "impl-serde", + "parity-scale-codec", + "serde", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-wasm-interface" +version = "2.0.1" +source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec", + "sp-std", + "wasmi", +] + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strum" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6138f8f88a16d90134763314e3fc76fa3ed6a7db4725d6acf9a3ef95a3188d22" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0054a7df764039a6cd8592b9de84be4bec368ff081d203a7d5371cbfa8e65c81" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "substrate-bip39" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bed6646a0159b9935b5d045611560eeef842b78d7adc3ba36f5ca325a13a0236" +dependencies = [ + "hmac 0.7.1", + "pbkdf2 0.3.0", + "schnorrkel", + "sha2 0.8.2", + "zeroize", +] + +[[package]] +name = "substrate-bridge-fuzzer" +version = "0.1.0" +dependencies = [ + "bp-header-chain", + "bp-runtime", + "bp-test-utils", + "finality-grandpa", + "frame-support", + "frame-system", + "hash-db", + "honggfuzz", + "pallet-substrate-bridge", + "parity-scale-codec", + "serde", + "sp-core", + "sp-finality-grandpa", + "sp-io", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-trie", +] + +[[package]] +name = "subtle" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" + +[[package]] +name = "subtle" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" + +[[package]] +name = "syn" +version = "1.0.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "synstructure" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +dependencies = [ + "proc-macro2", + "quote", + "syn", 
+ "unicode-xid", +] + +[[package]] +name = "thiserror" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76cc616c6abf8c8928e2fdcc0dbfab37175edd8fb49a4641066ad1364fdab146" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9be73a2caec27583d0046ef3796c3794f868a5bc813db689eed00c7631275cd1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb9bc092d0d51e76b2b19d9d85534ffc9ec2db959a2523cdae0697e2972cd447" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "time" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "tiny-bip39" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8" +dependencies = [ + "anyhow", + "hmac 0.8.1", + "once_cell", + "pbkdf2 0.4.0", + "rand 0.7.3", + "rustc-hash", + "sha2 0.9.2", + "thiserror", + "unicode-normalization", + "zeroize", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinyvec" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" + +[[package]] +name = "toml" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +dependencies = [ + "serde", +] + +[[package]] +name = "tracing" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" +dependencies = [ + "cfg-if 1.0.0", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "tracing-log" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1fa8f0c8f4c594e4fc9debc1990deab13238077271ba84dd853d54902ee3401" +dependencies = [ + "ansi_term", + "chrono", + "lazy_static", + "matchers", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec 1.6.1", + "thread_local", 
+ "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] + +[[package]] +name = "trie-db" +version = "0.22.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cc176c377eb24d652c9c69c832c832019011b6106182bf84276c66b66d5c9a6" +dependencies = [ + "hash-db", + "hashbrown", + "log", + "rustc-hex", + "smallvec 1.6.1", +] + +[[package]] +name = "trie-root" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "652931506d2c1244d7217a70b99f56718a7b4161b37f04e7cd868072a99f68cd" +dependencies = [ + "hash-db", +] + +[[package]] +name = "twox-hash" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59" +dependencies = [ + "cfg-if 0.1.10", + "rand 0.7.3", + "static_assertions", +] + +[[package]] +name = "typenum" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" + +[[package]] +name = "uint" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e11fe9a9348741cf134085ad57c249508345fe16411b3d7fb4ff2da2f1d6382e" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + +[[package]] +name = "unicode-normalization" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-segmentation" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" + +[[package]] +name = "unicode-xid" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" + +[[package]] +name = "version_check" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasi" +version = "0.10.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" + +[[package]] +name = "wasmi" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf617d864d25af3587aa745529f7aaa541066c876d57e050c0d0c85c61c92aff" +dependencies = [ + "libc", + "memory_units", + "num-rational", + "num-traits", + "parity-wasm", + "wasmi-validation", +] + +[[package]] +name = "wasmi-validation" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea78c597064ba73596099281e2f4cfc019075122a65cdda3205af94f0b264d93" +dependencies = [ + "parity-wasm", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] 
+name = "zeroize" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81a974bcdd357f0dca4d41677db03436324d45a4c9ed2d0b873a5a360ce41c36" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3f369ddb18862aba61aa49bf31e74d29f0f162dec753063200e1dc084345d16" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] diff --git a/polkadot/fuzz/storage-proof/Cargo.toml b/polkadot/fuzz/storage-proof/Cargo.toml new file mode 100644 index 00000000000..05456114e6b --- /dev/null +++ b/polkadot/fuzz/storage-proof/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "storage-proof-fuzzer" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +codec = { package = "parity-scale-codec", version = "1.3.1" } +finality-grandpa = "0.12.3" +hash-db = "0.15.2" +honggfuzz = "0.5.54" +log = "0.4.0" +env_logger = "0.8.3" + +# Bridge Dependencies + +bp-header-chain = { path = "../../primitives/header-chain" } +bp-runtime = { path = "../../primitives/runtime" } +bp-test-utils = { path = "../../primitives/test-utils" } + +# Substrate Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-state-machine = { git = 
"https://github.com/paritytech/substrate", branch = "master" } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/fuzz/storage-proof/README.md b/polkadot/fuzz/storage-proof/README.md new file mode 100644 index 00000000000..da3c7b1565e --- /dev/null +++ b/polkadot/fuzz/storage-proof/README.md @@ -0,0 +1,32 @@ +# Storage Proof Fuzzer + +## How to run? + +Install dependencies: +``` +$ sudo apt install build-essential binutils-dev libunwind-dev +``` + + +Install `cargo hfuzz` plugin: +``` +$ cargo install honggfuzz +``` + +Run: +``` +$ cargo hfuzz run storage-proof-fuzzer +``` + +Use `HFUZZ_RUN_ARGS` to customize execution: +``` +# 1 second of timeout +# use 12 fuzzing thread +# be verbose +# stop after 1000000 fuzzing iteration +# exit upon crash +HFUZZ_RUN_ARGS="-t 1 -n 12 -v -N 1000000 --exit_upon_crash" cargo hfuzz run example +``` + +More details in the [official documentation](https://docs.rs/honggfuzz/0.5.52/honggfuzz/#about-honggfuzz). + diff --git a/polkadot/fuzz/storage-proof/src/main.rs b/polkadot/fuzz/storage-proof/src/main.rs new file mode 100644 index 00000000000..18be72e72f2 --- /dev/null +++ b/polkadot/fuzz/storage-proof/src/main.rs @@ -0,0 +1,84 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Storage Proof Checker fuzzer. + +#![warn(missing_docs)] + +use honggfuzz::fuzz; +// Logic for checking Substrate storage proofs. + +use sp_core::{Blake2Hasher, H256}; +use sp_state_machine::{backend::Backend, prove_read, InMemoryBackend}; +use sp_std::vec::Vec; +use sp_trie::StorageProof; +use std::collections::HashMap; + +fn craft_known_storage_proof(input_vec: Vec<(Vec, Vec)>) -> (H256, StorageProof) { + let storage_proof_vec = vec![( + None, + input_vec.iter().map(|x| (x.0.clone(), Some(x.1.clone()))).collect(), + )]; + log::info!("Storage proof vec {:?}", storage_proof_vec); + let backend = >::from(storage_proof_vec); + let root = backend.storage_root(std::iter::empty()).0; + let vector_element_proof = StorageProof::new( + prove_read(backend, input_vec.iter().map(|x| x.0.as_slice())) + .unwrap() + .iter_nodes() + .collect(), + ); + (root, vector_element_proof) +} + +fn transform_into_unique(input_vec: Vec<(Vec, Vec)>) -> Vec<(Vec, Vec)> { + let mut output_hashmap = HashMap::new(); + let mut output_vec = Vec::new(); + for key_value_pair in input_vec.clone() { + output_hashmap.insert(key_value_pair.0, key_value_pair.1); //Only 1 value per key + } + for (key, val) in output_hashmap.iter() { + output_vec.push((key.clone(), val.clone())); + } + output_vec +} + +fn run_fuzzer() { + fuzz!(|input_vec: Vec<(Vec, Vec)>| { + if input_vec.is_empty() { + return; + } + let unique_input_vec = transform_into_unique(input_vec); + let (root, craft_known_storage_proof) = craft_known_storage_proof(unique_input_vec.clone()); + let checker = >::new(root, craft_known_storage_proof) + .expect("Valid proof passed; qed"); + for key_value_pair in unique_input_vec { + log::info!("Reading value for pair {:?}", key_value_pair); + assert_eq!( + checker.read_value(&key_value_pair.0), + Ok(Some(key_value_pair.1.clone())) + ); + } + }) +} + +fn main() { + 
env_logger::init(); + + loop { + run_fuzzer(); + } +} diff --git a/polkadot/modules/currency-exchange/Cargo.toml b/polkadot/modules/currency-exchange/Cargo.toml new file mode 100644 index 00000000000..8094f0f2b6e --- /dev/null +++ b/polkadot/modules/currency-exchange/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "pallet-bridge-currency-exchange" +description = "A Substrate Runtime module that accepts 'lock funds' transactions from a peer chain and grants an equivalent amount to a the appropriate Substrate account." +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +log = { version = "0.4.14", default-features = false } +serde = { version = "1.0", optional = true } + +# Bridge dependencies + +bp-currency-exchange = { path = "../../primitives/currency-exchange", default-features = false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } + +# Substrate Dependencies + +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } + +[dev-dependencies] +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[features] +default = ["std"] +std = [ + "bp-currency-exchange/std", + "bp-header-chain/std", + 
"codec/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "log/std", + "serde", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "sp-std", +] diff --git a/polkadot/modules/currency-exchange/src/benchmarking.rs b/polkadot/modules/currency-exchange/src/benchmarking.rs new file mode 100644 index 00000000000..574ae93f6ee --- /dev/null +++ b/polkadot/modules/currency-exchange/src/benchmarking.rs @@ -0,0 +1,134 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Exchange module complexity is mostly determined by callbacks, defined by runtime. +//! So we are giving runtime opportunity to prepare environment and construct proof +//! before invoking module calls. + +use super::{ + Call, Config as CurrencyExchangeConfig, InclusionProofVerifier, Instance, Pallet as CurrencyExchangePallet, +}; +use sp_std::prelude::*; + +use frame_benchmarking::{account, benchmarks_instance}; +use frame_system::RawOrigin; + +const SEED: u32 = 0; +const WORST_TX_SIZE_FACTOR: u32 = 1000; +const WORST_PROOF_SIZE_FACTOR: u32 = 1000; + +/// Pallet we're benchmarking here. +pub struct Pallet, I: Instance>(CurrencyExchangePallet); + +/// Proof benchmarking parameters. +pub struct ProofParams { + /// Funds recipient. 
+ pub recipient: Recipient, + /// When true, recipient must exists before import. + pub recipient_exists: bool, + /// When 0, transaction should have minimal possible size. When this value has non-zero value n, + /// transaction size should be (if possible) near to MIN_SIZE + n * SIZE_FACTOR. + pub transaction_size_factor: u32, + /// When 0, proof should have minimal possible size. When this value has non-zero value n, + /// proof size should be (if possible) near to MIN_SIZE + n * SIZE_FACTOR. + pub proof_size_factor: u32, +} + +/// Config that must be implemented by runtime. +pub trait Config: CurrencyExchangeConfig { + /// Prepare proof for importing exchange transaction. + fn make_proof( + proof_params: ProofParams, + ) -> <>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof; +} + +benchmarks_instance! { + // Benchmark `import_peer_transaction` extrinsic with the best possible conditions: + // * Proof is the transaction itself. + // * Transaction has minimal size. + // * Recipient account exists. + import_peer_transaction_best_case { + let i in 1..100; + + let recipient: T::AccountId = account("recipient", i, SEED); + let proof = T::make_proof(ProofParams { + recipient: recipient.clone(), + recipient_exists: true, + transaction_size_factor: 0, + proof_size_factor: 0, + }); + }: import_peer_transaction(RawOrigin::Signed(recipient), proof) + + // Benchmark `import_peer_transaction` extrinsic when recipient account does not exists. + import_peer_transaction_when_recipient_does_not_exists { + let i in 1..100; + + let recipient: T::AccountId = account("recipient", i, SEED); + let proof = T::make_proof(ProofParams { + recipient: recipient.clone(), + recipient_exists: false, + transaction_size_factor: 0, + proof_size_factor: 0, + }); + }: import_peer_transaction(RawOrigin::Signed(recipient), proof) + + // Benchmark `import_peer_transaction` when transaction size increases. 
+ import_peer_transaction_when_transaction_size_increases { + let i in 1..100; + let n in 1..WORST_TX_SIZE_FACTOR; + + let recipient: T::AccountId = account("recipient", i, SEED); + let proof = T::make_proof(ProofParams { + recipient: recipient.clone(), + recipient_exists: true, + transaction_size_factor: n, + proof_size_factor: 0, + }); + }: import_peer_transaction(RawOrigin::Signed(recipient), proof) + + // Benchmark `import_peer_transaction` when proof size increases. + import_peer_transaction_when_proof_size_increases { + let i in 1..100; + let n in 1..WORST_PROOF_SIZE_FACTOR; + + let recipient: T::AccountId = account("recipient", i, SEED); + let proof = T::make_proof(ProofParams { + recipient: recipient.clone(), + recipient_exists: true, + transaction_size_factor: 0, + proof_size_factor: n, + }); + }: import_peer_transaction(RawOrigin::Signed(recipient), proof) + + // Benchmark `import_peer_transaction` extrinsic with the worst possible conditions: + // * Proof is large. + // * Transaction has large size. + // * Recipient account does not exists. + import_peer_transaction_worst_case { + let i in 1..100; + let m in WORST_TX_SIZE_FACTOR..WORST_TX_SIZE_FACTOR+1; + let n in WORST_PROOF_SIZE_FACTOR..WORST_PROOF_SIZE_FACTOR+1; + + let recipient: T::AccountId = account("recipient", i, SEED); + let proof = T::make_proof(ProofParams { + recipient: recipient.clone(), + recipient_exists: false, + transaction_size_factor: m, + proof_size_factor: n, + }); + }: import_peer_transaction(RawOrigin::Signed(recipient), proof) + +} diff --git a/polkadot/modules/currency-exchange/src/lib.rs b/polkadot/modules/currency-exchange/src/lib.rs new file mode 100644 index 00000000000..542082f85ab --- /dev/null +++ b/polkadot/modules/currency-exchange/src/lib.rs @@ -0,0 +1,496 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Runtime module that allows tokens exchange between two bridged chains. + +#![cfg_attr(not(feature = "std"), no_std)] + +use bp_currency_exchange::{ + CurrencyConverter, DepositInto, Error as ExchangeError, MaybeLockFundsTransaction, RecipientsMap, +}; +use bp_header_chain::InclusionProofVerifier; +use frame_support::{decl_error, decl_module, decl_storage, ensure}; +use sp_runtime::DispatchResult; + +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarking; + +/// Called when transaction is submitted to the exchange module. +pub trait OnTransactionSubmitted { + /// Called when valid transaction is submitted and accepted by the module. + fn on_valid_transaction_submitted(submitter: AccountId); +} + +/// The module configuration trait +pub trait Config: frame_system::Config { + /// Handler for transaction submission result. + type OnTransactionSubmitted: OnTransactionSubmitted; + /// Represents the blockchain that we'll be exchanging currency with. + type PeerBlockchain: InclusionProofVerifier; + /// Peer blockchain transaction parser. + type PeerMaybeLockFundsTransaction: MaybeLockFundsTransaction< + Transaction = ::Transaction, + >; + /// Map between blockchains recipients. 
+ type RecipientsMap: RecipientsMap< + PeerRecipient = ::Recipient, + Recipient = Self::AccountId, + >; + /// This blockchain currency amount type. + type Amount; + /// Converter from peer blockchain currency type into current blockchain currency type. + type CurrencyConverter: CurrencyConverter< + SourceAmount = ::Amount, + TargetAmount = Self::Amount, + >; + /// Something that could grant money. + type DepositInto: DepositInto; +} + +decl_error! { + pub enum Error for Pallet, I: Instance> { + /// Invalid peer blockchain transaction provided. + InvalidTransaction, + /// Peer transaction has invalid amount. + InvalidAmount, + /// Peer transaction has invalid recipient. + InvalidRecipient, + /// Cannot map from peer recipient to this blockchain recipient. + FailedToMapRecipients, + /// Failed to convert from peer blockchain currency to this blockhain currency. + FailedToConvertCurrency, + /// Deposit has failed. + DepositFailed, + /// Deposit has partially failed (changes to recipient account were made). + DepositPartiallyFailed, + /// Transaction is not finalized. + UnfinalizedTransaction, + /// Transaction funds are already claimed. + AlreadyClaimed, + } +} + +decl_module! { + pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { + /// Imports lock fund transaction of the peer blockchain. + #[weight = 0] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) + pub fn import_peer_transaction( + origin, + proof: <>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof, + ) -> DispatchResult { + let submitter = frame_system::ensure_signed(origin)?; + + // verify and parse transaction proof + let deposit = prepare_deposit_details::(&proof)?; + + // make sure to update the mapping if we deposit successfully to avoid double spending, + // i.e. whenever `deposit_into` is successful we MUST update `Transfers`. 
+ { + // if any changes were made to the storage, we can't just return error here, because + // otherwise the same proof may be imported again + let deposit_result = T::DepositInto::deposit_into(deposit.recipient, deposit.amount); + match deposit_result { + Ok(_) => (), + Err(ExchangeError::DepositPartiallyFailed) => (), + Err(error) => return Err(Error::::from(error).into()), + } + Transfers::::insert(&deposit.transfer_id, ()) + } + + // reward submitter for providing valid message + T::OnTransactionSubmitted::on_valid_transaction_submitted(submitter); + + log::trace!( + target: "runtime", + "Completed currency exchange: {:?}", + deposit.transfer_id, + ); + + Ok(()) + } + } +} + +decl_storage! { + trait Store for Pallet, I: Instance = DefaultInstance> as Bridge { + /// All transfers that have already been claimed. + Transfers: map hasher(blake2_128_concat) ::Id => (); + } +} + +impl, I: Instance> Pallet { + /// Returns true if currency exchange module is able to import given transaction proof in + /// its current state. 
+ pub fn filter_transaction_proof( + proof: &::TransactionInclusionProof, + ) -> bool { + if let Err(err) = prepare_deposit_details::(proof) { + log::trace!( + target: "runtime", + "Can't accept exchange transaction: {:?}", + err, + ); + + return false; + } + + true + } +} + +impl, I: Instance> From for Error { + fn from(error: ExchangeError) -> Self { + match error { + ExchangeError::InvalidTransaction => Error::InvalidTransaction, + ExchangeError::InvalidAmount => Error::InvalidAmount, + ExchangeError::InvalidRecipient => Error::InvalidRecipient, + ExchangeError::FailedToMapRecipients => Error::FailedToMapRecipients, + ExchangeError::FailedToConvertCurrency => Error::FailedToConvertCurrency, + ExchangeError::DepositFailed => Error::DepositFailed, + ExchangeError::DepositPartiallyFailed => Error::DepositPartiallyFailed, + } + } +} + +impl OnTransactionSubmitted for () { + fn on_valid_transaction_submitted(_: AccountId) {} +} + +/// Exchange deposit details. +struct DepositDetails, I: Instance> { + /// Transfer id. + pub transfer_id: ::Id, + /// Transfer recipient. + pub recipient: ::Recipient, + /// Transfer amount. + pub amount: ::TargetAmount, +} + +/// Verify and parse transaction proof, preparing everything required for importing +/// this transaction proof. 
+fn prepare_deposit_details, I: Instance>( + proof: &<>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof, +) -> Result, Error> { + // ensure that transaction is included in finalized block that we know of + let transaction = >::PeerBlockchain::verify_transaction_inclusion_proof(proof) + .ok_or(Error::::UnfinalizedTransaction)?; + + // parse transaction + let transaction = + >::PeerMaybeLockFundsTransaction::parse(&transaction).map_err(Error::::from)?; + let transfer_id = transaction.id; + ensure!( + !Transfers::::contains_key(&transfer_id), + Error::::AlreadyClaimed + ); + + // grant recipient + let recipient = T::RecipientsMap::map(transaction.recipient).map_err(Error::::from)?; + let amount = T::CurrencyConverter::convert(transaction.amount).map_err(Error::::from)?; + + Ok(DepositDetails { + transfer_id, + recipient, + amount, + }) +} + +#[cfg(test)] +mod tests { + // From construct_runtime macro + #![allow(clippy::from_over_into)] + + use super::*; + use bp_currency_exchange::LockFundsTransaction; + use frame_support::{assert_noop, assert_ok, construct_runtime, parameter_types, weights::Weight}; + use sp_core::H256; + use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, + }; + + type AccountId = u64; + + const INVALID_TRANSACTION_ID: u64 = 100; + const ALREADY_CLAIMED_TRANSACTION_ID: u64 = 101; + const UNKNOWN_RECIPIENT_ID: u64 = 0; + const INVALID_AMOUNT: u64 = 0; + const MAX_DEPOSIT_AMOUNT: u64 = 1000; + const SUBMITTER: u64 = 2000; + + type RawTransaction = LockFundsTransaction; + + pub struct DummyTransactionSubmissionHandler; + + impl OnTransactionSubmitted for DummyTransactionSubmissionHandler { + fn on_valid_transaction_submitted(submitter: AccountId) { + Transfers::::insert(submitter, ()); + } + } + + pub struct DummyBlockchain; + + impl InclusionProofVerifier for DummyBlockchain { + type Transaction = RawTransaction; + type TransactionInclusionProof = (bool, RawTransaction); + + fn 
verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option { + if proof.0 { + Some(proof.1.clone()) + } else { + None + } + } + } + + pub struct DummyTransaction; + + impl MaybeLockFundsTransaction for DummyTransaction { + type Transaction = RawTransaction; + type Id = u64; + type Recipient = AccountId; + type Amount = u64; + + fn parse(tx: &Self::Transaction) -> bp_currency_exchange::Result { + match tx.id { + INVALID_TRANSACTION_ID => Err(ExchangeError::InvalidTransaction), + _ => Ok(tx.clone()), + } + } + } + + pub struct DummyRecipientsMap; + + impl RecipientsMap for DummyRecipientsMap { + type PeerRecipient = AccountId; + type Recipient = AccountId; + + fn map(peer_recipient: Self::PeerRecipient) -> bp_currency_exchange::Result { + match peer_recipient { + UNKNOWN_RECIPIENT_ID => Err(ExchangeError::FailedToMapRecipients), + _ => Ok(peer_recipient * 10), + } + } + } + + pub struct DummyCurrencyConverter; + + impl CurrencyConverter for DummyCurrencyConverter { + type SourceAmount = u64; + type TargetAmount = u64; + + fn convert(amount: Self::SourceAmount) -> bp_currency_exchange::Result { + match amount { + INVALID_AMOUNT => Err(ExchangeError::FailedToConvertCurrency), + _ => Ok(amount * 10), + } + } + } + + pub struct DummyDepositInto; + + impl DepositInto for DummyDepositInto { + type Recipient = AccountId; + type Amount = u64; + + fn deposit_into(_recipient: Self::Recipient, amount: Self::Amount) -> bp_currency_exchange::Result<()> { + match amount { + amount if amount < MAX_DEPOSIT_AMOUNT * 10 => Ok(()), + amount if amount == MAX_DEPOSIT_AMOUNT * 10 => Err(ExchangeError::DepositPartiallyFailed), + _ => Err(ExchangeError::DepositFailed), + } + } + } + + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + use crate as pallet_bridge_currency_exchange; + + construct_runtime! 
{ + pub enum TestRuntime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Exchange: pallet_bridge_currency_exchange::{Pallet}, + } + } + + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + + impl frame_system::Config for TestRuntime { + type Origin = Origin; + type Index = u64; + type Call = Call; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type BaseCallFilter = (); + type SystemWeightInfo = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type SS58Prefix = (); + type OnSetCode = (); + } + + impl Config for TestRuntime { + type OnTransactionSubmitted = DummyTransactionSubmissionHandler; + type PeerBlockchain = DummyBlockchain; + type PeerMaybeLockFundsTransaction = DummyTransaction; + type RecipientsMap = DummyRecipientsMap; + type Amount = u64; + type CurrencyConverter = DummyCurrencyConverter; + type DepositInto = DummyDepositInto; + } + + fn new_test_ext() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + sp_io::TestExternalities::new(t) + } + + fn transaction(id: u64) -> RawTransaction { + RawTransaction { + id, + recipient: 1, + amount: 2, + } + } + + #[test] + fn unfinalized_transaction_rejected() { + new_test_ext().execute_with(|| { + assert_noop!( + Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (false, transaction(0))), + Error::::UnfinalizedTransaction, + ); + }); + } + + 
#[test] + fn invalid_transaction_rejected() { + new_test_ext().execute_with(|| { + assert_noop!( + Exchange::import_peer_transaction( + Origin::signed(SUBMITTER), + (true, transaction(INVALID_TRANSACTION_ID)), + ), + Error::::InvalidTransaction, + ); + }); + } + + #[test] + fn claimed_transaction_rejected() { + new_test_ext().execute_with(|| { + ::Transfers::insert(ALREADY_CLAIMED_TRANSACTION_ID, ()); + assert_noop!( + Exchange::import_peer_transaction( + Origin::signed(SUBMITTER), + (true, transaction(ALREADY_CLAIMED_TRANSACTION_ID)), + ), + Error::::AlreadyClaimed, + ); + }); + } + + #[test] + fn transaction_with_unknown_recipient_rejected() { + new_test_ext().execute_with(|| { + let mut transaction = transaction(0); + transaction.recipient = UNKNOWN_RECIPIENT_ID; + assert_noop!( + Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)), + Error::::FailedToMapRecipients, + ); + }); + } + + #[test] + fn transaction_with_invalid_amount_rejected() { + new_test_ext().execute_with(|| { + let mut transaction = transaction(0); + transaction.amount = INVALID_AMOUNT; + assert_noop!( + Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)), + Error::::FailedToConvertCurrency, + ); + }); + } + + #[test] + fn transaction_with_invalid_deposit_rejected() { + new_test_ext().execute_with(|| { + let mut transaction = transaction(0); + transaction.amount = MAX_DEPOSIT_AMOUNT + 1; + assert_noop!( + Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)), + Error::::DepositFailed, + ); + }); + } + + #[test] + fn valid_transaction_accepted_even_if_deposit_partially_fails() { + new_test_ext().execute_with(|| { + let mut transaction = transaction(0); + transaction.amount = MAX_DEPOSIT_AMOUNT; + assert_ok!(Exchange::import_peer_transaction( + Origin::signed(SUBMITTER), + (true, transaction), + ),); + + // ensure that the transfer has been marked as completed + assert!(::Transfers::contains_key(0u64)); + // 
ensure that submitter has been rewarded + assert!(::Transfers::contains_key(SUBMITTER)); + }); + } + + #[test] + fn valid_transaction_accepted() { + new_test_ext().execute_with(|| { + assert_ok!(Exchange::import_peer_transaction( + Origin::signed(SUBMITTER), + (true, transaction(0)), + ),); + + // ensure that the transfer has been marked as completed + assert!(::Transfers::contains_key(0u64)); + // ensure that submitter has been rewarded + assert!(::Transfers::contains_key(SUBMITTER)); + }); + } +} diff --git a/polkadot/modules/dispatch/Cargo.toml b/polkadot/modules/dispatch/Cargo.toml new file mode 100644 index 00000000000..6170af272ad --- /dev/null +++ b/polkadot/modules/dispatch/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "pallet-bridge-dispatch" +description = "A Substrate Runtime module that dispatches a bridge message, treating it simply as encoded Call" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +log = { version = "0.4.14", default-features = false } + +# Bridge dependencies + +bp-message-dispatch = { path = "../../primitives/message-dispatch", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } + +# Substrate Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + +[dev-dependencies] +sp-io = { git = 
"https://github.com/paritytech/substrate", branch = "master" } +serde = "1.0" + +[features] +default = ["std"] +std = [ + "bp-message-dispatch/std", + "bp-runtime/std", + "frame-support/std", + "frame-system/std", + "log/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/polkadot/modules/dispatch/README.md b/polkadot/modules/dispatch/README.md new file mode 100644 index 00000000000..f2ee04beaf5 --- /dev/null +++ b/polkadot/modules/dispatch/README.md @@ -0,0 +1,61 @@ +# Call Dispatch Module + +The call dispatch module has a single internal (only callable by other runtime modules) entry point +for dispatching encoded calls (`pallet_bridge_dispatch::Module::dispatch`). Every dispatch +(successful or not) emits a corresponding module event. The module doesn't have any call-related +requirements - they may come from the bridged chain over some message lane, or they may be crafted +locally. But in this document we'll mostly talk about this module in the context of bridges. + +Every message that is being dispatched has three main characteristics: +- `bridge` is the 4-bytes identifier of the bridge where this message comes from. This may be the + identifier of the bridged chain (like `b"rlto"` for messages coming from `Rialto`), or the + identifier of the bridge itself (`b"rimi"` for `Rialto` <-> `Millau` bridge); +- `id` is the unique id of the message within the given bridge. For messages coming from the + [messages module](../messages/README.md), it may worth to use a tuple + `(LaneId, MessageNonce)` to identify a message; +- `message` is the `pallet_bridge_dispatch::MessagePayload` structure. The `call` field is set + to the (potentially) encoded `Call` of this chain. + +The easiest way to understand what is happening when a `Call` is being dispatched, is to look at the +module events set: + +- `MessageRejected` event is emitted if a message has been rejected even before it has reached the + module. 
Dispatch then is called just to reflect the fact that message has been received, but we + have failed to pre-process it (e.g. because we have failed to decode `MessagePayload` structure + from the proof); +- `MessageVersionSpecMismatch` event is emitted if current runtime specification version differs + from the version that has been used to encode the `Call`. The message payload has the + `spec_version`, that is filled by the message submitter. If this value differs from the current + runtime version, the dispatch mechanism refuses to dispatch the message. Without this check, we may + decode the wrong `Call` for example if method arguments were changed; +- `MessageCallDecodeFailed` event is emitted if we have failed to decode `Call` from the payload. + This may happen if the submitter has provided incorrect value in the `call` field, or if source + chain storage has been corrupted. The `Call` is decoded after `spec_version` check, so we'll never + try to decode `Call` from other runtime version; +- `MessageSignatureMismatch` event is emitted if the submitter has chosen to dispatch the message using + the specified this-chain account (`pallet_bridge_dispatch::CallOrigin::TargetAccount` origin), + but he has failed to prove that he owns the private key for this account; +- `MessageCallRejected` event is emitted if the module has been deployed with some call filter and + this filter has rejected the `Call`. In your bridge you may choose to reject all messages except + e.g. balance transfer calls; +- `MessageWeightMismatch` event is emitted if the message submitter has specified invalid `Call` + dispatch weight in the `weight` field of the message payload. The value of this field is compared + to the pre-dispatch weight of the decoded `Call`. If it is less than the actual pre-dispatch + weight, the dispatch is rejected.
Keep in mind that even if post-dispatch weight will be less + than specified, the submitter still has to declare (and pay for) the maximal possible weight + (that is the pre-dispatch weight); +- `MessageDispatched` event is emitted if the message has passed all checks and we have actually + dispatched it. The dispatch may still fail, though - that's why we are including the dispatch + result in the event payload. + +When we talk about this module in the context of bridges, these events help in the following cases: + +1. when the message submitter has access to the state of both chains and wants to monitor what has + happened with his message. Then he could use the message id (that he gets from the + [messages module events](../messages/README.md#General-Information)) to filter events of + call dispatch module at the target chain and actually see what has happened with his message; + +1. when the message submitter only has access to the source chain state (for example, when sender is + the runtime module at the source chain). In this case, your bridge may have additional mechanism + to deliver dispatch proofs (which are storage proof of module events) back to the source chain, + thus allowing the submitter to see what has happened with his messages. diff --git a/polkadot/modules/dispatch/src/lib.rs b/polkadot/modules/dispatch/src/lib.rs new file mode 100644 index 00000000000..416d080b0c1 --- /dev/null +++ b/polkadot/modules/dispatch/src/lib.rs @@ -0,0 +1,865 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version.
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Runtime module which takes care of dispatching messages received over the bridge. +//! +//! The messages are interpreted directly as runtime `Call`. We attempt to decode +//! them and then dispatch as usual. To prevent compatibility issues, the Calls have +//! to include a `spec_version`. This will be checked before dispatch. In the case of +//! a successful dispatch an event is emitted. + +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(missing_docs)] + +use bp_message_dispatch::{MessageDispatch, Weight}; +use bp_runtime::{derive_account_id, InstanceId, Size, SourceAccount}; +use codec::{Decode, Encode}; +use frame_support::{ + decl_event, decl_module, decl_storage, + dispatch::{Dispatchable, Parameter}, + ensure, + traits::{Filter, Get}, + weights::{extract_actual_weight, GetDispatchInfo}, + RuntimeDebug, +}; +use frame_system::RawOrigin; +use sp_runtime::{ + traits::{BadOrigin, Convert, IdentifyAccount, MaybeDisplay, MaybeSerializeDeserialize, Member, Verify}, + DispatchResult, +}; +use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; + +/// Spec version type. +pub type SpecVersion = u32; + +// TODO [#895] move to primitives +/// Origin of a Call when it is dispatched on the target chain. +/// +/// The source chain can (and should) verify that the message can be dispatched on the target chain +/// with a particular origin given the source chain's origin. This can be done with the +/// `verify_message_origin()` function. +#[derive(RuntimeDebug, Encode, Decode, Clone, PartialEq, Eq)] +pub enum CallOrigin { + /// Call is sent by the Root origin on the source chain.
On the target chain it is dispatched + /// from a derived account. + /// + /// The derived account represents the source Root account on the target chain. This is useful + /// if the target chain needs some way of knowing that a call came from a privileged origin on + /// the source chain (maybe to allow a configuration change for example). + SourceRoot, + + /// Call is sent by `SourceChainAccountId` on the source chain. On the target chain it is + /// dispatched from an account controlled by a private key on the target chain. + /// + /// The account can be identified by `TargetChainAccountPublic`. The proof that the + /// `SourceChainAccountId` controls `TargetChainAccountPublic` is the `TargetChainSignature` + /// over `(Call, SourceChainAccountId, TargetChainSpecVersion, SourceChainBridgeId).encode()`. + /// + /// NOTE sending messages using this origin (or any other) does not have replay protection! + /// The assumption is that both the source account and the target account are controlled by + /// the same entity, so source-chain replay protection is sufficient. + /// As a consequence, it's extremely important for the target chain user to never produce + /// a signature with their target-private key on something that could be sent over the bridge, + /// i.e. if the target user signs `(, Call::Transfer(X, 5))` + /// The owner of `some-source-account-id` can send that message multiple times, which would + /// result in multiple transfer calls being dispatched on the target chain. + /// So please, NEVER USE YOUR PRIVATE KEY TO SIGN SOMETHING YOU DON'T FULLY UNDERSTAND! + TargetAccount(SourceChainAccountId, TargetChainAccountPublic, TargetChainSignature), + + /// Call is sent by the `SourceChainAccountId` on the source chain. On the target chain it is + /// dispatched from a derived account ID.
+ /// + /// The account ID on the target chain is derived from the source account ID This is useful if + /// you need a way to represent foreign accounts on this chain for call dispatch purposes. + /// + /// Note that the derived account does not need to have a private key on the target chain. This + /// origin can therefore represent proxies, pallets, etc. as well as "regular" accounts. + SourceAccount(SourceChainAccountId), +} + +// TODO [#895] move to primitives +/// Message payload type used by dispatch module. +#[derive(RuntimeDebug, Encode, Decode, Clone, PartialEq, Eq)] +pub struct MessagePayload { + /// Runtime specification version. We only dispatch messages that have the same + /// runtime version. Otherwise we risk to misinterpret encoded calls. + pub spec_version: SpecVersion, + /// Weight of the call, declared by the message sender. If it is less than actual + /// static weight, the call is not dispatched. + pub weight: Weight, + /// Call origin to be used during dispatch. + pub origin: CallOrigin, + /// The call itself. + pub call: Call, +} + +impl Size + for MessagePayload> +{ + fn size_hint(&self) -> u32 { + self.call.len() as _ + } +} + +/// The module configuration trait. +pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + Into<::Event>; + /// Id of the message. Whenever message is passed to the dispatch module, it emits + /// event with this id + dispatch result. Could be e.g. (LaneId, MessageNonce) if + /// it comes from the messages module. + type MessageId: Parameter; + /// Type of account ID on source chain. + type SourceChainAccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord + Default; + /// Type of account public key on target chain. + type TargetChainAccountPublic: Parameter + IdentifyAccount; + /// Type of signature that may prove that the message has been signed by + /// owner of `TargetChainAccountPublic`. 
+ type TargetChainSignature: Parameter + Verify; + /// The overarching dispatch call type. + type Call: Parameter + + GetDispatchInfo + + Dispatchable< + Origin = ::Origin, + PostInfo = frame_support::dispatch::PostDispatchInfo, + >; + /// Pre-dispatch filter for incoming calls. + /// + /// The pallet will filter all incoming calls right before they're dispatched. If this filter + /// rejects the call, special event (`Event::MessageCallRejected`) is emitted. + type CallFilter: Filter<>::Call>; + /// The type that is used to wrap the `Self::Call` when it is moved over bridge. + /// + /// The idea behind this is to avoid `Call` conversion/decoding until we'll be sure + /// that all other stuff (like `spec_version`) is ok. If we would try to decode + /// `Call` which has been encoded using previous `spec_version`, then we might end + /// up with decoding error, instead of `MessageVersionSpecMismatch`. + type EncodedCall: Decode + Encode + Into>::Call, ()>>; + /// A type which can be turned into an AccountId from a 256-bit hash. + /// + /// Used when deriving target chain AccountIds from source chain AccountIds. + type AccountIdConverter: sp_runtime::traits::Convert; +} + +decl_storage! { + trait Store for Pallet, I: Instance = DefaultInstance> as Dispatch {} +} + +decl_event!( + pub enum Event where + >::MessageId + { + /// Message has been rejected before reaching dispatch. + MessageRejected(InstanceId, MessageId), + /// Message has been rejected by dispatcher because of spec version mismatch. + /// Last two arguments are: expected and passed spec version. + MessageVersionSpecMismatch(InstanceId, MessageId, SpecVersion, SpecVersion), + /// Message has been rejected by dispatcher because of weight mismatch. + /// Last two arguments are: expected and passed call weight. + MessageWeightMismatch(InstanceId, MessageId, Weight, Weight), + /// Message signature mismatch. + MessageSignatureMismatch(InstanceId, MessageId), + /// Message has been dispatched with given result. 
+ MessageDispatched(InstanceId, MessageId, DispatchResult), + /// We have failed to decode Call from the message. + MessageCallDecodeFailed(InstanceId, MessageId), + /// The call from the message has been rejected by the call filter. + MessageCallRejected(InstanceId, MessageId), + /// Phantom member, never used. Needed to handle multiple pallet instances. + _Dummy(PhantomData), + } +); + +decl_module! { + /// Call Dispatch FRAME Pallet. + pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { + /// Deposit one of this module's events by using the default implementation. + fn deposit_event() = default; + } +} + +impl, I: Instance> MessageDispatch for Pallet { + type Message = + MessagePayload; + + fn dispatch_weight(message: &Self::Message) -> Weight { + message.weight + } + + fn dispatch(bridge: InstanceId, id: T::MessageId, message: Result) { + // emit special even if message has been rejected by external component + let message = match message { + Ok(message) => message, + Err(_) => { + log::trace!(target: "runtime::bridge-dispatch", "Message {:?}/{:?}: rejected before actual dispatch", bridge, id); + Self::deposit_event(RawEvent::MessageRejected(bridge, id)); + return; + } + }; + + // verify spec version + // (we want it to be the same, because otherwise we may decode Call improperly) + let expected_version = ::Version::get().spec_version; + if message.spec_version != expected_version { + log::trace!( + "Message {:?}/{:?}: spec_version mismatch. 
Expected {:?}, got {:?}", + bridge, + id, + expected_version, + message.spec_version, + ); + Self::deposit_event(RawEvent::MessageVersionSpecMismatch( + bridge, + id, + expected_version, + message.spec_version, + )); + return; + } + + // now that we have spec version checked, let's decode the call + let call = match message.call.into() { + Ok(call) => call, + Err(_) => { + log::trace!(target: "runtime::bridge-dispatch", "Failed to decode Call from message {:?}/{:?}", bridge, id,); + Self::deposit_event(RawEvent::MessageCallDecodeFailed(bridge, id)); + return; + } + }; + + // prepare dispatch origin + let origin_account = match message.origin { + CallOrigin::SourceRoot => { + let hex_id = derive_account_id::(bridge, SourceAccount::Root); + let target_id = T::AccountIdConverter::convert(hex_id); + log::trace!(target: "runtime::bridge-dispatch", "Root Account: {:?}", &target_id); + target_id + } + CallOrigin::TargetAccount(source_account_id, target_public, target_signature) => { + let digest = account_ownership_digest(&call, source_account_id, message.spec_version, bridge); + + let target_account = target_public.into_account(); + if !target_signature.verify(&digest[..], &target_account) { + log::trace!( + target: "runtime::bridge-dispatch", + "Message {:?}/{:?}: origin proof is invalid. 
Expected account: {:?} from signature: {:?}", + bridge, + id, + target_account, + target_signature, + ); + Self::deposit_event(RawEvent::MessageSignatureMismatch(bridge, id)); + return; + } + + log::trace!(target: "runtime::bridge-dispatch", "Target Account: {:?}", &target_account); + target_account + } + CallOrigin::SourceAccount(source_account_id) => { + let hex_id = derive_account_id(bridge, SourceAccount::Account(source_account_id)); + let target_id = T::AccountIdConverter::convert(hex_id); + log::trace!(target: "runtime::bridge-dispatch", "Source Account: {:?}", &target_id); + target_id + } + }; + + // filter the call + if !T::CallFilter::filter(&call) { + log::trace!( + target: "runtime::bridge-dispatch", + "Message {:?}/{:?}: the call ({:?}) is rejected by filter", + bridge, + id, + call, + ); + Self::deposit_event(RawEvent::MessageCallRejected(bridge, id)); + return; + } + + // verify weight + // (we want passed weight to be at least equal to pre-dispatch weight of the call + // because otherwise Calls may be dispatched at lower price) + let dispatch_info = call.get_dispatch_info(); + let expected_weight = dispatch_info.weight; + if message.weight < expected_weight { + log::trace!( + target: "runtime::bridge-dispatch", + "Message {:?}/{:?}: passed weight is too low. Expected at least {:?}, got {:?}", + bridge, + id, + expected_weight, + message.weight, + ); + Self::deposit_event(RawEvent::MessageWeightMismatch( + bridge, + id, + expected_weight, + message.weight, + )); + return; + } + + // finally dispatch message + let origin = RawOrigin::Signed(origin_account).into(); + + log::trace!(target: "runtime::bridge-dispatch", "Message being dispatched is: {:?}", &call); + let dispatch_result = call.dispatch(origin); + let actual_call_weight = extract_actual_weight(&dispatch_result, &dispatch_info); + + log::trace!( + target: "runtime::bridge-dispatch", + "Message {:?}/{:?} has been dispatched. Weight: {} of {}. 
Result: {:?}", + bridge, + id, + actual_call_weight, + message.weight, + dispatch_result, + ); + + Self::deposit_event(RawEvent::MessageDispatched( + bridge, + id, + dispatch_result.map(drop).map_err(|e| e.error), + )); + } +} + +/// Check if the message is allowed to be dispatched on the target chain given the sender's origin +/// on the source chain. +/// +/// For example, if a message is sent from a "regular" account on the source chain it will not be +/// allowed to be dispatched as Root on the target chain. This is a useful check to do on the source +/// chain _before_ sending a message whose dispatch will be rejected on the target chain. +pub fn verify_message_origin( + sender_origin: &RawOrigin, + message: &MessagePayload, +) -> Result, BadOrigin> +where + SourceChainAccountId: PartialEq + Clone, +{ + match message.origin { + CallOrigin::SourceRoot => { + ensure!(sender_origin == &RawOrigin::Root, BadOrigin); + Ok(None) + } + CallOrigin::TargetAccount(ref source_account_id, _, _) => { + ensure!( + sender_origin == &RawOrigin::Signed(source_account_id.clone()), + BadOrigin + ); + Ok(Some(source_account_id.clone())) + } + CallOrigin::SourceAccount(ref source_account_id) => { + ensure!( + sender_origin == &RawOrigin::Signed(source_account_id.clone()), + BadOrigin + ); + Ok(Some(source_account_id.clone())) + } + } +} + +/// Target account ownership digest from the source chain. +/// +/// The byte vector returned by this function will be signed with a target chain account +/// private key. This way, the owner of `source_account_id` on the source chain proves that +/// the target chain account private key is also under his control. 
+pub fn account_ownership_digest( + call: &Call, + source_account_id: AccountId, + target_spec_version: SpecVersion, + source_instance_id: BridgeId, +) -> Vec +where + Call: Encode, + AccountId: Encode, + SpecVersion: Encode, + BridgeId: Encode, +{ + let mut proof = Vec::new(); + call.encode_to(&mut proof); + source_account_id.encode_to(&mut proof); + target_spec_version.encode_to(&mut proof); + source_instance_id.encode_to(&mut proof); + + proof +} + +#[cfg(test)] +mod tests { + // From construct_runtime macro + #![allow(clippy::from_over_into)] + + use super::*; + use frame_support::{parameter_types, weights::Weight}; + use frame_system::{EventRecord, Phase}; + use sp_core::H256; + use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, + }; + + type AccountId = u64; + type MessageId = [u8; 4]; + + #[derive(Debug, Encode, Decode, Clone, PartialEq, Eq)] + pub struct TestAccountPublic(AccountId); + + impl IdentifyAccount for TestAccountPublic { + type AccountId = AccountId; + + fn into_account(self) -> AccountId { + self.0 + } + } + + #[derive(Debug, Encode, Decode, Clone, PartialEq, Eq)] + pub struct TestSignature(AccountId); + + impl Verify for TestSignature { + type Signer = TestAccountPublic; + + fn verify>(&self, _msg: L, signer: &AccountId) -> bool { + self.0 == *signer + } + } + + pub struct AccountIdConverter; + + impl sp_runtime::traits::Convert for AccountIdConverter { + fn convert(hash: H256) -> AccountId { + hash.to_low_u64_ne() + } + } + + type Block = frame_system::mocking::MockBlock; + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + + use crate as call_dispatch; + + frame_support::construct_runtime! { + pub enum TestRuntime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Dispatch: call_dispatch::{Pallet, Call, Event}, + } + } + + parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + + impl frame_system::Config for TestRuntime { + type Origin = Origin; + type Index = u64; + type Call = Call; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type BaseCallFilter = (); + type SystemWeightInfo = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type SS58Prefix = (); + type OnSetCode = (); + } + + impl Config for TestRuntime { + type Event = Event; + type MessageId = MessageId; + type SourceChainAccountId = AccountId; + type TargetChainAccountPublic = TestAccountPublic; + type TargetChainSignature = TestSignature; + type Call = Call; + type CallFilter = TestCallFilter; + type EncodedCall = EncodedCall; + type AccountIdConverter = AccountIdConverter; + } + + #[derive(Decode, Encode)] + pub struct EncodedCall(Vec); + + impl From for Result { + fn from(call: EncodedCall) -> Result { + Call::decode(&mut &call.0[..]).map_err(drop) + } + } + + pub struct TestCallFilter; + + impl Filter for TestCallFilter { + fn filter(call: &Call) -> bool { + !matches!(*call, Call::System(frame_system::Call::fill_block(_))) + } + } + + const TEST_SPEC_VERSION: SpecVersion = 0; + const TEST_WEIGHT: Weight = 1_000_000_000; + + fn new_test_ext() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + sp_io::TestExternalities::new(t) + } + + fn prepare_message( + origin: CallOrigin, + call: Call, + ) -> as MessageDispatch<::MessageId>>::Message { + MessagePayload { + spec_version: 
TEST_SPEC_VERSION, + weight: TEST_WEIGHT, + origin, + call: EncodedCall(call.encode()), + } + } + + fn prepare_root_message( + call: Call, + ) -> as MessageDispatch<::MessageId>>::Message { + prepare_message(CallOrigin::SourceRoot, call) + } + + fn prepare_target_message( + call: Call, + ) -> as MessageDispatch<::MessageId>>::Message { + let origin = CallOrigin::TargetAccount(1, TestAccountPublic(1), TestSignature(1)); + prepare_message(origin, call) + } + + fn prepare_source_message( + call: Call, + ) -> as MessageDispatch<::MessageId>>::Message { + let origin = CallOrigin::SourceAccount(1); + prepare_message(origin, call) + } + + #[test] + fn should_fail_on_spec_version_mismatch() { + new_test_ext().execute_with(|| { + let bridge = b"ethb".to_owned(); + let id = [0; 4]; + + const BAD_SPEC_VERSION: SpecVersion = 99; + let mut message = + prepare_root_message(Call::System(>::remark(vec![1, 2, 3]))); + message.spec_version = BAD_SPEC_VERSION; + + System::set_block_number(1); + Dispatch::dispatch(bridge, id, Ok(message)); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: Event::call_dispatch(call_dispatch::Event::::MessageVersionSpecMismatch( + bridge, + id, + TEST_SPEC_VERSION, + BAD_SPEC_VERSION + )), + topics: vec![], + }], + ); + }); + } + + #[test] + fn should_fail_on_weight_mismatch() { + new_test_ext().execute_with(|| { + let bridge = b"ethb".to_owned(); + let id = [0; 4]; + let mut message = + prepare_root_message(Call::System(>::remark(vec![1, 2, 3]))); + message.weight = 0; + + System::set_block_number(1); + Dispatch::dispatch(bridge, id, Ok(message)); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: Event::call_dispatch(call_dispatch::Event::::MessageWeightMismatch( + bridge, id, 1345000, 0, + )), + topics: vec![], + }], + ); + }); + } + + #[test] + fn should_fail_on_signature_mismatch() { + new_test_ext().execute_with(|| { + let bridge = b"ethb".to_owned(); + 
let id = [0; 4]; + + let call_origin = CallOrigin::TargetAccount(1, TestAccountPublic(1), TestSignature(99)); + let message = prepare_message( + call_origin, + Call::System(>::remark(vec![1, 2, 3])), + ); + + System::set_block_number(1); + Dispatch::dispatch(bridge, id, Ok(message)); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: Event::call_dispatch(call_dispatch::Event::::MessageSignatureMismatch( + bridge, id + )), + topics: vec![], + }], + ); + }); + } + + #[test] + fn should_emit_event_for_rejected_messages() { + new_test_ext().execute_with(|| { + let bridge = b"ethb".to_owned(); + let id = [0; 4]; + + System::set_block_number(1); + Dispatch::dispatch(bridge, id, Err(())); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: Event::call_dispatch(call_dispatch::Event::::MessageRejected(bridge, id)), + topics: vec![], + }], + ); + }); + } + + #[test] + fn should_fail_on_call_decode() { + new_test_ext().execute_with(|| { + let bridge = b"ethb".to_owned(); + let id = [0; 4]; + + let mut message = + prepare_root_message(Call::System(>::remark(vec![1, 2, 3]))); + message.call.0 = vec![]; + + System::set_block_number(1); + Dispatch::dispatch(bridge, id, Ok(message)); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: Event::call_dispatch(call_dispatch::Event::::MessageCallDecodeFailed( + bridge, id + )), + topics: vec![], + }], + ); + }); + } + + #[test] + fn should_emit_event_for_rejected_calls() { + new_test_ext().execute_with(|| { + let bridge = b"ethb".to_owned(); + let id = [0; 4]; + + let call = Call::System(>::fill_block(Perbill::from_percent(75))); + let weight = call.get_dispatch_info().weight; + let mut message = prepare_root_message(call); + message.weight = weight; + + System::set_block_number(1); + Dispatch::dispatch(bridge, id, Ok(message)); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: 
Phase::Initialization, + event: Event::call_dispatch(call_dispatch::Event::::MessageCallRejected(bridge, id)), + topics: vec![], + }], + ); + }); + } + + #[test] + fn should_dispatch_bridge_message_from_root_origin() { + new_test_ext().execute_with(|| { + let bridge = b"ethb".to_owned(); + let id = [0; 4]; + let message = prepare_root_message(Call::System(>::remark(vec![1, 2, 3]))); + + System::set_block_number(1); + Dispatch::dispatch(bridge, id, Ok(message)); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: Event::call_dispatch(call_dispatch::Event::::MessageDispatched( + bridge, + id, + Ok(()) + )), + topics: vec![], + }], + ); + }); + } + + #[test] + fn should_dispatch_bridge_message_from_target_origin() { + new_test_ext().execute_with(|| { + let id = [0; 4]; + let bridge = b"ethb".to_owned(); + + let call = Call::System(>::remark(vec![])); + let message = prepare_target_message(call); + + System::set_block_number(1); + Dispatch::dispatch(bridge, id, Ok(message)); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: Event::call_dispatch(call_dispatch::Event::::MessageDispatched( + bridge, + id, + Ok(()) + )), + topics: vec![], + }], + ); + }) + } + + #[test] + fn should_dispatch_bridge_message_from_source_origin() { + new_test_ext().execute_with(|| { + let id = [0; 4]; + let bridge = b"ethb".to_owned(); + + let call = Call::System(>::remark(vec![])); + let message = prepare_source_message(call); + + System::set_block_number(1); + Dispatch::dispatch(bridge, id, Ok(message)); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: Event::call_dispatch(call_dispatch::Event::::MessageDispatched( + bridge, + id, + Ok(()) + )), + topics: vec![], + }], + ); + }) + } + + #[test] + fn origin_is_checked_when_verifying_sending_message_using_source_root_account() { + let call = Call::System(>::remark(vec![])); + let message = 
prepare_root_message(call); + + // When message is sent by Root, CallOrigin::SourceRoot is allowed + assert!(matches!(verify_message_origin(&RawOrigin::Root, &message), Ok(None))); + + // when message is sent by some real account, CallOrigin::SourceRoot is not allowed + assert!(matches!( + verify_message_origin(&RawOrigin::Signed(1), &message), + Err(BadOrigin) + )); + } + + #[test] + fn origin_is_checked_when_verifying_sending_message_using_target_account() { + let call = Call::System(>::remark(vec![])); + let message = prepare_target_message(call); + + // When message is sent by Root, CallOrigin::TargetAccount is not allowed + assert!(matches!( + verify_message_origin(&RawOrigin::Root, &message), + Err(BadOrigin) + )); + + // When message is sent by some other account, it is rejected + assert!(matches!( + verify_message_origin(&RawOrigin::Signed(2), &message), + Err(BadOrigin) + )); + + // When message is sent by a real account, it is allowed to have origin + // CallOrigin::TargetAccount + assert!(matches!( + verify_message_origin(&RawOrigin::Signed(1), &message), + Ok(Some(1)) + )); + } + + #[test] + fn origin_is_checked_when_verifying_sending_message_using_source_account() { + let call = Call::System(>::remark(vec![])); + let message = prepare_source_message(call); + + // Sending a message from the expected origin account works + assert!(matches!( + verify_message_origin(&RawOrigin::Signed(1), &message), + Ok(Some(1)) + )); + + // If we send a message from a different account, it is rejected + assert!(matches!( + verify_message_origin(&RawOrigin::Signed(2), &message), + Err(BadOrigin) + )); + + // If we try and send the message from Root, it is also rejected + assert!(matches!( + verify_message_origin(&RawOrigin::Root, &message), + Err(BadOrigin) + )); + } +} diff --git a/polkadot/modules/ethereum-contract-builtin/Cargo.toml b/polkadot/modules/ethereum-contract-builtin/Cargo.toml new file mode 100644 index 00000000000..82e287a3abd --- /dev/null +++ 
b/polkadot/modules/ethereum-contract-builtin/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "ethereum-contract-builtin" +description = "Small crate that helps Solidity contract to verify finality proof." +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0" } +ethereum-types = "0.11.0" +finality-grandpa = "0.14.0" +hex = "0.4" +log = "0.4.14" + +# Runtime/chain specific dependencies + +rialto-runtime = { path = "../../bin/rialto/runtime" } + +# Substrate Dependencies + +sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[dev-dependencies] +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/modules/ethereum-contract-builtin/src/lib.rs b/polkadot/modules/ethereum-contract-builtin/src/lib.rs new file mode 100644 index 00000000000..47c4452aee6 --- /dev/null +++ b/polkadot/modules/ethereum-contract-builtin/src/lib.rs @@ -0,0 +1,374 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use codec::{Decode, Encode}; +use ethereum_types::U256; +use finality_grandpa::voter_set::VoterSet; +use rialto_runtime::{Block, BlockNumber, Hash, Header as RuntimeHeader}; +use sp_blockchain::Error as ClientError; +use sp_finality_grandpa::{AuthorityList, ConsensusLog, GRANDPA_ENGINE_ID}; + +/// Builtin errors. +#[derive(Debug)] +pub enum Error { + /// Failed to decode block number. + BlockNumberDecode, + /// Failed to decode Substrate header. + HeaderDecode(codec::Error), + /// Failed to decode best voters set. + BestSetDecode(codec::Error), + /// Best voters set is invalid. + InvalidBestSet, + /// Failed to decode finality proof. + FinalityProofDecode(codec::Error), + /// Failed to verify justification. + JustificationVerify(Box), +} + +/// Substrate header. +#[derive(Debug, PartialEq)] +pub struct Header { + /// Header hash. + pub hash: Hash, + /// Parent header hash. + pub parent_hash: Hash, + /// Header number. + pub number: BlockNumber, + /// GRANDPA validators change signal. + pub signal: Option, +} + +/// GRANDPA validators set change signal. +#[derive(Debug, PartialEq)] +pub struct ValidatorsSetSignal { + /// Signal delay. + pub delay: BlockNumber, + /// New validators set. + pub validators: Vec, +} + +/// Convert from U256 to BlockNumber. Fails if `U256` value isn't fitting within `BlockNumber` +/// limits (the runtime referenced by this module uses u32 as `BlockNumber`). +pub fn to_substrate_block_number(number: U256) -> Result { + let substrate_block_number = match number == number.low_u32().into() { + true => Ok(number.low_u32()), + false => Err(Error::BlockNumberDecode), + }; + + log::trace!( + target: "bridge-builtin", + "Parsed Substrate block number from {}: {:?}", + number, + substrate_block_number, + ); + + substrate_block_number +} + +/// Convert from BlockNumber to U256. 
+pub fn from_substrate_block_number(number: BlockNumber) -> Result { + Ok(U256::from(number as u64)) +} + +/// Parse Substrate header. +pub fn parse_substrate_header(raw_header: &[u8]) -> Result { + let substrate_header = RuntimeHeader::decode(&mut &*raw_header) + .map(|header| Header { + hash: header.hash(), + parent_hash: header.parent_hash, + number: header.number, + signal: sp_runtime::traits::Header::digest(&header) + .log(|log| { + log.as_consensus().and_then(|(engine_id, log)| { + if engine_id == GRANDPA_ENGINE_ID { + Some(log) + } else { + None + } + }) + }) + .and_then(|log| ConsensusLog::decode(&mut &*log).ok()) + .and_then(|log| match log { + ConsensusLog::ScheduledChange(scheduled_change) => Some(ValidatorsSetSignal { + delay: scheduled_change.delay, + validators: scheduled_change.next_authorities.encode(), + }), + _ => None, + }), + }) + .map_err(Error::HeaderDecode); + + log::debug!( + target: "bridge-builtin", + "Parsed Substrate header {}: {:?}", + if substrate_header.is_ok() { + format!("<{}-bytes-blob>", raw_header.len()) + } else { + hex::encode(raw_header) + }, + substrate_header, + ); + + substrate_header +} + +/// Verify GRANDPA finality proof. 
+pub fn verify_substrate_finality_proof( + finality_target_number: BlockNumber, + finality_target_hash: Hash, + best_set_id: u64, + raw_best_set: &[u8], + raw_finality_proof: &[u8], +) -> Result<(), Error> { + let best_set = AuthorityList::decode(&mut &*raw_best_set) + .map_err(Error::BestSetDecode) + .and_then(|authorities| VoterSet::new(authorities.into_iter()).ok_or(Error::InvalidBestSet)); + + log::debug!( + target: "bridge-builtin", + "Parsed Substrate authorities set {}: {:?}", + if best_set.is_ok() { + format!("<{}-bytes-blob>", raw_best_set.len()) + } else { + hex::encode(raw_best_set) + }, + best_set, + ); + + let best_set = best_set?; + + let verify_result = sc_finality_grandpa::GrandpaJustification::::decode_and_verify_finalizes( + &raw_finality_proof, + (finality_target_hash, finality_target_number), + best_set_id, + &best_set, + ) + .map_err(Box::new) + .map_err(Error::JustificationVerify) + .map(|_| ()); + + log::debug!( + target: "bridge-builtin", + "Verified Substrate finality proof {}: {:?}", + if verify_result.is_ok() { + format!("<{}-bytes-blob>", raw_finality_proof.len()) + } else { + hex::encode(raw_finality_proof) + }, + verify_result, + ); + + verify_result +} + +#[cfg(test)] +mod tests { + use super::*; + use rialto_runtime::DigestItem; + use sp_core::crypto::Public; + use sp_finality_grandpa::{AuthorityId, ScheduledChange}; + use sp_runtime::generic::Digest; + + #[test] + fn to_substrate_block_number_succeeds() { + assert_eq!(to_substrate_block_number(U256::zero()).unwrap(), 0); + assert_eq!( + to_substrate_block_number(U256::from(std::u32::MAX as u64)).unwrap(), + 0xFFFFFFFF + ); + } + + #[test] + fn to_substrate_block_number_fails() { + assert!(matches!( + to_substrate_block_number(U256::from(std::u32::MAX as u64 + 1)), + Err(Error::BlockNumberDecode) + )); + } + + #[test] + fn from_substrate_block_number_succeeds() { + assert_eq!(from_substrate_block_number(0).unwrap(), U256::zero()); + assert_eq!( + 
from_substrate_block_number(std::u32::MAX).unwrap(), + U256::from(std::u32::MAX) + ); + } + + #[test] + fn substrate_header_without_signal_parsed() { + let raw_header = RuntimeHeader { + parent_hash: [0u8; 32].into(), + number: 0, + state_root: "b2fc47904df5e355c6ab476d89fbc0733aeddbe302f0b94ba4eea9283f7e89e7" + .parse() + .unwrap(), + extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" + .parse() + .unwrap(), + digest: Default::default(), + } + .encode(); + assert_eq!( + raw_header, + hex::decode("000000000000000000000000000000000000000000000000000000000000000000b2fc47904df5e355c6ab476d89fbc0733aeddbe302f0b94ba4eea9283f7e89e703170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c11131400").unwrap(), + ); + + assert_eq!( + parse_substrate_header(&raw_header).unwrap(), + Header { + hash: "afbbeb92bf6ff14f60bdef0aa89f043dd403659ae82665238810ace0d761f6d0" + .parse() + .unwrap(), + parent_hash: Default::default(), + number: 0, + signal: None, + }, + ); + } + + #[test] + fn substrate_header_with_signal_parsed() { + let authorities = vec![ + (AuthorityId::from_slice(&[1; 32]), 101), + (AuthorityId::from_slice(&[3; 32]), 103), + ]; + let mut digest = Digest::default(); + digest.push(DigestItem::Consensus( + GRANDPA_ENGINE_ID, + ConsensusLog::ScheduledChange(ScheduledChange { + next_authorities: authorities.clone(), + delay: 8, + }) + .encode(), + )); + + let raw_header = RuntimeHeader { + parent_hash: "c0ac300d4005141ea690f3df593e049739c227316eb7f05052f3ee077388b68b" + .parse() + .unwrap(), + number: 8, + state_root: "822d6b412033aa9ac8e1722918eec5f25633529225754b3d4149982f5cacd4aa" + .parse() + .unwrap(), + extrinsics_root: "e7b07c0ce2799416ce7877b9cefc7f596bea5e8813bb2a0abf760414073ca928" + .parse() + .unwrap(), + digest, + } + .encode(); + assert_eq!( + raw_header, + 
hex::decode("c0ac300d4005141ea690f3df593e049739c227316eb7f05052f3ee077388b68b20822d6b412033aa9ac8e1722918eec5f25633529225754b3d4149982f5cacd4aae7b07c0ce2799416ce7877b9cefc7f596bea5e8813bb2a0abf760414073ca928040446524e4b59010108010101010101010101010101010101010101010101010101010101010101010165000000000000000303030303030303030303030303030303030303030303030303030303030303670000000000000008000000").unwrap(), + ); + + assert_eq!( + parse_substrate_header(&raw_header).unwrap(), + Header { + hash: "3dfebb280bd87a4640f89d7f2adecd62b88148747bff5b63af6e1634ee37a56e" + .parse() + .unwrap(), + parent_hash: "c0ac300d4005141ea690f3df593e049739c227316eb7f05052f3ee077388b68b" + .parse() + .unwrap(), + number: 8, + signal: Some(ValidatorsSetSignal { + delay: 8, + validators: authorities.encode(), + }), + }, + ); + } + + /// Number of the example block with justification. + const EXAMPLE_JUSTIFIED_BLOCK_NUMBER: u32 = 8; + /// Hash of the example block with justification. + const EXAMPLE_JUSTIFIED_BLOCK_HASH: &str = "a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f343775"; + /// Id of authorities set that have generated example justification. Could be computed by tracking + /// every set change in canonized headers. + const EXAMPLE_AUTHORITIES_SET_ID: u64 = 0; + /// Encoded authorities set that has generated example justification. Could be fetched from `ScheduledChange` + /// digest of the block that has scheduled this set OR by calling `GrandpaApi::grandpa_authorities()` at + /// appropriate block. + const EXAMPLE_AUTHORITIES_SET: &str = "1488dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0ee0100000000000000d17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae690100000000000000439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234f01000000000000005e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d901000000000000001dfe3e22cc0d45c70779c1095f7489a8ef3cf52d62fbd8c2fa38c9f1723502b50100000000000000"; + /// Example justification. 
Could be fetched by calling 'chain_getBlock' RPC. + const EXAMPLE_JUSTIFICATION: &str = "2600000000000000a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000010a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000d66b4ceb57ef8bcbc955071b597c8c5d2adcfdbb009c73f8438d342670fdeca9ac60686cbd58105b10f51d0a64a8e73b2e5829b2eab3248a008c472852130b00439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234fa2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000f5730c14d3cd22b7661e2f5fcb3139dd5fef37f946314a441d01b40ce1200ef70d810525f23fd278b588cd67473c200bda83c338c407b479386aa83798e5970b5e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d9a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000c78d6ec463f476461a695b4791d30e7626d16fdf72d7c252c2cad387495a97e8c2827ed4d5af853d6e05d31cb6fb7438c9481a7e9c6990d60a9bfaf6a6e1930988dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0eea2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000052b4fc52d430286b3e2d650aa6e01b6ff4fae8b968893a62be789209eb97ee6e23780d3f5af7042d85bb48f1b202890b22724dfebce138826f66a5e00324320fd17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae6900"; + + #[test] + fn substrate_header_parse_fails() { + assert!(matches!(parse_substrate_header(&[]), Err(_))); + } + + #[test] + fn verify_substrate_finality_proof_succeeds() { + verify_substrate_finality_proof( + EXAMPLE_JUSTIFIED_BLOCK_NUMBER, + EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), + EXAMPLE_AUTHORITIES_SET_ID, + &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), + &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), + ) + .unwrap(); + } + + #[test] + fn verify_substrate_finality_proof_fails_when_wrong_block_is_finalized() { + verify_substrate_finality_proof( + 4, + Default::default(), + EXAMPLE_AUTHORITIES_SET_ID, + &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), + &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), + ) + .unwrap_err(); + } + + #[test] + 
fn verify_substrate_finality_proof_fails_when_wrong_set_is_provided() { + verify_substrate_finality_proof( + EXAMPLE_JUSTIFIED_BLOCK_NUMBER, + EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), + EXAMPLE_AUTHORITIES_SET_ID, + &hex::decode("deadbeef").unwrap(), + &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), + ) + .unwrap_err(); + } + + #[test] + fn verify_substrate_finality_proof_fails_when_wrong_set_id_is_provided() { + verify_substrate_finality_proof( + EXAMPLE_JUSTIFIED_BLOCK_NUMBER, + EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), + 42, + &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), + &hex::decode(EXAMPLE_JUSTIFICATION).unwrap(), + ) + .unwrap_err(); + } + + #[test] + fn verify_substrate_finality_proof_fails_when_wrong_proof_is_provided() { + verify_substrate_finality_proof( + EXAMPLE_JUSTIFIED_BLOCK_NUMBER, + EXAMPLE_JUSTIFIED_BLOCK_HASH.parse().unwrap(), + 0, + &hex::decode(EXAMPLE_AUTHORITIES_SET).unwrap(), + &hex::decode("deadbeef").unwrap(), + ) + .unwrap_err(); + } +} diff --git a/polkadot/modules/ethereum/Cargo.toml b/polkadot/modules/ethereum/Cargo.toml new file mode 100644 index 00000000000..fdd93ed7331 --- /dev/null +++ b/polkadot/modules/ethereum/Cargo.toml @@ -0,0 +1,49 @@ +[package] +name = "pallet-bridge-eth-poa" +description = "A Substrate Runtime module that is able to verify PoA headers and their finality." 
+version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"], optional = true } +log = { version = "0.4.14", default-features = false } +serde = { version = "1.0", optional = true } + +# Bridge dependencies + +bp-eth-poa = { path = "../../primitives/ethereum-poa", default-features = false } + +# Substrate Dependencies + +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + +[dev-dependencies] +libsecp256k1 = { version = "0.3.4", features = ["hmac"] } +hex-literal = "0.3" + +[features] +default = ["std"] +std = [ + "bp-eth-poa/std", + "codec/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "log/std", + "serde", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "libsecp256k1", +] diff --git a/polkadot/modules/ethereum/src/benchmarking.rs b/polkadot/modules/ethereum/src/benchmarking.rs new file mode 100644 index 00000000000..960dbe9afec --- /dev/null +++ b/polkadot/modules/ethereum/src/benchmarking.rs @@ -0,0 +1,270 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
+// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use super::*; + +use crate::test_utils::{ + build_custom_header, build_genesis_header, insert_header, validator_utils::*, validators_change_receipt, + HeaderBuilder, +}; + +use bp_eth_poa::{compute_merkle_root, U256}; +use frame_benchmarking::benchmarks_instance; +use frame_system::RawOrigin; + +benchmarks_instance! { + // Benchmark `import_unsigned_header` extrinsic with the best possible conditions: + // * Parent header is finalized. + // * New header doesn't require receipts. + // * Nothing is finalized by new header. + // * Nothing is pruned by new header. + import_unsigned_header_best_case { + let n in 1..1000; + + let num_validators = 2; + let initial_header = initialize_bench::(num_validators); + + // prepare header to be inserted + let header = build_custom_header( + &validator(1), + &initial_header, + |mut header| { + header.gas_limit = header.gas_limit + U256::from(n); + header + }, + ); + }: import_unsigned_header(RawOrigin::None, header, None) + verify { + let storage = BridgeStorage::::new(); + assert_eq!(storage.best_block().0.number, 1); + assert_eq!(storage.finalized_block().number, 0); + } + + // Our goal with this bench is to try and see the effect that finalizing difference ranges of + // blocks has on our import time. 
As such we need to make sure that we keep the number of + // validators fixed while changing the number blocks finalized (the complexity parameter) by + // importing the last header. + // + // One important thing to keep in mind is that the runtime provides a finality cache in order to + // reduce the overhead of header finalization. However, this is only triggered every 16 blocks. + import_unsigned_finality { + // Our complexity parameter, n, will represent the number of blocks imported before + // finalization. + let n in 1..7; + + let mut storage = BridgeStorage::::new(); + let num_validators: u32 = 2; + let initial_header = initialize_bench::(num_validators as usize); + + // Since we only have two validators we need to make sure the number of blocks is even to + // make sure the right validator signs the final block + let num_blocks = 2 * n; + let mut headers = Vec::new(); + let mut parent = initial_header.clone(); + + // Import a bunch of headers without any verification, will ensure that they're not + // finalized prematurely + for i in 1..=num_blocks { + let header = HeaderBuilder::with_parent(&parent).sign_by(&validator(0)); + let id = header.compute_id(); + insert_header(&mut storage, header.clone()); + headers.push(header.clone()); + parent = header; + } + + let last_header = headers.last().unwrap().clone(); + let last_authority = validator(1); + + // Need to make sure that the header we're going to import hasn't been inserted + // into storage already + let header = HeaderBuilder::with_parent(&last_header).sign_by(&last_authority); + }: import_unsigned_header(RawOrigin::None, header, None) + verify { + let storage = BridgeStorage::::new(); + assert_eq!(storage.best_block().0.number, (num_blocks + 1) as u64); + assert_eq!(storage.finalized_block().number, num_blocks as u64); + } + + // Basically the exact same as `import_unsigned_finality` but with a different range for the + // complexity parameter. 
In this bench we use a larger range of blocks to see how performance + // changes when the finality cache kicks in (>16 blocks). + import_unsigned_finality_with_cache { + // Our complexity parameter, n, will represent the number of blocks imported before + // finalization. + let n in 7..100; + + let mut storage = BridgeStorage::::new(); + let num_validators: u32 = 2; + let initial_header = initialize_bench::(num_validators as usize); + + // Since we only have two validators we need to make sure the number of blocks is even to + // make sure the right validator signs the final block + let num_blocks = 2 * n; + let mut headers = Vec::new(); + let mut parent = initial_header.clone(); + + // Import a bunch of headers without any verification, will ensure that they're not + // finalized prematurely + for i in 1..=num_blocks { + let header = HeaderBuilder::with_parent(&parent).sign_by(&validator(0)); + let id = header.compute_id(); + insert_header(&mut storage, header.clone()); + headers.push(header.clone()); + parent = header; + } + + let last_header = headers.last().unwrap().clone(); + let last_authority = validator(1); + + // Need to make sure that the header we're going to import hasn't been inserted + // into storage already + let header = HeaderBuilder::with_parent(&last_header).sign_by(&last_authority); + }: import_unsigned_header(RawOrigin::None, header, None) + verify { + let storage = BridgeStorage::::new(); + assert_eq!(storage.best_block().0.number, (num_blocks + 1) as u64); + assert_eq!(storage.finalized_block().number, num_blocks as u64); + } + + // A block import may trigger a pruning event, which adds extra work to the import progress. + // In this bench we trigger a pruning event in order to see how much extra time is spent by the + // runtime dealing with it. In the Ethereum Pallet, we're limited pruning to eight blocks in a + // single import, as dictated by MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT. 
+ import_unsigned_pruning { + let n in 1..MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT as u32; + + let mut storage = BridgeStorage::::new(); + + let num_validators = 3; + let initial_header = initialize_bench::(num_validators as usize); + let validators = validators(num_validators); + + // Want to prune eligible blocks between [0, n) + BlocksToPrune::::put(PruningRange { + oldest_unpruned_block: 0, + oldest_block_to_keep: n as u64, + }); + + let mut parent = initial_header; + for i in 1..=n { + let header = HeaderBuilder::with_parent(&parent).sign_by_set(&validators); + let id = header.compute_id(); + insert_header(&mut storage, header.clone()); + parent = header; + } + + let header = HeaderBuilder::with_parent(&parent).sign_by_set(&validators); + }: import_unsigned_header(RawOrigin::None, header, None) + verify { + let storage = BridgeStorage::::new(); + let max_pruned: u64 = (n - 1) as _; + assert_eq!(storage.best_block().0.number, (n + 1) as u64); + assert!(HeadersByNumber::::get(&0).is_none()); + assert!(HeadersByNumber::::get(&max_pruned).is_none()); + } + + // The goal of this bench is to import a block which contains a transaction receipt. The receipt + // will contain a validator set change. Verifying the receipt root is an expensive operation to + // do, which is why we're interested in benchmarking it. + import_unsigned_with_receipts { + let n in 1..100; + + let mut storage = BridgeStorage::::new(); + + let num_validators = 1; + let initial_header = initialize_bench::(num_validators as usize); + + let mut receipts = vec![]; + for i in 1..=n { + let receipt = validators_change_receipt(Default::default()); + receipts.push(receipt) + } + let encoded_receipts = receipts.iter().map(|r| r.rlp()); + + // We need this extra header since this is what signals a validator set transition. 
This + // will ensure that the next header is within the "Contract" window + let header1 = HeaderBuilder::with_parent(&initial_header).sign_by(&validator(0)); + insert_header(&mut storage, header1.clone()); + + let header = build_custom_header( + &validator(0), + &header1, + |mut header| { + // Logs Bloom signals a change in validator set + header.log_bloom = (&[0xff; 256]).into(); + header.receipts_root = compute_merkle_root(encoded_receipts); + header + }, + ); + }: import_unsigned_header(RawOrigin::None, header, Some(receipts)) + verify { + let storage = BridgeStorage::::new(); + assert_eq!(storage.best_block().0.number, 2); + } +} + +fn initialize_bench, I: Instance>(num_validators: usize) -> AuraHeader { + // Initialize storage with some initial header + let initial_header = build_genesis_header(&validator(0)); + let initial_difficulty = initial_header.difficulty; + let initial_validators = validators_addresses(num_validators as usize); + + initialize_storage::(&initial_header, initial_difficulty, &initial_validators); + + initial_header +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{run_test, TestRuntime}; + use frame_support::assert_ok; + + #[test] + fn insert_unsigned_header_best_case() { + run_test(1, |_| { + assert_ok!(test_benchmark_import_unsigned_header_best_case::()); + }); + } + + #[test] + fn insert_unsigned_header_finality() { + run_test(1, |_| { + assert_ok!(test_benchmark_import_unsigned_finality::()); + }); + } + + #[test] + fn insert_unsigned_header_finality_with_cache() { + run_test(1, |_| { + assert_ok!(test_benchmark_import_unsigned_finality_with_cache::()); + }); + } + + #[test] + fn insert_unsigned_header_pruning() { + run_test(1, |_| { + assert_ok!(test_benchmark_import_unsigned_pruning::()); + }); + } + + #[test] + fn insert_unsigned_header_receipts() { + run_test(1, |_| { + assert_ok!(test_benchmark_import_unsigned_with_receipts::()); + }); + } +} diff --git a/polkadot/modules/ethereum/src/error.rs 
b/polkadot/modules/ethereum/src/error.rs new file mode 100644 index 00000000000..ad798379da7 --- /dev/null +++ b/polkadot/modules/ethereum/src/error.rs @@ -0,0 +1,101 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use sp_runtime::RuntimeDebug; + +/// Header import error. +#[derive(Clone, Copy, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(PartialEq))] +pub enum Error { + /// The header is beyond last finalized and can not be imported. + AncientHeader = 0, + /// The header is already imported. + KnownHeader = 1, + /// Seal has an incorrect format. + InvalidSealArity = 2, + /// Block number isn't sensible. + RidiculousNumber = 3, + /// Block has too much gas used. + TooMuchGasUsed = 4, + /// Gas limit header field is invalid. + InvalidGasLimit = 5, + /// Extra data is of an invalid length. + ExtraDataOutOfBounds = 6, + /// Timestamp header overflowed. + TimestampOverflow = 7, + /// The parent header is missing from the blockchain. + MissingParentBlock = 8, + /// The header step is missing from the header. + MissingStep = 9, + /// The header signature is missing from the header. + MissingSignature = 10, + /// Empty steps are missing from the header. + MissingEmptySteps = 11, + /// The same author issued different votes at the same step. 
+ DoubleVote = 12, + /// Validation proof insufficient. + InsufficientProof = 13, + /// Difficulty header field is invalid. + InvalidDifficulty = 14, + /// The received block is from an incorrect proposer. + NotValidator = 15, + /// Missing transaction receipts for the operation. + MissingTransactionsReceipts = 16, + /// Redundant transaction receipts are provided. + RedundantTransactionsReceipts = 17, + /// Provided transactions receipts are not matching the header. + TransactionsReceiptsMismatch = 18, + /// Can't accept unsigned header from the far future. + UnsignedTooFarInTheFuture = 19, + /// Trying to finalize sibling of finalized block. + TryingToFinalizeSibling = 20, + /// Header timestamp is ahead of on-chain timestamp + HeaderTimestampIsAhead = 21, +} + +impl Error { + pub fn msg(&self) -> &'static str { + match *self { + Error::AncientHeader => "Header is beyound last finalized and can not be imported", + Error::KnownHeader => "Header is already imported", + Error::InvalidSealArity => "Header has an incorrect seal", + Error::RidiculousNumber => "Header has too large number", + Error::TooMuchGasUsed => "Header has too much gas used", + Error::InvalidGasLimit => "Header has invalid gas limit", + Error::ExtraDataOutOfBounds => "Header has too large extra data", + Error::TimestampOverflow => "Header has too large timestamp", + Error::MissingParentBlock => "Header has unknown parent hash", + Error::MissingStep => "Header is missing step seal", + Error::MissingSignature => "Header is missing signature seal", + Error::MissingEmptySteps => "Header is missing empty steps seal", + Error::DoubleVote => "Header has invalid step in seal", + Error::InsufficientProof => "Header has insufficient proof", + Error::InvalidDifficulty => "Header has invalid difficulty", + Error::NotValidator => "Header is sealed by unexpected validator", + Error::MissingTransactionsReceipts => "The import operation requires transactions receipts", + Error::RedundantTransactionsReceipts => 
"Redundant transactions receipts are provided", + Error::TransactionsReceiptsMismatch => "Invalid transactions receipts provided", + Error::UnsignedTooFarInTheFuture => "The unsigned header is too far in future", + Error::TryingToFinalizeSibling => "Trying to finalize sibling of finalized block", + Error::HeaderTimestampIsAhead => "Header timestamp is ahead of on-chain timestamp", + } + } + + /// Return unique error code. + pub fn code(&self) -> u8 { + *self as u8 + } +} diff --git a/polkadot/modules/ethereum/src/finality.rs b/polkadot/modules/ethereum/src/finality.rs new file mode 100644 index 00000000000..58987c6b29b --- /dev/null +++ b/polkadot/modules/ethereum/src/finality.rs @@ -0,0 +1,556 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::error::Error; +use crate::Storage; +use bp_eth_poa::{public_to_address, Address, AuraHeader, HeaderId, SealedEmptyStep, H256}; +use codec::{Decode, Encode}; +use sp_io::crypto::secp256k1_ecdsa_recover; +use sp_runtime::RuntimeDebug; +use sp_std::collections::{ + btree_map::{BTreeMap, Entry}, + btree_set::BTreeSet, + vec_deque::VecDeque, +}; +use sp_std::prelude::*; + +/// Cached finality votes for given block. 
+#[derive(RuntimeDebug)] +#[cfg_attr(test, derive(PartialEq))] +pub struct CachedFinalityVotes { + /// True if we have stopped at best finalized block' sibling. This means + /// that we are trying to finalize block from fork that has forked before + /// best finalized. + pub stopped_at_finalized_sibling: bool, + /// Header ancestors that were read while we have been searching for + /// cached votes entry. Newest header has index 0. + pub unaccounted_ancestry: VecDeque<(HeaderId, Option, AuraHeader)>, + /// Cached finality votes, if they have been found. The associated + /// header is not included into `unaccounted_ancestry`. + pub votes: Option>, +} + +/// Finality effects. +#[derive(RuntimeDebug)] +#[cfg_attr(test, derive(PartialEq))] +pub struct FinalityEffects { + /// Finalized headers. + pub finalized_headers: Vec<(HeaderId, Option)>, + /// Finality votes used in computation. + pub votes: FinalityVotes, +} + +/// Finality votes for given block. +#[derive(RuntimeDebug, Decode, Encode)] +#[cfg_attr(test, derive(Clone, PartialEq))] +pub struct FinalityVotes { + /// Number of votes per each validator. + pub votes: BTreeMap, + /// Ancestry blocks with oldest ancestors at the beginning and newest at the + /// end of the queue. + pub ancestry: VecDeque>, +} + +/// Information about block ancestor that is used in computations. +#[derive(RuntimeDebug, Decode, Encode)] +#[cfg_attr(test, derive(Clone, Default, PartialEq))] +pub struct FinalityAncestor { + /// Bock id. + pub id: HeaderId, + /// Block submitter. + pub submitter: Option, + /// Validators that have signed this block and empty steps on top + /// of this block. + pub signers: BTreeSet
, +} + +/// Tries to finalize blocks when given block is imported. +/// +/// Returns numbers and hashes of finalized blocks in ascending order. +pub fn finalize_blocks( + storage: &S, + best_finalized: HeaderId, + header_validators: (HeaderId, &[Address]), + id: HeaderId, + submitter: Option<&S::Submitter>, + header: &AuraHeader, + two_thirds_majority_transition: u64, +) -> Result, Error> { + // compute count of voters for every unfinalized block in ancestry + let validators = header_validators.1.iter().collect(); + let votes = prepare_votes( + header + .parent_id() + .map(|parent_id| { + storage.cached_finality_votes(&parent_id, &best_finalized, |hash| { + *hash == header_validators.0.hash || *hash == best_finalized.hash + }) + }) + .unwrap_or_default(), + best_finalized, + &validators, + id, + header, + submitter.cloned(), + )?; + + // now let's iterate in reverse order && find just finalized blocks + let mut finalized_headers = Vec::new(); + let mut current_votes = votes.votes.clone(); + for ancestor in &votes.ancestry { + if !is_finalized( + &validators, + ¤t_votes, + ancestor.id.number >= two_thirds_majority_transition, + ) { + break; + } + + remove_signers_votes(&ancestor.signers, &mut current_votes); + finalized_headers.push((ancestor.id, ancestor.submitter.clone())); + } + + Ok(FinalityEffects { + finalized_headers, + votes, + }) +} + +/// Returns true if there are enough votes to treat this header as finalized. +fn is_finalized( + validators: &BTreeSet<&Address>, + votes: &BTreeMap, + requires_two_thirds_majority: bool, +) -> bool { + (!requires_two_thirds_majority && votes.len() * 2 > validators.len()) + || (requires_two_thirds_majority && votes.len() * 3 > validators.len() * 2) +} + +/// Prepare 'votes' of header and its ancestors' signers. 
+pub(crate) fn prepare_votes( + mut cached_votes: CachedFinalityVotes, + best_finalized: HeaderId, + validators: &BTreeSet<&Address>, + id: HeaderId, + header: &AuraHeader, + submitter: Option, +) -> Result, Error> { + // if we have reached finalized block sibling, then we're trying + // to switch finalized blocks + if cached_votes.stopped_at_finalized_sibling { + return Err(Error::TryingToFinalizeSibling); + } + + // this fn can only work with single validators set + if !validators.contains(&header.author) { + return Err(Error::NotValidator); + } + + // now we have votes that were valid when some block B has been inserted + // things may have changed a bit, but we do not need to read anything else + // from the db, because we have ancestry + // so the only thing we need to do is: + // 1) remove votes from blocks that have been finalized after B has been inserted; + // 2) add votes from B descendants + let mut votes = cached_votes.votes.unwrap_or_default(); + + // remove votes from finalized blocks + while let Some(old_ancestor) = votes.ancestry.pop_front() { + if old_ancestor.id.number > best_finalized.number { + votes.ancestry.push_front(old_ancestor); + break; + } + + remove_signers_votes(&old_ancestor.signers, &mut votes.votes); + } + + // add votes from new blocks + let mut parent_empty_step_signers = empty_steps_signers(header); + let mut unaccounted_ancestry = VecDeque::new(); + while let Some((ancestor_id, ancestor_submitter, ancestor)) = cached_votes.unaccounted_ancestry.pop_front() { + let mut signers = empty_steps_signers(&ancestor); + sp_std::mem::swap(&mut signers, &mut parent_empty_step_signers); + signers.insert(ancestor.author); + + add_signers_votes(validators, &signers, &mut votes.votes)?; + + unaccounted_ancestry.push_front(FinalityAncestor { + id: ancestor_id, + submitter: ancestor_submitter, + signers, + }); + } + votes.ancestry.extend(unaccounted_ancestry); + + // add votes from block itself + let mut header_signers = BTreeSet::new(); + 
header_signers.insert(header.author); + *votes.votes.entry(header.author).or_insert(0) += 1; + votes.ancestry.push_back(FinalityAncestor { + id, + submitter, + signers: header_signers, + }); + + Ok(votes) +} + +/// Increase count of 'votes' for every passed signer. +/// Fails if at least one of signers is not in the `validators` set. +fn add_signers_votes( + validators: &BTreeSet<&Address>, + signers_to_add: &BTreeSet
, + votes: &mut BTreeMap, +) -> Result<(), Error> { + for signer in signers_to_add { + if !validators.contains(signer) { + return Err(Error::NotValidator); + } + + *votes.entry(*signer).or_insert(0) += 1; + } + + Ok(()) +} + +/// Decrease 'votes' count for every passed signer. +fn remove_signers_votes(signers_to_remove: &BTreeSet
, votes: &mut BTreeMap) { + for signer in signers_to_remove { + match votes.entry(*signer) { + Entry::Occupied(mut entry) => { + if *entry.get() <= 1 { + entry.remove(); + } else { + *entry.get_mut() -= 1; + } + } + Entry::Vacant(_) => unreachable!("we only remove signers that have been added; qed"), + } + } +} + +/// Returns unique set of empty steps signers. +fn empty_steps_signers(header: &AuraHeader) -> BTreeSet
{ + header + .empty_steps() + .into_iter() + .flatten() + .filter_map(|step| empty_step_signer(&step, &header.parent_hash)) + .collect::>() +} + +/// Returns author of empty step signature. +fn empty_step_signer(empty_step: &SealedEmptyStep, parent_hash: &H256) -> Option
{ + let message = empty_step.message(parent_hash); + secp256k1_ecdsa_recover(empty_step.signature.as_fixed_bytes(), message.as_fixed_bytes()) + .ok() + .map(|public| public_to_address(&public)) +} + +impl Default for CachedFinalityVotes { + fn default() -> Self { + CachedFinalityVotes { + stopped_at_finalized_sibling: false, + unaccounted_ancestry: VecDeque::new(), + votes: None, + } + } +} + +impl Default for FinalityVotes { + fn default() -> Self { + FinalityVotes { + votes: BTreeMap::new(), + ancestry: VecDeque::new(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{insert_header, run_test, validator, validators_addresses, HeaderBuilder, TestRuntime}; + use crate::{BridgeStorage, FinalityCache, HeaderToImport}; + use frame_support::StorageMap; + + const TOTAL_VALIDATORS: usize = 5; + + #[test] + fn verifies_header_author() { + run_test(TOTAL_VALIDATORS, |_| { + assert_eq!( + finalize_blocks( + &BridgeStorage::::new(), + Default::default(), + (Default::default(), &[]), + Default::default(), + None, + &AuraHeader::default(), + 0, + ), + Err(Error::NotValidator), + ); + }); + } + + #[test] + fn finalize_blocks_works() { + run_test(TOTAL_VALIDATORS, |ctx| { + // let's say we have 5 validators (we need 'votes' from 3 validators to achieve + // finality) + let mut storage = BridgeStorage::::new(); + + // when header#1 is inserted, nothing is finalized (1 vote) + let header1 = HeaderBuilder::with_parent(&ctx.genesis).sign_by(&validator(0)); + let id1 = header1.compute_id(); + let mut header_to_import = HeaderToImport { + context: storage.import_context(None, &header1.parent_hash).unwrap(), + is_best: true, + id: id1, + header: header1, + total_difficulty: 0.into(), + enacted_change: None, + scheduled_change: None, + finality_votes: Default::default(), + }; + assert_eq!( + finalize_blocks( + &storage, + ctx.genesis.compute_id(), + (Default::default(), &ctx.addresses), + id1, + None, + &header_to_import.header, + u64::max_value(), + ) + 
.map(|eff| eff.finalized_headers), + Ok(Vec::new()), + ); + storage.insert_header(header_to_import.clone()); + + // when header#2 is inserted, nothing is finalized (2 votes) + header_to_import.header = HeaderBuilder::with_parent_hash(id1.hash).sign_by(&validator(1)); + header_to_import.id = header_to_import.header.compute_id(); + let id2 = header_to_import.header.compute_id(); + assert_eq!( + finalize_blocks( + &storage, + ctx.genesis.compute_id(), + (Default::default(), &ctx.addresses), + id2, + None, + &header_to_import.header, + u64::max_value(), + ) + .map(|eff| eff.finalized_headers), + Ok(Vec::new()), + ); + storage.insert_header(header_to_import.clone()); + + // when header#3 is inserted, header#1 is finalized (3 votes) + header_to_import.header = HeaderBuilder::with_parent_hash(id2.hash).sign_by(&validator(2)); + header_to_import.id = header_to_import.header.compute_id(); + let id3 = header_to_import.header.compute_id(); + assert_eq!( + finalize_blocks( + &storage, + ctx.genesis.compute_id(), + (Default::default(), &ctx.addresses), + id3, + None, + &header_to_import.header, + u64::max_value(), + ) + .map(|eff| eff.finalized_headers), + Ok(vec![(id1, None)]), + ); + storage.insert_header(header_to_import); + }); + } + + #[test] + fn cached_votes_are_updated_with_ancestry() { + // we're inserting header#5 + // cached votes are from header#3 + // header#4 has finalized header#1 and header#2 + // => when inserting header#5, we need to: + // 1) remove votes from header#1 and header#2 + // 2) add votes from header#4 and header#5 + let validators = validators_addresses(5); + let headers = (1..6) + .map(|number| HeaderBuilder::with_number(number).sign_by(&validator(number as usize - 1))) + .collect::>(); + let ancestry = headers + .iter() + .map(|header| FinalityAncestor { + id: header.compute_id(), + signers: vec![header.author].into_iter().collect(), + ..Default::default() + }) + .collect::>(); + let header5 = headers[4].clone(); + assert_eq!( + 
prepare_votes::<()>( + CachedFinalityVotes { + stopped_at_finalized_sibling: false, + unaccounted_ancestry: vec![(headers[3].compute_id(), None, headers[3].clone()),] + .into_iter() + .collect(), + votes: Some(FinalityVotes { + votes: vec![(validators[0], 1), (validators[1], 1), (validators[2], 1),] + .into_iter() + .collect(), + ancestry: ancestry[..3].iter().cloned().collect(), + }), + }, + headers[1].compute_id(), + &validators.iter().collect(), + header5.compute_id(), + &header5, + None, + ) + .unwrap(), + FinalityVotes { + votes: vec![(validators[2], 1), (validators[3], 1), (validators[4], 1),] + .into_iter() + .collect(), + ancestry: ancestry[2..].iter().cloned().collect(), + }, + ); + } + + #[test] + fn prepare_votes_respects_finality_cache() { + run_test(TOTAL_VALIDATORS, |ctx| { + // we need signatures of 3 validators to finalize block + let mut storage = BridgeStorage::::new(); + + // headers 1..3 are signed by validator#0 + // headers 4..6 are signed by validator#1 + // headers 7..9 are signed by validator#2 + let mut hashes = Vec::new(); + let mut headers = Vec::new(); + let mut ancestry = Vec::new(); + let mut parent_hash = ctx.genesis.compute_hash(); + for i in 1..10 { + let header = HeaderBuilder::with_parent_hash(parent_hash).sign_by(&validator((i - 1) / 3)); + let id = header.compute_id(); + insert_header(&mut storage, header.clone()); + hashes.push(id.hash); + ancestry.push(FinalityAncestor { + id: header.compute_id(), + submitter: None, + signers: vec![header.author].into_iter().collect(), + }); + headers.push(header); + parent_hash = id.hash; + } + + // when we're inserting header#7 and last finalized header is 0: + // check that votes at #7 are computed correctly without cache + let expected_votes_at_7 = FinalityVotes { + votes: vec![(ctx.addresses[0], 3), (ctx.addresses[1], 3), (ctx.addresses[2], 1)] + .into_iter() + .collect(), + ancestry: ancestry[..7].iter().cloned().collect(), + }; + let id7 = headers[6].compute_id(); + assert_eq!( + 
prepare_votes( + storage.cached_finality_votes( + &headers.get(5).unwrap().compute_id(), + &ctx.genesis.compute_id(), + |_| false, + ), + Default::default(), + &ctx.addresses.iter().collect(), + id7, + headers.get(6).unwrap(), + None, + ) + .unwrap(), + expected_votes_at_7, + ); + + // cached votes at #5 + let expected_votes_at_5 = FinalityVotes { + votes: vec![(ctx.addresses[0], 3), (ctx.addresses[1], 2)].into_iter().collect(), + ancestry: ancestry[..5].iter().cloned().collect(), + }; + FinalityCache::::insert(hashes[4], expected_votes_at_5); + + // when we're inserting header#7 and last finalized header is 0: + // check that votes at #7 are computed correctly with cache + assert_eq!( + prepare_votes( + storage.cached_finality_votes( + &headers.get(5).unwrap().compute_id(), + &ctx.genesis.compute_id(), + |_| false, + ), + Default::default(), + &ctx.addresses.iter().collect(), + id7, + headers.get(6).unwrap(), + None, + ) + .unwrap(), + expected_votes_at_7, + ); + + // when we're inserting header#7 and last finalized header is 3: + // check that votes at #7 are computed correctly with cache + let expected_votes_at_7 = FinalityVotes { + votes: vec![(ctx.addresses[1], 3), (ctx.addresses[2], 1)].into_iter().collect(), + ancestry: ancestry[3..7].iter().cloned().collect(), + }; + assert_eq!( + prepare_votes( + storage.cached_finality_votes( + &headers.get(5).unwrap().compute_id(), + &headers.get(2).unwrap().compute_id(), + |hash| *hash == hashes[2], + ), + headers[2].compute_id(), + &ctx.addresses.iter().collect(), + id7, + headers.get(6).unwrap(), + None, + ) + .unwrap(), + expected_votes_at_7, + ); + }); + } + + #[test] + fn prepare_votes_fails_when_finalized_sibling_is_in_ancestry() { + assert_eq!( + prepare_votes::<()>( + CachedFinalityVotes { + stopped_at_finalized_sibling: true, + ..Default::default() + }, + Default::default(), + &validators_addresses(3).iter().collect(), + Default::default(), + &Default::default(), + None, + ), + 
Err(Error::TryingToFinalizeSibling), + ); + } +} diff --git a/polkadot/modules/ethereum/src/import.rs b/polkadot/modules/ethereum/src/import.rs new file mode 100644 index 00000000000..8cd4c8a17c7 --- /dev/null +++ b/polkadot/modules/ethereum/src/import.rs @@ -0,0 +1,609 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::error::Error; +use crate::finality::finalize_blocks; +use crate::validators::{Validators, ValidatorsConfiguration}; +use crate::verification::{is_importable_header, verify_aura_header}; +use crate::{AuraConfiguration, ChainTime, ChangeToEnact, PruningStrategy, Storage}; +use bp_eth_poa::{AuraHeader, HeaderId, Receipt}; +use sp_std::{collections::btree_map::BTreeMap, prelude::*}; + +/// Imports bunch of headers and updates blocks finality. +/// +/// Transactions receipts must be provided if `header_import_requires_receipts()` +/// has returned true. +/// If successful, returns tuple where first element is the number of useful headers +/// we have imported and the second element is the number of useless headers (duplicate) +/// we have NOT imported. +/// Returns error if fatal error has occured during import. Some valid headers may be +/// imported in this case. 
+/// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/415) +#[allow(clippy::too_many_arguments)] +pub fn import_headers( + storage: &mut S, + pruning_strategy: &mut PS, + aura_config: &AuraConfiguration, + validators_config: &ValidatorsConfiguration, + submitter: Option, + headers: Vec<(AuraHeader, Option>)>, + chain_time: &CT, + finalized_headers: &mut BTreeMap, +) -> Result<(u64, u64), Error> { + let mut useful = 0; + let mut useless = 0; + for (header, receipts) in headers { + let import_result = import_header( + storage, + pruning_strategy, + aura_config, + validators_config, + submitter.clone(), + header, + chain_time, + receipts, + ); + + match import_result { + Ok((_, finalized)) => { + for (_, submitter) in finalized { + if let Some(submitter) = submitter { + *finalized_headers.entry(submitter).or_default() += 1; + } + } + useful += 1; + } + Err(Error::AncientHeader) | Err(Error::KnownHeader) => useless += 1, + Err(error) => return Err(error), + } + } + + Ok((useful, useless)) +} + +/// A vector of finalized headers and their submitters. +pub type FinalizedHeaders = Vec<(HeaderId, Option<::Submitter>)>; + +/// Imports given header and updates blocks finality (if required). +/// +/// Transactions receipts must be provided if `header_import_requires_receipts()` +/// has returned true. +/// +/// Returns imported block id and list of all finalized headers. 
+/// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/415) +#[allow(clippy::too_many_arguments)] +pub fn import_header( + storage: &mut S, + pruning_strategy: &mut PS, + aura_config: &AuraConfiguration, + validators_config: &ValidatorsConfiguration, + submitter: Option, + header: AuraHeader, + chain_time: &CT, + receipts: Option>, +) -> Result<(HeaderId, FinalizedHeaders), Error> { + // first check that we are able to import this header at all + let (header_id, finalized_id) = is_importable_header(storage, &header)?; + + // verify header + let import_context = verify_aura_header(storage, aura_config, submitter, &header, chain_time)?; + + // check if block schedules new validators + let validators = Validators::new(validators_config); + let (scheduled_change, enacted_change) = validators.extract_validators_change(&header, receipts)?; + + // check if block finalizes some other blocks and corresponding scheduled validators + let validators_set = import_context.validators_set(); + let finalized_blocks = finalize_blocks( + storage, + finalized_id, + (validators_set.enact_block, &validators_set.validators), + header_id, + import_context.submitter(), + &header, + aura_config.two_thirds_majority_transition, + )?; + let enacted_change = enacted_change + .map(|validators| ChangeToEnact { + signal_block: None, + validators, + }) + .or_else(|| validators.finalize_validators_change(storage, &finalized_blocks.finalized_headers)); + + // NOTE: we can't return Err() from anywhere below this line + // (because otherwise we'll have inconsistent storage if transaction will fail) + + // and finally insert the block + let (best_id, best_total_difficulty) = storage.best_block(); + let total_difficulty = import_context.total_difficulty() + header.difficulty; + let is_best = total_difficulty > best_total_difficulty; + storage.insert_header(import_context.into_import_header( + is_best, + header_id, + header, + total_difficulty, + enacted_change, + 
scheduled_change, + finalized_blocks.votes, + )); + + // compute upper border of updated pruning range + let new_best_block_id = if is_best { header_id } else { best_id }; + let new_best_finalized_block_id = finalized_blocks.finalized_headers.last().map(|(id, _)| *id); + let pruning_upper_bound = pruning_strategy.pruning_upper_bound( + new_best_block_id.number, + new_best_finalized_block_id + .map(|id| id.number) + .unwrap_or(finalized_id.number), + ); + + // now mark finalized headers && prune old headers + storage.finalize_and_prune_headers(new_best_finalized_block_id, pruning_upper_bound); + + Ok((header_id, finalized_blocks.finalized_headers)) +} + +/// Returns true if transactions receipts are required to import given header. +pub fn header_import_requires_receipts( + storage: &S, + validators_config: &ValidatorsConfiguration, + header: &AuraHeader, +) -> bool { + is_importable_header(storage, header) + .map(|_| Validators::new(validators_config)) + .map(|validators| validators.maybe_signals_validators_change(header)) + .unwrap_or(false) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{ + run_test, secret_to_address, test_aura_config, test_validators_config, validator, validators_addresses, + validators_change_receipt, HeaderBuilder, KeepSomeHeadersBehindBest, TestRuntime, GAS_LIMIT, + }; + use crate::validators::ValidatorsSource; + use crate::DefaultInstance; + use crate::{BlocksToPrune, BridgeStorage, Headers, PruningRange}; + use frame_support::{StorageMap, StorageValue}; + use secp256k1::SecretKey; + + const TOTAL_VALIDATORS: usize = 3; + + #[test] + fn rejects_finalized_block_competitors() { + run_test(TOTAL_VALIDATORS, |_| { + let mut storage = BridgeStorage::::new(); + storage.finalize_and_prune_headers( + Some(HeaderId { + number: 100, + ..Default::default() + }), + 0, + ); + assert_eq!( + import_header( + &mut storage, + &mut KeepSomeHeadersBehindBest::default(), + &test_aura_config(), + &test_validators_config(), + None, + 
Default::default(), + &(), + None, + ), + Err(Error::AncientHeader), + ); + }); + } + + #[test] + fn rejects_known_header() { + run_test(TOTAL_VALIDATORS, |ctx| { + let mut storage = BridgeStorage::::new(); + let header = HeaderBuilder::with_parent(&ctx.genesis).sign_by(&validator(1)); + assert_eq!( + import_header( + &mut storage, + &mut KeepSomeHeadersBehindBest::default(), + &test_aura_config(), + &test_validators_config(), + None, + header.clone(), + &(), + None, + ) + .map(|_| ()), + Ok(()), + ); + assert_eq!( + import_header( + &mut storage, + &mut KeepSomeHeadersBehindBest::default(), + &test_aura_config(), + &test_validators_config(), + None, + header, + &(), + None, + ) + .map(|_| ()), + Err(Error::KnownHeader), + ); + }); + } + + #[test] + fn import_header_works() { + run_test(TOTAL_VALIDATORS, |ctx| { + let validators_config = ValidatorsConfiguration::Multi(vec![ + (0, ValidatorsSource::List(ctx.addresses.clone())), + (1, ValidatorsSource::List(validators_addresses(2))), + ]); + let mut storage = BridgeStorage::::new(); + let header = HeaderBuilder::with_parent(&ctx.genesis).sign_by(&validator(1)); + let hash = header.compute_hash(); + assert_eq!( + import_header( + &mut storage, + &mut KeepSomeHeadersBehindBest::default(), + &test_aura_config(), + &validators_config, + None, + header, + &(), + None + ) + .map(|_| ()), + Ok(()), + ); + + // check that new validators will be used for next header + let imported_header = Headers::::get(&hash).unwrap(); + assert_eq!( + imported_header.next_validators_set_id, + 1, // new set is enacted from config + ); + }); + } + + #[test] + fn headers_are_pruned_during_import() { + run_test(TOTAL_VALIDATORS, |ctx| { + let validators_config = + ValidatorsConfiguration::Single(ValidatorsSource::Contract([3; 20].into(), ctx.addresses.clone())); + let validators = vec![validator(0), validator(1), validator(2)]; + let mut storage = BridgeStorage::::new(); + + // header [0..11] are finalizing blocks [0; 9] + // => since we want 
to keep 10 finalized blocks, we aren't pruning anything + let mut latest_block_id = Default::default(); + for i in 1..11 { + let header = HeaderBuilder::with_parent_number(i - 1).sign_by_set(&validators); + let parent_id = header.parent_id().unwrap(); + + let (rolling_last_block_id, finalized_blocks) = import_header( + &mut storage, + &mut KeepSomeHeadersBehindBest::default(), + &test_aura_config(), + &validators_config, + Some(100), + header, + &(), + None, + ) + .unwrap(); + match i { + 2..=10 => assert_eq!(finalized_blocks, vec![(parent_id, Some(100))], "At {}", i,), + _ => assert_eq!(finalized_blocks, vec![], "At {}", i), + } + latest_block_id = rolling_last_block_id; + } + assert!(storage.header(&ctx.genesis.compute_hash()).is_some()); + + // header 11 finalizes headers [10] AND schedules change + // => we prune header#0 + let header11 = HeaderBuilder::with_parent_number(10) + .log_bloom((&[0xff; 256]).into()) + .receipts_root( + "ead6c772ba0083bbff497ba0f4efe47c199a2655401096c21ab7450b6c466d97" + .parse() + .unwrap(), + ) + .sign_by_set(&validators); + let parent_id = header11.parent_id().unwrap(); + let (rolling_last_block_id, finalized_blocks) = import_header( + &mut storage, + &mut KeepSomeHeadersBehindBest::default(), + &test_aura_config(), + &validators_config, + Some(101), + header11.clone(), + &(), + Some(vec![validators_change_receipt(latest_block_id.hash)]), + ) + .unwrap(); + assert_eq!(finalized_blocks, vec![(parent_id, Some(100))],); + assert!(storage.header(&ctx.genesis.compute_hash()).is_none()); + latest_block_id = rolling_last_block_id; + + // and now let's say validators 1 && 2 went offline + // => in the range 12-25 no blocks are finalized, but we still continue to prune old headers + // until header#11 is met. 
we can't prune #11, because it schedules change + let mut step = 56u64; + let mut expected_blocks = vec![(header11.compute_id(), Some(101))]; + for i in 12..25 { + let header = HeaderBuilder::with_parent_hash(latest_block_id.hash) + .difficulty(i.into()) + .step(step) + .sign_by_set(&validators); + expected_blocks.push((header.compute_id(), Some(102))); + let (rolling_last_block_id, finalized_blocks) = import_header( + &mut storage, + &mut KeepSomeHeadersBehindBest::default(), + &test_aura_config(), + &validators_config, + Some(102), + header, + &(), + None, + ) + .unwrap(); + assert_eq!(finalized_blocks, vec![],); + latest_block_id = rolling_last_block_id; + step += 3; + } + assert_eq!( + BlocksToPrune::::get(), + PruningRange { + oldest_unpruned_block: 11, + oldest_block_to_keep: 14, + }, + ); + + // now let's insert block signed by validator 1 + // => blocks 11..24 are finalized and blocks 11..14 are pruned + step -= 2; + let header = HeaderBuilder::with_parent_hash(latest_block_id.hash) + .difficulty(25.into()) + .step(step) + .sign_by_set(&validators); + let (_, finalized_blocks) = import_header( + &mut storage, + &mut KeepSomeHeadersBehindBest::default(), + &test_aura_config(), + &validators_config, + Some(103), + header, + &(), + None, + ) + .unwrap(); + assert_eq!(finalized_blocks, expected_blocks); + assert_eq!( + BlocksToPrune::::get(), + PruningRange { + oldest_unpruned_block: 15, + oldest_block_to_keep: 15, + }, + ); + }); + } + + fn import_custom_block( + storage: &mut S, + validators: &[SecretKey], + header: AuraHeader, + ) -> Result { + let id = header.compute_id(); + import_header( + storage, + &mut KeepSomeHeadersBehindBest::default(), + &test_aura_config(), + &ValidatorsConfiguration::Single(ValidatorsSource::Contract( + [0; 20].into(), + validators.iter().map(secret_to_address).collect(), + )), + None, + header, + &(), + None, + ) + .map(|_| id) + } + + #[test] + fn import_of_non_best_block_may_finalize_blocks() { + run_test(TOTAL_VALIDATORS, 
|ctx| { + let mut storage = BridgeStorage::::new(); + + // insert headers (H1, validator1), (H2, validator1), (H3, validator1) + // making H3 the best header, without finalizing anything (we need 2 signatures) + let mut expected_best_block = Default::default(); + for i in 1..4 { + let step = 1 + i * TOTAL_VALIDATORS as u64; + expected_best_block = import_custom_block( + &mut storage, + &ctx.validators, + HeaderBuilder::with_parent_number(i - 1) + .step(step) + .sign_by_set(&ctx.validators), + ) + .unwrap(); + } + let (best_block, best_difficulty) = storage.best_block(); + assert_eq!(best_block, expected_best_block); + assert_eq!(storage.finalized_block(), ctx.genesis.compute_id()); + + // insert headers (H1', validator1), (H2', validator2), finalizing H2, even though H3 + // has better difficulty than H2' (because there are more steps involved) + let mut expected_finalized_block = Default::default(); + let mut parent_hash = ctx.genesis.compute_hash(); + for i in 1..3 { + let step = i; + let id = import_custom_block( + &mut storage, + &ctx.validators, + HeaderBuilder::with_parent_hash(parent_hash) + .step(step) + .gas_limit((GAS_LIMIT + 1).into()) + .sign_by_set(&ctx.validators), + ) + .unwrap(); + parent_hash = id.hash; + if i == 1 { + expected_finalized_block = id; + } + } + let (new_best_block, new_best_difficulty) = storage.best_block(); + assert_eq!(new_best_block, expected_best_block); + assert_eq!(new_best_difficulty, best_difficulty); + assert_eq!(storage.finalized_block(), expected_finalized_block); + }); + } + + #[test] + fn append_to_unfinalized_fork_fails() { + const VALIDATORS: u64 = 5; + run_test(VALIDATORS as usize, |ctx| { + let mut storage = BridgeStorage::::new(); + + // header1, authored by validator[2] is best common block between two competing forks + let header1 = import_custom_block( + &mut storage, + &ctx.validators, + HeaderBuilder::with_parent_number(0) + .step(2) + .sign_by_set(&ctx.validators), + ) + .unwrap(); + 
assert_eq!(storage.best_block().0, header1); + assert_eq!(storage.finalized_block().number, 0); + + // validator[3] has authored header2 (nothing is finalized yet) + let header2 = import_custom_block( + &mut storage, + &ctx.validators, + HeaderBuilder::with_parent_number(1) + .step(3) + .sign_by_set(&ctx.validators), + ) + .unwrap(); + assert_eq!(storage.best_block().0, header2); + assert_eq!(storage.finalized_block().number, 0); + + // validator[4] has authored header3 (header1 is finalized) + let header3 = import_custom_block( + &mut storage, + &ctx.validators, + HeaderBuilder::with_parent_number(2) + .step(4) + .sign_by_set(&ctx.validators), + ) + .unwrap(); + assert_eq!(storage.best_block().0, header3); + assert_eq!(storage.finalized_block(), header1); + + // validator[4] has authored 4 blocks: header2'...header5' (header1 is still finalized) + let header2_1 = import_custom_block( + &mut storage, + &ctx.validators, + HeaderBuilder::with_parent_number(1) + .gas_limit((GAS_LIMIT + 1).into()) + .step(4) + .sign_by_set(&ctx.validators), + ) + .unwrap(); + let header3_1 = import_custom_block( + &mut storage, + &ctx.validators, + HeaderBuilder::with_parent_hash(header2_1.hash) + .step(4 + VALIDATORS) + .sign_by_set(&ctx.validators), + ) + .unwrap(); + let header4_1 = import_custom_block( + &mut storage, + &ctx.validators, + HeaderBuilder::with_parent_hash(header3_1.hash) + .step(4 + VALIDATORS * 2) + .sign_by_set(&ctx.validators), + ) + .unwrap(); + let header5_1 = import_custom_block( + &mut storage, + &ctx.validators, + HeaderBuilder::with_parent_hash(header4_1.hash) + .step(4 + VALIDATORS * 3) + .sign_by_set(&ctx.validators), + ) + .unwrap(); + assert_eq!(storage.best_block().0, header5_1); + assert_eq!(storage.finalized_block(), header1); + + // when we import header4 { parent = header3 }, authored by validator[0], header2 is finalized + let header4 = import_custom_block( + &mut storage, + &ctx.validators, + HeaderBuilder::with_parent_number(3) + .step(5) + 
.sign_by_set(&ctx.validators), + ) + .unwrap(); + assert_eq!(storage.best_block().0, header5_1); + assert_eq!(storage.finalized_block(), header2); + + // when we import header5 { parent = header4 }, authored by validator[1], header3 is finalized + let header5 = import_custom_block( + &mut storage, + &ctx.validators, + HeaderBuilder::with_parent_hash(header4.hash) + .step(6) + .sign_by_set(&ctx.validators), + ) + .unwrap(); + assert_eq!(storage.best_block().0, header5); + assert_eq!(storage.finalized_block(), header3); + + // import of header2'' { parent = header1 } fails, because it has number < best_finalized + assert_eq!( + import_custom_block( + &mut storage, + &ctx.validators, + HeaderBuilder::with_parent_number(1) + .gas_limit((GAS_LIMIT + 1).into()) + .step(3) + .sign_by_set(&ctx.validators) + ), + Err(Error::AncientHeader), + ); + + // import of header6' should also fail because we're trying to append to fork thas + // has forked before finalized block + assert_eq!( + import_custom_block( + &mut storage, + &ctx.validators, + HeaderBuilder::with_parent_number(5) + .gas_limit((GAS_LIMIT + 1).into()) + .step(5 + VALIDATORS * 4) + .sign_by_set(&ctx.validators), + ), + Err(Error::TryingToFinalizeSibling), + ); + }); + } +} diff --git a/polkadot/modules/ethereum/src/lib.rs b/polkadot/modules/ethereum/src/lib.rs new file mode 100644 index 00000000000..aeb7d69f763 --- /dev/null +++ b/polkadot/modules/ethereum/src/lib.rs @@ -0,0 +1,1553 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +// Runtime-generated enums +#![allow(clippy::large_enum_variant)] + +use crate::finality::{CachedFinalityVotes, FinalityVotes}; +use bp_eth_poa::{Address, AuraHeader, HeaderId, RawTransaction, RawTransactionReceipt, Receipt, H256, U256}; +use codec::{Decode, Encode}; +use frame_support::{decl_module, decl_storage, traits::Get}; +use sp_runtime::{ + transaction_validity::{ + InvalidTransaction, TransactionLongevity, TransactionPriority, TransactionSource, TransactionValidity, + UnknownTransaction, ValidTransaction, + }, + RuntimeDebug, +}; +use sp_std::{cmp::Ord, collections::btree_map::BTreeMap, prelude::*}; + +pub use validators::{ValidatorsConfiguration, ValidatorsSource}; + +mod error; +mod finality; +mod import; +mod validators; +mod verification; + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + +#[cfg(test)] +mod mock; + +#[cfg(any(feature = "runtime-benchmarks", test))] +pub mod test_utils; + +/// Maximal number of blocks we're pruning in single import call. +const MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT: u64 = 8; + +/// Authority round engine configuration parameters. +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] +pub struct AuraConfiguration { + /// Empty step messages transition block. + pub empty_steps_transition: u64, + /// Transition block to strict empty steps validation. + pub strict_empty_steps_transition: u64, + /// Monotonic step validation transition block. + pub validate_step_transition: u64, + /// Chain score validation transition block. 
+ pub validate_score_transition: u64, + /// First block for which a 2/3 quorum (instead of 1/2) is required. + pub two_thirds_majority_transition: u64, + /// Minimum gas limit. + pub min_gas_limit: U256, + /// Maximum gas limit. + pub max_gas_limit: U256, + /// Maximum size of extra data. + pub maximum_extra_data_size: u64, +} + +/// Transaction pool configuration. +/// +/// This is used to limit number of unsigned headers transactions in +/// the pool. We never use it to verify signed transactions. +pub struct PoolConfiguration { + /// Maximal difference between number of header from unsigned transaction + /// and current best block. This must be selected with caution - the more + /// is the difference, the more (potentially invalid) transactions could be + /// accepted to the pool and mined later (filling blocks with spam). + pub max_future_number_difference: u64, +} + +/// Block header as it is stored in the runtime storage. +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] +pub struct StoredHeader { + /// Submitter of this header. May be `None` if header has been submitted + /// using unsigned transaction. + pub submitter: Option, + /// The block header itself. + pub header: AuraHeader, + /// Total difficulty of the chain. + pub total_difficulty: U256, + /// The ID of set of validators that is expected to produce direct descendants of + /// this block. If header enacts new set, this would be the new set. Otherwise + /// this is the set that has produced the block itself. + /// The hash is the hash of block where validators set has been enacted. + pub next_validators_set_id: u64, + /// Hash of the last block which has **SCHEDULED** validators set change. + /// Note that signal doesn't mean that the set has been (or ever will be) enacted. + /// Note that the header may already be pruned. + pub last_signal_block: Option, +} + +/// Validators set as it is stored in the runtime storage. 
+#[derive(Encode, Decode, PartialEq, RuntimeDebug)] +#[cfg_attr(test, derive(Clone))] +pub struct ValidatorsSet { + /// Validators of this set. + pub validators: Vec
, + /// Hash of the block where this set has been signalled. None if this is the first set. + pub signal_block: Option, + /// Hash of the block where this set has been enacted. + pub enact_block: HeaderId, +} + +/// Validators set change as it is stored in the runtime storage. +#[derive(Encode, Decode, PartialEq, RuntimeDebug)] +#[cfg_attr(test, derive(Clone))] +pub struct AuraScheduledChange { + /// Validators of this set. + pub validators: Vec
, + /// Hash of the block which has emitted previous validators change signal. + pub prev_signal_block: Option, +} + +/// Header that we're importing. +#[derive(RuntimeDebug)] +#[cfg_attr(test, derive(Clone, PartialEq))] +pub struct HeaderToImport { + /// Header import context, + pub context: ImportContext, + /// Should we consider this header as best? + pub is_best: bool, + /// The id of the header. + pub id: HeaderId, + /// The header itself. + pub header: AuraHeader, + /// Total chain difficulty at the header. + pub total_difficulty: U256, + /// New validators set and the hash of block where it has been scheduled (if applicable). + /// Some if set is is enacted by this header. + pub enacted_change: Option, + /// Validators set scheduled change, if happened at the header. + pub scheduled_change: Option>, + /// Finality votes at this header. + pub finality_votes: FinalityVotes, +} + +/// Header that we're importing. +#[derive(RuntimeDebug)] +#[cfg_attr(test, derive(Clone, PartialEq))] +pub struct ChangeToEnact { + /// The id of the header where change has been scheduled. + /// None if it is a first set within current `ValidatorsSource`. + pub signal_block: Option, + /// Validators set that is enacted. + pub validators: Vec
, +} + +/// Blocks range that we want to prune. +#[derive(Encode, Decode, Default, RuntimeDebug, Clone, PartialEq)] +struct PruningRange { + /// Number of the oldest unpruned block(s). This might be the block that we do not + /// want to prune now (then it is equal to `oldest_block_to_keep`), or block that we + /// were unable to prune for whatever reason (i.e. if it isn't finalized yet and has + /// scheduled validators set change). + pub oldest_unpruned_block: u64, + /// Number of oldest block(s) that we want to keep. We want to prune blocks in range + /// [`oldest_unpruned_block`; `oldest_block_to_keep`). + pub oldest_block_to_keep: u64, +} + +/// Header import context. +/// +/// The import context contains information needed by the header verification +/// pipeline which is not directly part of the header being imported. This includes +/// information relating to its parent, and the current validator set (which +/// provide _context_ for the current header). +#[derive(RuntimeDebug)] +#[cfg_attr(test, derive(Clone, PartialEq))] +pub struct ImportContext { + submitter: Option, + parent_hash: H256, + parent_header: AuraHeader, + parent_total_difficulty: U256, + parent_scheduled_change: Option, + validators_set_id: u64, + validators_set: ValidatorsSet, + last_signal_block: Option, +} + +impl ImportContext { + /// Returns reference to header submitter (if known). + pub fn submitter(&self) -> Option<&Submitter> { + self.submitter.as_ref() + } + + /// Returns reference to parent header. + pub fn parent_header(&self) -> &AuraHeader { + &self.parent_header + } + + /// Returns total chain difficulty at parent block. + pub fn total_difficulty(&self) -> &U256 { + &self.parent_total_difficulty + } + + /// Returns the validator set change if the parent header has signaled a change. + pub fn parent_scheduled_change(&self) -> Option<&AuraScheduledChange> { + self.parent_scheduled_change.as_ref() + } + + /// Returns id of the set of validators. 
+ pub fn validators_set_id(&self) -> u64 { + self.validators_set_id + } + + /// Returns reference to validators set for the block we're going to import. + pub fn validators_set(&self) -> &ValidatorsSet { + &self.validators_set + } + + /// Returns reference to the latest block which has signalled change of validators set. + /// This may point to parent if parent has signalled change. + pub fn last_signal_block(&self) -> Option { + match self.parent_scheduled_change { + Some(_) => Some(HeaderId { + number: self.parent_header.number, + hash: self.parent_hash, + }), + None => self.last_signal_block, + } + } + + /// Converts import context into header we're going to import. + #[allow(clippy::too_many_arguments)] + pub fn into_import_header( + self, + is_best: bool, + id: HeaderId, + header: AuraHeader, + total_difficulty: U256, + enacted_change: Option, + scheduled_change: Option>, + finality_votes: FinalityVotes, + ) -> HeaderToImport { + HeaderToImport { + context: self, + is_best, + id, + header, + total_difficulty, + enacted_change, + scheduled_change, + finality_votes, + } + } +} + +/// The storage that is used by the client. +/// +/// Storage modification must be discarded if block import has failed. +pub trait Storage { + /// Header submitter identifier. + type Submitter: Clone + Ord; + + /// Get best known block and total chain difficulty. + fn best_block(&self) -> (HeaderId, U256); + /// Get last finalized block. + fn finalized_block(&self) -> HeaderId; + /// Get imported header by its hash. + /// + /// Returns header and its submitter (if known). + fn header(&self, hash: &H256) -> Option<(AuraHeader, Option)>; + /// Returns latest cached finality votes (if any) for block ancestors, starting + /// from `parent_hash` block and stopping at genesis block, best finalized block + /// or block where `stop_at` returns true. 
+ fn cached_finality_votes( + &self, + parent: &HeaderId, + best_finalized: &HeaderId, + stop_at: impl Fn(&H256) -> bool, + ) -> CachedFinalityVotes; + /// Get header import context by parent header hash. + fn import_context( + &self, + submitter: Option, + parent_hash: &H256, + ) -> Option>; + /// Get new validators that are scheduled by given header and hash of the previous + /// block that has scheduled change. + fn scheduled_change(&self, hash: &H256) -> Option; + /// Insert imported header. + fn insert_header(&mut self, header: HeaderToImport); + /// Finalize given block and schedules pruning of all headers + /// with number < prune_end. + /// + /// The headers in the pruning range could be either finalized, or not. + /// It is the storage duty to ensure that unfinalized headers that have + /// scheduled changes won't be pruned until they or their competitors + /// are finalized. + fn finalize_and_prune_headers(&mut self, finalized: Option, prune_end: u64); +} + +/// Headers pruning strategy. +pub trait PruningStrategy: Default { + /// Return upper bound (exclusive) of headers pruning range. + /// + /// Every value that is returned from this function, must be greater or equal to the + /// previous value. Otherwise it will be ignored (we can't revert pruning). + /// + /// Pallet may prune both finalized and unfinalized blocks. But it can't give any + /// guarantees on when it will happen. Example: if some unfinalized block at height N + /// has scheduled validators set change, then the module won't prune any blocks with + /// number >= N even if strategy allows that. + /// + /// If your strategy allows pruning unfinalized blocks, this could lead to switch + /// between finalized forks (only if authorities are misbehaving). But since 50%+1 (or 2/3) + /// authorities are able to do whatever they want with the chain, this isn't considered + /// fatal. 
If your strategy only prunes finalized blocks, we'll never be able to finalize + /// header that isn't descendant of current best finalized block. + fn pruning_upper_bound(&mut self, best_number: u64, best_finalized_number: u64) -> u64; +} + +/// ChainTime represents the runtime on-chain time +pub trait ChainTime: Default { + /// Is a header timestamp ahead of the current on-chain time. + /// + /// Check whether `timestamp` is ahead (i.e greater than) the current on-chain + /// time. If so, return `true`, `false` otherwise. + fn is_timestamp_ahead(&self, timestamp: u64) -> bool; +} + +/// ChainTime implementation for the empty type. +/// +/// This implementation will allow a runtime without the timestamp pallet to use +/// the empty type as its ChainTime associated type. +impl ChainTime for () { + fn is_timestamp_ahead(&self, _: u64) -> bool { + false + } +} + +/// Callbacks for header submission rewards/penalties. +pub trait OnHeadersSubmitted { + /// Called when valid headers have been submitted. + /// + /// The submitter **must not** be rewarded for submitting valid headers, because greedy authority + /// could produce and submit multiple valid headers (without relaying them to other peers) and + /// get rewarded. Instead, the provider could track submitters and stop rewarding if too many + /// headers have been submitted without finalization. + fn on_valid_headers_submitted(submitter: AccountId, useful: u64, useless: u64); + /// Called when invalid headers have been submitted. + fn on_invalid_headers_submitted(submitter: AccountId); + /// Called when earlier submitted headers have been finalized. + /// + /// finalized is the number of headers that submitter has submitted and which + /// have been finalized. 
+ fn on_valid_headers_finalized(submitter: AccountId, finalized: u64); +} + +impl OnHeadersSubmitted for () { + fn on_valid_headers_submitted(_submitter: AccountId, _useful: u64, _useless: u64) {} + fn on_invalid_headers_submitted(_submitter: AccountId) {} + fn on_valid_headers_finalized(_submitter: AccountId, _finalized: u64) {} +} + +/// The module configuration trait. +pub trait Config: frame_system::Config { + /// Aura configuration. + type AuraConfiguration: Get; + /// Validators configuration. + type ValidatorsConfiguration: Get; + + /// Interval (in blocks) for for finality votes caching. + /// If None, cache is disabled. + /// + /// Ideally, this should either be None (when we are sure that there won't + /// be any significant finalization delays), or something that is bit larger + /// than average finalization delay. + type FinalityVotesCachingInterval: Get>; + /// Headers pruning strategy. + type PruningStrategy: PruningStrategy; + /// Header timestamp verification against current on-chain time. + type ChainTime: ChainTime; + + /// Handler for headers submission result. + type OnHeadersSubmitted: OnHeadersSubmitted; +} + +decl_module! { + pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { + /// Import single Aura header. Requires transaction to be **UNSIGNED**. + #[weight = 0] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) + pub fn import_unsigned_header(origin, header: AuraHeader, receipts: Option>) { + frame_system::ensure_none(origin)?; + + import::import_header( + &mut BridgeStorage::::new(), + &mut T::PruningStrategy::default(), + &T::AuraConfiguration::get(), + &T::ValidatorsConfiguration::get(), + None, + header, + &T::ChainTime::default(), + receipts, + ).map_err(|e| e.msg())?; + } + + /// Import Aura chain headers in a single **SIGNED** transaction. 
+ /// Ignores non-fatal errors (like when known header is provided), rewards + /// for successful headers import and penalizes for fatal errors. + /// + /// This should be used with caution - passing too many headers could lead to + /// enormous block production/import time. + #[weight = 0] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) + pub fn import_signed_headers(origin, headers_with_receipts: Vec<(AuraHeader, Option>)>) { + let submitter = frame_system::ensure_signed(origin)?; + let mut finalized_headers = BTreeMap::new(); + let import_result = import::import_headers( + &mut BridgeStorage::::new(), + &mut T::PruningStrategy::default(), + &T::AuraConfiguration::get(), + &T::ValidatorsConfiguration::get(), + Some(submitter.clone()), + headers_with_receipts, + &T::ChainTime::default(), + &mut finalized_headers, + ); + + // if we have finalized some headers, we will reward their submitters even + // if current submitter has provided some invalid headers + for (f_submitter, f_count) in finalized_headers { + T::OnHeadersSubmitted::on_valid_headers_finalized( + f_submitter, + f_count, + ); + } + + // now track/penalize current submitter for providing new headers + match import_result { + Ok((useful, useless)) => + T::OnHeadersSubmitted::on_valid_headers_submitted(submitter, useful, useless), + Err(error) => { + // even though we may have accept some headers, we do not want to reward someone + // who provides invalid headers + T::OnHeadersSubmitted::on_invalid_headers_submitted(submitter); + return Err(error.msg().into()); + }, + } + } + } +} + +decl_storage! { + trait Store for Pallet, I: Instance = DefaultInstance> as Bridge { + /// Best known block. + BestBlock: (HeaderId, U256); + /// Best finalized block. + FinalizedBlock: HeaderId; + /// Range of blocks that we want to prune. + BlocksToPrune: PruningRange; + /// Map of imported headers by hash. 
+ Headers: map hasher(identity) H256 => Option>; + /// Map of imported header hashes by number. + HeadersByNumber: map hasher(blake2_128_concat) u64 => Option>; + /// Map of cached finality data by header hash. + FinalityCache: map hasher(identity) H256 => Option>; + /// The ID of next validator set. + NextValidatorsSetId: u64; + /// Map of validators sets by their id. + ValidatorsSets: map hasher(twox_64_concat) u64 => Option; + /// Validators sets reference count. Each header that is authored by this set increases + /// the reference count. When we prune this header, we decrease the reference count. + /// When it reaches zero, we are free to prune validator set as well. + ValidatorsSetsRc: map hasher(twox_64_concat) u64 => Option; + /// Map of validators set changes scheduled by given header. + ScheduledChanges: map hasher(identity) H256 => Option; + } + add_extra_genesis { + config(initial_header): AuraHeader; + config(initial_difficulty): U256; + config(initial_validators): Vec
; + build(|config| { + // the initial blocks should be selected so that: + // 1) it doesn't signal validators changes; + // 2) there are no scheduled validators changes from previous blocks; + // 3) (implied) all direct children of initial block are authored by the same validators set. + + assert!( + !config.initial_validators.is_empty(), + "Initial validators set can't be empty", + ); + + initialize_storage::( + &config.initial_header, + config.initial_difficulty, + &config.initial_validators, + ); + }) + } +} + +impl, I: Instance> Pallet { + /// Returns number and hash of the best block known to the bridge module. + /// The caller should only submit `import_header` transaction that makes + /// (or leads to making) other header the best one. + pub fn best_block() -> HeaderId { + BridgeStorage::::new().best_block().0 + } + + /// Returns number and hash of the best finalized block known to the bridge module. + pub fn finalized_block() -> HeaderId { + BridgeStorage::::new().finalized_block() + } + + /// Returns true if the import of given block requires transactions receipts. + pub fn is_import_requires_receipts(header: AuraHeader) -> bool { + import::header_import_requires_receipts( + &BridgeStorage::::new(), + &T::ValidatorsConfiguration::get(), + &header, + ) + } + + /// Returns true if header is known to the runtime. + pub fn is_known_block(hash: H256) -> bool { + BridgeStorage::::new().header(&hash).is_some() + } + + /// Verify that transaction is included into given finalized block. 
+ pub fn verify_transaction_finalized( + block: H256, + tx_index: u64, + proof: &[(RawTransaction, RawTransactionReceipt)], + ) -> bool { + crate::verify_transaction_finalized(&BridgeStorage::::new(), block, tx_index, proof) + } +} + +impl, I: Instance> frame_support::unsigned::ValidateUnsigned for Pallet { + type Call = Call; + + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + match *call { + Self::Call::import_unsigned_header(ref header, ref receipts) => { + let accept_result = verification::accept_aura_header_into_pool( + &BridgeStorage::::new(), + &T::AuraConfiguration::get(), + &T::ValidatorsConfiguration::get(), + &pool_configuration(), + header, + &T::ChainTime::default(), + receipts.as_ref(), + ); + + match accept_result { + Ok((requires, provides)) => Ok(ValidTransaction { + priority: TransactionPriority::max_value(), + requires, + provides, + longevity: TransactionLongevity::max_value(), + propagate: true, + }), + // UnsignedTooFarInTheFuture is the special error code used to limit + // number of transactions in the pool - we do not want to ban transaction + // in this case (see verification.rs for details) + Err(error::Error::UnsignedTooFarInTheFuture) => { + UnknownTransaction::Custom(error::Error::UnsignedTooFarInTheFuture.code()).into() + } + Err(error) => InvalidTransaction::Custom(error.code()).into(), + } + } + _ => InvalidTransaction::Call.into(), + } + } +} + +/// Runtime bridge storage. +#[derive(Default)] +pub struct BridgeStorage(sp_std::marker::PhantomData<(T, I)>); + +impl, I: Instance> BridgeStorage { + /// Create new BridgeStorage. + pub fn new() -> Self { + BridgeStorage(sp_std::marker::PhantomData::<(T, I)>::default()) + } + + /// Prune old blocks. 
+ fn prune_blocks(&self, mut max_blocks_to_prune: u64, finalized_number: u64, prune_end: u64) { + let pruning_range = BlocksToPrune::::get(); + let mut new_pruning_range = pruning_range.clone(); + + // update oldest block we want to keep + if prune_end > new_pruning_range.oldest_block_to_keep { + new_pruning_range.oldest_block_to_keep = prune_end; + } + + // start pruning blocks + let begin = new_pruning_range.oldest_unpruned_block; + let end = new_pruning_range.oldest_block_to_keep; + log::trace!(target: "runtime", "Pruning blocks in range [{}..{})", begin, end); + for number in begin..end { + // if we can't prune anything => break + if max_blocks_to_prune == 0 { + break; + } + + // read hashes of blocks with given number and try to prune these blocks + let blocks_at_number = HeadersByNumber::::take(number); + if let Some(mut blocks_at_number) = blocks_at_number { + self.prune_blocks_by_hashes( + &mut max_blocks_to_prune, + finalized_number, + number, + &mut blocks_at_number, + ); + + // if we haven't pruned all blocks, remember unpruned + if !blocks_at_number.is_empty() { + HeadersByNumber::::insert(number, blocks_at_number); + break; + } + } + + // we have pruned all headers at number + new_pruning_range.oldest_unpruned_block = number + 1; + log::trace!( + target: "runtime", + "Oldest unpruned PoA header is now: {}", + new_pruning_range.oldest_unpruned_block, + ); + } + + // update pruning range in storage + if pruning_range != new_pruning_range { + BlocksToPrune::::put(new_pruning_range); + } + } + + /// Prune old blocks with given hashes. 
+ fn prune_blocks_by_hashes( + &self, + max_blocks_to_prune: &mut u64, + finalized_number: u64, + number: u64, + blocks_at_number: &mut Vec, + ) { + // ensure that unfinalized headers we want to prune do not have scheduled changes + if number > finalized_number && blocks_at_number.iter().any(ScheduledChanges::::contains_key) { + return; + } + + // physically remove headers and (probably) obsolete validators sets + while let Some(hash) = blocks_at_number.pop() { + let header = Headers::::take(&hash); + log::trace!( + target: "runtime", + "Pruning PoA header: ({}, {})", + number, + hash, + ); + + ScheduledChanges::::remove(hash); + FinalityCache::::remove(hash); + if let Some(header) = header { + ValidatorsSetsRc::::mutate(header.next_validators_set_id, |rc| match *rc { + Some(rc) if rc > 1 => Some(rc - 1), + _ => None, + }); + } + + // check if we have already pruned too much headers in this call + *max_blocks_to_prune -= 1; + if *max_blocks_to_prune == 0 { + return; + } + } + } +} + +impl, I: Instance> Storage for BridgeStorage { + type Submitter = T::AccountId; + + fn best_block(&self) -> (HeaderId, U256) { + BestBlock::::get() + } + + fn finalized_block(&self) -> HeaderId { + FinalizedBlock::::get() + } + + fn header(&self, hash: &H256) -> Option<(AuraHeader, Option)> { + Headers::::get(hash).map(|header| (header.header, header.submitter)) + } + + fn cached_finality_votes( + &self, + parent: &HeaderId, + best_finalized: &HeaderId, + stop_at: impl Fn(&H256) -> bool, + ) -> CachedFinalityVotes { + let mut votes = CachedFinalityVotes::default(); + let mut current_id = *parent; + loop { + // if we have reached finalized block's sibling => stop with special signal + if current_id.number == best_finalized.number && current_id.hash != best_finalized.hash { + votes.stopped_at_finalized_sibling = true; + return votes; + } + + // if we have reached target header => stop + if stop_at(¤t_id.hash) { + return votes; + } + + // if we have found cached votes => stop + let 
cached_votes = FinalityCache::::get(¤t_id.hash); + if let Some(cached_votes) = cached_votes { + votes.votes = Some(cached_votes); + return votes; + } + + // read next parent header id + let header = match Headers::::get(¤t_id.hash) { + Some(header) if header.header.number != 0 => header, + _ => return votes, + }; + let parent_id = header.header.parent_id().expect( + "only returns None at genesis header;\ + the header is proved to have number > 0;\ + qed", + ); + + votes + .unaccounted_ancestry + .push_back((current_id, header.submitter, header.header)); + + current_id = parent_id; + } + } + + fn import_context( + &self, + submitter: Option, + parent_hash: &H256, + ) -> Option> { + Headers::::get(parent_hash).map(|parent_header| { + let validators_set = ValidatorsSets::::get(parent_header.next_validators_set_id) + .expect("validators set is only pruned when last ref is pruned; there is a ref; qed"); + let parent_scheduled_change = ScheduledChanges::::get(parent_hash); + ImportContext { + submitter, + parent_hash: *parent_hash, + parent_header: parent_header.header, + parent_total_difficulty: parent_header.total_difficulty, + parent_scheduled_change, + validators_set_id: parent_header.next_validators_set_id, + validators_set, + last_signal_block: parent_header.last_signal_block, + } + }) + } + + fn scheduled_change(&self, hash: &H256) -> Option { + ScheduledChanges::::get(hash) + } + + fn insert_header(&mut self, header: HeaderToImport) { + if header.is_best { + BestBlock::::put((header.id, header.total_difficulty)); + } + if let Some(scheduled_change) = header.scheduled_change { + ScheduledChanges::::insert( + &header.id.hash, + AuraScheduledChange { + validators: scheduled_change, + prev_signal_block: header.context.last_signal_block, + }, + ); + } + let next_validators_set_id = match header.enacted_change { + Some(enacted_change) => { + let next_validators_set_id = NextValidatorsSetId::::mutate(|set_id| { + let next_set_id = *set_id; + *set_id += 1; + next_set_id 
+ }); + ValidatorsSets::::insert( + next_validators_set_id, + ValidatorsSet { + validators: enacted_change.validators, + enact_block: header.id, + signal_block: enacted_change.signal_block, + }, + ); + ValidatorsSetsRc::::insert(next_validators_set_id, 1); + next_validators_set_id + } + None => { + ValidatorsSetsRc::::mutate(header.context.validators_set_id, |rc| { + *rc = Some(rc.map(|rc| rc + 1).unwrap_or(1)); + *rc + }); + header.context.validators_set_id + } + }; + + let finality_votes_caching_interval = T::FinalityVotesCachingInterval::get(); + if let Some(finality_votes_caching_interval) = finality_votes_caching_interval { + let cache_entry_required = header.id.number != 0 && header.id.number % finality_votes_caching_interval == 0; + if cache_entry_required { + FinalityCache::::insert(header.id.hash, header.finality_votes); + } + } + + log::trace!( + target: "runtime", + "Inserting PoA header: ({}, {})", + header.header.number, + header.id.hash, + ); + + let last_signal_block = header.context.last_signal_block(); + HeadersByNumber::::append(header.id.number, header.id.hash); + Headers::::insert( + &header.id.hash, + StoredHeader { + submitter: header.context.submitter, + header: header.header, + total_difficulty: header.total_difficulty, + next_validators_set_id, + last_signal_block, + }, + ); + } + + fn finalize_and_prune_headers(&mut self, finalized: Option, prune_end: u64) { + // remember just finalized block + let finalized_number = finalized + .as_ref() + .map(|f| f.number) + .unwrap_or_else(|| FinalizedBlock::::get().number); + if let Some(finalized) = finalized { + log::trace!( + target: "runtime", + "Finalizing PoA header: ({}, {})", + finalized.number, + finalized.hash, + ); + + FinalizedBlock::::put(finalized); + } + + // and now prune headers if we need to + self.prune_blocks(MAX_BLOCKS_TO_PRUNE_IN_SINGLE_IMPORT, finalized_number, prune_end); + } +} + +/// Initialize storage. 
+#[cfg(any(feature = "std", feature = "runtime-benchmarks"))] +pub(crate) fn initialize_storage, I: Instance>( + initial_header: &AuraHeader, + initial_difficulty: U256, + initial_validators: &[Address], +) { + let initial_hash = initial_header.compute_hash(); + log::trace!( + target: "runtime", + "Initializing bridge with PoA header: ({}, {})", + initial_header.number, + initial_hash, + ); + + let initial_id = HeaderId { + number: initial_header.number, + hash: initial_hash, + }; + BestBlock::::put((initial_id, initial_difficulty)); + FinalizedBlock::::put(initial_id); + BlocksToPrune::::put(PruningRange { + oldest_unpruned_block: initial_header.number, + oldest_block_to_keep: initial_header.number, + }); + HeadersByNumber::::insert(initial_header.number, vec![initial_hash]); + Headers::::insert( + initial_hash, + StoredHeader { + submitter: None, + header: initial_header.clone(), + total_difficulty: initial_difficulty, + next_validators_set_id: 0, + last_signal_block: None, + }, + ); + NextValidatorsSetId::::put(1); + ValidatorsSets::::insert( + 0, + ValidatorsSet { + validators: initial_validators.to_vec(), + signal_block: None, + enact_block: initial_id, + }, + ); + ValidatorsSetsRc::::insert(0, 1); +} + +/// Verify that transaction is included into given finalized block. 
+pub fn verify_transaction_finalized( + storage: &S, + block: H256, + tx_index: u64, + proof: &[(RawTransaction, RawTransactionReceipt)], +) -> bool { + if tx_index >= proof.len() as _ { + log::trace!( + target: "runtime", + "Tx finality check failed: transaction index ({}) is larger than number of transactions ({})", + tx_index, + proof.len(), + ); + + return false; + } + + let header = match storage.header(&block) { + Some((header, _)) => header, + None => { + log::trace!( + target: "runtime", + "Tx finality check failed: can't find header in the storage: {}", + block, + ); + + return false; + } + }; + let finalized = storage.finalized_block(); + + // if header is not yet finalized => return + if header.number > finalized.number { + log::trace!( + target: "runtime", + "Tx finality check failed: header {}/{} is not finalized. Best finalized: {}", + header.number, + block, + finalized.number, + ); + + return false; + } + + // check if header is actually finalized + let is_finalized = match header.number < finalized.number { + true => ancestry(storage, finalized.hash) + .skip_while(|(_, ancestor)| ancestor.number > header.number) + .any(|(ancestor_hash, _)| ancestor_hash == block), + false => block == finalized.hash, + }; + if !is_finalized { + log::trace!( + target: "runtime", + "Tx finality check failed: header {} is not finalized: no canonical path to best finalized block {}", + block, + finalized.hash, + ); + + return false; + } + + // verify that transaction is included in the block + if let Err(computed_root) = header.check_transactions_root(proof.iter().map(|(tx, _)| tx)) { + log::trace!( + target: "runtime", + "Tx finality check failed: transactions root mismatch. 
Expected: {}, computed: {}", + header.transactions_root, + computed_root, + ); + + return false; + } + + // verify that transaction receipt is included in the block + if let Err(computed_root) = header.check_raw_receipts_root(proof.iter().map(|(_, r)| r)) { + log::trace!( + target: "runtime", + "Tx finality check failed: receipts root mismatch. Expected: {}, computed: {}", + header.receipts_root, + computed_root, + ); + + return false; + } + + // check that transaction has completed successfully + let is_successful_raw_receipt = Receipt::is_successful_raw_receipt(&proof[tx_index as usize].1); + match is_successful_raw_receipt { + Ok(true) => true, + Ok(false) => { + log::trace!( + target: "runtime", + "Tx finality check failed: receipt shows that transaction has failed", + ); + + false + } + Err(err) => { + log::trace!( + target: "runtime", + "Tx finality check failed: receipt check has failed: {}", + err, + ); + + false + } + } +} + +/// Transaction pool configuration. +fn pool_configuration() -> PoolConfiguration { + PoolConfiguration { + max_future_number_difference: 10, + } +} + +/// Return iterator of given header ancestors. 
+fn ancestry(storage: &'_ S, mut parent_hash: H256) -> impl Iterator + '_ { + sp_std::iter::from_fn(move || { + let (header, _) = storage.header(&parent_hash)?; + if header.number == 0 { + return None; + } + + let hash = parent_hash; + parent_hash = header.parent_hash; + Some((hash, header)) + }) +} + +#[cfg(test)] +pub(crate) mod tests { + use super::*; + use crate::finality::FinalityAncestor; + use crate::mock::{ + genesis, insert_header, run_test, run_test_with_genesis, validators_addresses, HeaderBuilder, TestRuntime, + GAS_LIMIT, + }; + use crate::test_utils::validator_utils::*; + use bp_eth_poa::compute_merkle_root; + + const TOTAL_VALIDATORS: usize = 3; + + fn example_tx() -> Vec { + vec![42] + } + + fn example_tx_receipt(success: bool) -> Vec { + Receipt { + // the only thing that we care of: + outcome: bp_eth_poa::TransactionOutcome::StatusCode(if success { 1 } else { 0 }), + gas_used: Default::default(), + log_bloom: Default::default(), + logs: Vec::new(), + } + .rlp() + } + + fn example_header_with_failed_receipt() -> AuraHeader { + HeaderBuilder::with_parent(&example_header()) + .transactions_root(compute_merkle_root(vec![example_tx()].into_iter())) + .receipts_root(compute_merkle_root(vec![example_tx_receipt(false)].into_iter())) + .sign_by(&validator(0)) + } + + fn example_header() -> AuraHeader { + HeaderBuilder::with_parent(&example_header_parent()) + .transactions_root(compute_merkle_root(vec![example_tx()].into_iter())) + .receipts_root(compute_merkle_root(vec![example_tx_receipt(true)].into_iter())) + .sign_by(&validator(0)) + } + + fn example_header_parent() -> AuraHeader { + HeaderBuilder::with_parent(&genesis()) + .transactions_root(compute_merkle_root(vec![example_tx()].into_iter())) + .receipts_root(compute_merkle_root(vec![example_tx_receipt(true)].into_iter())) + .sign_by(&validator(0)) + } + + fn with_headers_to_prune(f: impl Fn(BridgeStorage) -> T) -> T { + run_test(TOTAL_VALIDATORS, |ctx| { + for i in 1..10 { + let mut headers_by_number 
= Vec::with_capacity(5); + for j in 0..5 { + let header = HeaderBuilder::with_parent_number(i - 1) + .gas_limit((GAS_LIMIT + j).into()) + .sign_by_set(&ctx.validators); + let hash = header.compute_hash(); + headers_by_number.push(hash); + Headers::::insert( + hash, + StoredHeader { + submitter: None, + header, + total_difficulty: 0.into(), + next_validators_set_id: 0, + last_signal_block: None, + }, + ); + + if i == 7 && j == 1 { + ScheduledChanges::::insert( + hash, + AuraScheduledChange { + validators: validators_addresses(5), + prev_signal_block: None, + }, + ); + } + } + HeadersByNumber::::insert(i, headers_by_number); + } + + f(BridgeStorage::new()) + }) + } + + #[test] + fn blocks_are_not_pruned_if_range_is_empty() { + with_headers_to_prune(|storage| { + BlocksToPrune::::put(PruningRange { + oldest_unpruned_block: 5, + oldest_block_to_keep: 5, + }); + + // try to prune blocks [5; 10) + storage.prune_blocks(0xFFFF, 10, 5); + assert_eq!(HeadersByNumber::::get(&5).unwrap().len(), 5); + assert_eq!( + BlocksToPrune::::get(), + PruningRange { + oldest_unpruned_block: 5, + oldest_block_to_keep: 5, + }, + ); + }); + } + + #[test] + fn blocks_to_prune_never_shrinks_from_the_end() { + with_headers_to_prune(|storage| { + BlocksToPrune::::put(PruningRange { + oldest_unpruned_block: 0, + oldest_block_to_keep: 5, + }); + + // try to prune blocks [5; 10) + storage.prune_blocks(0xFFFF, 10, 3); + assert_eq!( + BlocksToPrune::::get(), + PruningRange { + oldest_unpruned_block: 5, + oldest_block_to_keep: 5, + }, + ); + }); + } + + #[test] + fn blocks_are_not_pruned_if_limit_is_zero() { + with_headers_to_prune(|storage| { + // try to prune blocks [0; 10) + storage.prune_blocks(0, 10, 10); + assert!(HeadersByNumber::::get(&0).is_some()); + assert!(HeadersByNumber::::get(&1).is_some()); + assert!(HeadersByNumber::::get(&2).is_some()); + assert!(HeadersByNumber::::get(&3).is_some()); + assert_eq!( + BlocksToPrune::::get(), + PruningRange { + oldest_unpruned_block: 0, + 
oldest_block_to_keep: 10, + }, + ); + }); + } + + #[test] + fn blocks_are_pruned_if_limit_is_non_zero() { + with_headers_to_prune(|storage| { + // try to prune blocks [0; 10) + storage.prune_blocks(7, 10, 10); + // 1 headers with number = 0 is pruned (1 total) + assert!(HeadersByNumber::::get(&0).is_none()); + // 5 headers with number = 1 are pruned (6 total) + assert!(HeadersByNumber::::get(&1).is_none()); + // 1 header with number = 2 are pruned (7 total) + assert_eq!(HeadersByNumber::::get(&2).unwrap().len(), 4); + assert_eq!( + BlocksToPrune::::get(), + PruningRange { + oldest_unpruned_block: 2, + oldest_block_to_keep: 10, + }, + ); + + // try to prune blocks [2; 10) + storage.prune_blocks(11, 10, 10); + // 4 headers with number = 2 are pruned (4 total) + assert!(HeadersByNumber::::get(&2).is_none()); + // 5 headers with number = 3 are pruned (9 total) + assert!(HeadersByNumber::::get(&3).is_none()); + // 2 headers with number = 4 are pruned (11 total) + assert_eq!(HeadersByNumber::::get(&4).unwrap().len(), 3); + assert_eq!( + BlocksToPrune::::get(), + PruningRange { + oldest_unpruned_block: 4, + oldest_block_to_keep: 10, + }, + ); + }); + } + + #[test] + fn pruning_stops_on_unfainalized_block_with_scheduled_change() { + with_headers_to_prune(|storage| { + // try to prune blocks [0; 10) + // last finalized block is 5 + // and one of blocks#7 has scheduled change + // => we won't prune any block#7 at all + storage.prune_blocks(0xFFFF, 5, 10); + assert!(HeadersByNumber::::get(&0).is_none()); + assert!(HeadersByNumber::::get(&1).is_none()); + assert!(HeadersByNumber::::get(&2).is_none()); + assert!(HeadersByNumber::::get(&3).is_none()); + assert!(HeadersByNumber::::get(&4).is_none()); + assert!(HeadersByNumber::::get(&5).is_none()); + assert!(HeadersByNumber::::get(&6).is_none()); + assert_eq!(HeadersByNumber::::get(&7).unwrap().len(), 5); + assert_eq!( + BlocksToPrune::::get(), + PruningRange { + oldest_unpruned_block: 7, + oldest_block_to_keep: 10, + }, + ); + 
}); + } + + #[test] + fn finality_votes_are_cached() { + run_test(TOTAL_VALIDATORS, |ctx| { + let mut storage = BridgeStorage::::new(); + let interval = ::FinalityVotesCachingInterval::get().unwrap(); + + // for all headers with number < interval, cache entry is not created + for i in 1..interval { + let header = HeaderBuilder::with_parent_number(i - 1).sign_by_set(&ctx.validators); + let id = header.compute_id(); + insert_header(&mut storage, header); + assert_eq!(FinalityCache::::get(&id.hash), None); + } + + // for header with number = interval, cache entry is created + let header_with_entry = HeaderBuilder::with_parent_number(interval - 1).sign_by_set(&ctx.validators); + let header_with_entry_hash = header_with_entry.compute_hash(); + insert_header(&mut storage, header_with_entry); + assert!(FinalityCache::::get(&header_with_entry_hash).is_some()); + + // when we later prune this header, cache entry is removed + BlocksToPrune::::put(PruningRange { + oldest_unpruned_block: interval - 1, + oldest_block_to_keep: interval - 1, + }); + storage.finalize_and_prune_headers(None, interval + 1); + assert_eq!(FinalityCache::::get(&header_with_entry_hash), None); + }); + } + + #[test] + fn cached_finality_votes_finds_entry() { + run_test(TOTAL_VALIDATORS, |ctx| { + // insert 5 headers + let mut storage = BridgeStorage::::new(); + let mut headers = Vec::new(); + for i in 1..5 { + let header = HeaderBuilder::with_parent_number(i - 1).sign_by_set(&ctx.validators); + headers.push(header.clone()); + insert_header(&mut storage, header); + } + + // when inserting header#6, entry isn't found + let id5 = headers.last().unwrap().compute_id(); + assert_eq!( + storage.cached_finality_votes(&id5, &genesis().compute_id(), |_| false), + CachedFinalityVotes { + stopped_at_finalized_sibling: false, + unaccounted_ancestry: headers + .iter() + .map(|header| (header.compute_id(), None, header.clone(),)) + .rev() + .collect(), + votes: None, + }, + ); + + // let's now create entry at #3 + let 
hash3 = headers[2].compute_hash(); + let votes_at_3 = FinalityVotes { + votes: vec![([42; 20].into(), 21)].into_iter().collect(), + ancestry: vec![FinalityAncestor { + id: HeaderId { + number: 100, + hash: Default::default(), + }, + ..Default::default() + }] + .into_iter() + .collect(), + }; + FinalityCache::::insert(hash3, votes_at_3.clone()); + + // searching at #6 again => entry is found + assert_eq!( + storage.cached_finality_votes(&id5, &genesis().compute_id(), |_| false), + CachedFinalityVotes { + stopped_at_finalized_sibling: false, + unaccounted_ancestry: headers + .iter() + .skip(3) + .map(|header| (header.compute_id(), None, header.clone(),)) + .rev() + .collect(), + votes: Some(votes_at_3), + }, + ); + }); + } + + #[test] + fn cached_finality_votes_stops_at_finalized_sibling() { + run_test(TOTAL_VALIDATORS, |ctx| { + let mut storage = BridgeStorage::::new(); + + // insert header1 + let header1 = HeaderBuilder::with_parent_number(0).sign_by_set(&ctx.validators); + let header1_id = header1.compute_id(); + insert_header(&mut storage, header1); + + // insert header1' - sibling of header1 + let header1s = HeaderBuilder::with_parent_number(0) + .gas_limit((GAS_LIMIT + 1).into()) + .sign_by_set(&ctx.validators); + let header1s_id = header1s.compute_id(); + insert_header(&mut storage, header1s); + + // header1 is finalized + FinalizedBlock::::put(header1_id); + + // trying to get finality votes when importing header2 -> header1 succeeds + assert!( + !storage + .cached_finality_votes(&header1_id, &genesis().compute_id(), |_| false) + .stopped_at_finalized_sibling + ); + + // trying to get finality votes when importing header2s -> header1s fails + assert!( + storage + .cached_finality_votes(&header1s_id, &header1_id, |_| false) + .stopped_at_finalized_sibling + ); + }); + } + + #[test] + fn verify_transaction_finalized_works_for_best_finalized_header() { + run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { + let storage = BridgeStorage::::new(); + 
assert_eq!( + verify_transaction_finalized( + &storage, + example_header().compute_hash(), + 0, + &[(example_tx(), example_tx_receipt(true))], + ), + true, + ); + }); + } + + #[test] + fn verify_transaction_finalized_works_for_best_finalized_header_ancestor() { + run_test(TOTAL_VALIDATORS, |_| { + let mut storage = BridgeStorage::::new(); + insert_header(&mut storage, example_header_parent()); + insert_header(&mut storage, example_header()); + storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); + assert_eq!( + verify_transaction_finalized( + &storage, + example_header_parent().compute_hash(), + 0, + &[(example_tx(), example_tx_receipt(true))], + ), + true, + ); + }); + } + + #[test] + fn verify_transaction_finalized_rejects_proof_with_missing_tx() { + run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { + let storage = BridgeStorage::::new(); + assert_eq!( + verify_transaction_finalized(&storage, example_header().compute_hash(), 1, &[],), + false, + ); + }); + } + + #[test] + fn verify_transaction_finalized_rejects_unknown_header() { + run_test(TOTAL_VALIDATORS, |_| { + let storage = BridgeStorage::::new(); + assert_eq!( + verify_transaction_finalized(&storage, example_header().compute_hash(), 1, &[],), + false, + ); + }); + } + + #[test] + fn verify_transaction_finalized_rejects_unfinalized_header() { + run_test(TOTAL_VALIDATORS, |_| { + let mut storage = BridgeStorage::::new(); + insert_header(&mut storage, example_header_parent()); + insert_header(&mut storage, example_header()); + assert_eq!( + verify_transaction_finalized( + &storage, + example_header().compute_hash(), + 0, + &[(example_tx(), example_tx_receipt(true))], + ), + false, + ); + }); + } + + #[test] + fn verify_transaction_finalized_rejects_finalized_header_sibling() { + run_test(TOTAL_VALIDATORS, |_| { + let mut finalized_header_sibling = example_header(); + finalized_header_sibling.timestamp = 1; + let finalized_header_sibling_hash = 
finalized_header_sibling.compute_hash(); + + let mut storage = BridgeStorage::::new(); + insert_header(&mut storage, example_header_parent()); + insert_header(&mut storage, example_header()); + insert_header(&mut storage, finalized_header_sibling); + storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); + assert_eq!( + verify_transaction_finalized( + &storage, + finalized_header_sibling_hash, + 0, + &[(example_tx(), example_tx_receipt(true))], + ), + false, + ); + }); + } + + #[test] + fn verify_transaction_finalized_rejects_finalized_header_uncle() { + run_test(TOTAL_VALIDATORS, |_| { + let mut finalized_header_uncle = example_header_parent(); + finalized_header_uncle.timestamp = 1; + let finalized_header_uncle_hash = finalized_header_uncle.compute_hash(); + + let mut storage = BridgeStorage::::new(); + insert_header(&mut storage, example_header_parent()); + insert_header(&mut storage, finalized_header_uncle); + insert_header(&mut storage, example_header()); + storage.finalize_and_prune_headers(Some(example_header().compute_id()), 0); + assert_eq!( + verify_transaction_finalized( + &storage, + finalized_header_uncle_hash, + 0, + &[(example_tx(), example_tx_receipt(true))], + ), + false, + ); + }); + } + + #[test] + fn verify_transaction_finalized_rejects_invalid_transactions_in_proof() { + run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { + let storage = BridgeStorage::::new(); + assert_eq!( + verify_transaction_finalized( + &storage, + example_header().compute_hash(), + 0, + &[ + (example_tx(), example_tx_receipt(true)), + (example_tx(), example_tx_receipt(true)) + ], + ), + false, + ); + }); + } + + #[test] + fn verify_transaction_finalized_rejects_invalid_receipts_in_proof() { + run_test_with_genesis(example_header(), TOTAL_VALIDATORS, |_| { + let storage = BridgeStorage::::new(); + assert_eq!( + verify_transaction_finalized( + &storage, + example_header().compute_hash(), + 0, + &[(example_tx(), vec![42])], + ), + false, + ); 
+ }); + } + + #[test] + fn verify_transaction_finalized_rejects_failed_transaction() { + run_test_with_genesis(example_header_with_failed_receipt(), TOTAL_VALIDATORS, |_| { + let storage = BridgeStorage::::new(); + assert_eq!( + verify_transaction_finalized( + &storage, + example_header_with_failed_receipt().compute_hash(), + 0, + &[(example_tx(), example_tx_receipt(false))], + ), + false, + ); + }); + } +} diff --git a/polkadot/modules/ethereum/src/mock.rs b/polkadot/modules/ethereum/src/mock.rs new file mode 100644 index 00000000000..35c093f3638 --- /dev/null +++ b/polkadot/modules/ethereum/src/mock.rs @@ -0,0 +1,192 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +// From construct_runtime macro +#![allow(clippy::from_over_into)] + +pub use crate::test_utils::{insert_header, validator_utils::*, validators_change_receipt, HeaderBuilder, GAS_LIMIT}; +pub use bp_eth_poa::signatures::secret_to_address; + +use crate::validators::{ValidatorsConfiguration, ValidatorsSource}; +use crate::{AuraConfiguration, ChainTime, Config, GenesisConfig as CrateGenesisConfig, PruningStrategy}; +use bp_eth_poa::{Address, AuraHeader, H256, U256}; +use frame_support::{parameter_types, weights::Weight}; +use secp256k1::SecretKey; +use sp_runtime::{ + testing::Header as SubstrateHeader, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, +}; + +pub type AccountId = u64; + +type Block = frame_system::mocking::MockBlock; +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + +use crate as pallet_ethereum; + +frame_support::construct_runtime! { + pub enum TestRuntime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Ethereum: pallet_ethereum::{Pallet, Call}, + } +} + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} + +impl frame_system::Config for TestRuntime { + type Origin = Origin; + type Index = u64; + type Call = Call; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = SubstrateHeader; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type BaseCallFilter = (); + type SystemWeightInfo = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type SS58Prefix = (); + type OnSetCode = (); +} + +parameter_types! { + pub const TestFinalityVotesCachingInterval: Option = Some(16); + pub TestAuraConfiguration: AuraConfiguration = test_aura_config(); + pub TestValidatorsConfiguration: ValidatorsConfiguration = test_validators_config(); +} + +impl Config for TestRuntime { + type AuraConfiguration = TestAuraConfiguration; + type ValidatorsConfiguration = TestValidatorsConfiguration; + type FinalityVotesCachingInterval = TestFinalityVotesCachingInterval; + type PruningStrategy = KeepSomeHeadersBehindBest; + type ChainTime = ConstChainTime; + type OnHeadersSubmitted = (); +} + +/// Test context. +pub struct TestContext { + /// Initial (genesis) header. + pub genesis: AuraHeader, + /// Number of initial validators. + pub total_validators: usize, + /// Secret keys of validators, ordered by validator index. + pub validators: Vec, + /// Addresses of validators, ordered by validator index. + pub addresses: Vec
, +} + +/// Aura configuration that is used in tests by default. +pub fn test_aura_config() -> AuraConfiguration { + AuraConfiguration { + empty_steps_transition: u64::max_value(), + strict_empty_steps_transition: 0, + validate_step_transition: 0x16e360, + validate_score_transition: 0x41a3c4, + two_thirds_majority_transition: u64::max_value(), + min_gas_limit: 0x1388.into(), + max_gas_limit: U256::max_value(), + maximum_extra_data_size: 0x20, + } +} + +/// Validators configuration that is used in tests by default. +pub fn test_validators_config() -> ValidatorsConfiguration { + ValidatorsConfiguration::Single(ValidatorsSource::List(validators_addresses(3))) +} + +/// Genesis header that is used in tests by default. +pub fn genesis() -> AuraHeader { + HeaderBuilder::genesis().sign_by(&validator(0)) +} + +/// Run test with default genesis header. +pub fn run_test(total_validators: usize, test: impl FnOnce(TestContext) -> T) -> T { + run_test_with_genesis(genesis(), total_validators, test) +} + +/// Run test with default genesis header. +pub fn run_test_with_genesis( + genesis: AuraHeader, + total_validators: usize, + test: impl FnOnce(TestContext) -> T, +) -> T { + let validators = validators(total_validators); + let addresses = validators_addresses(total_validators); + sp_io::TestExternalities::new( + CrateGenesisConfig { + initial_header: genesis.clone(), + initial_difficulty: 0.into(), + initial_validators: addresses.clone(), + } + .build_storage::() + .unwrap(), + ) + .execute_with(|| { + test(TestContext { + genesis, + total_validators, + validators, + addresses, + }) + }) +} + +/// Pruning strategy that keeps 10 headers behind best block. 
+pub struct KeepSomeHeadersBehindBest(pub u64); + +impl Default for KeepSomeHeadersBehindBest { + fn default() -> KeepSomeHeadersBehindBest { + KeepSomeHeadersBehindBest(10) + } +} + +impl PruningStrategy for KeepSomeHeadersBehindBest { + fn pruning_upper_bound(&mut self, best_number: u64, _: u64) -> u64 { + best_number.saturating_sub(self.0) + } +} + +/// Constant chain time +#[derive(Default)] +pub struct ConstChainTime; + +impl ChainTime for ConstChainTime { + fn is_timestamp_ahead(&self, timestamp: u64) -> bool { + let now = i32::max_value() as u64 / 2; + timestamp > now + } +} diff --git a/polkadot/modules/ethereum/src/test_utils.rs b/polkadot/modules/ethereum/src/test_utils.rs new file mode 100644 index 00000000000..18ad6876d68 --- /dev/null +++ b/polkadot/modules/ethereum/src/test_utils.rs @@ -0,0 +1,321 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Utilities for testing and benchmarking the Ethereum Bridge Pallet. +//! +//! Although the name implies that it is used by tests, it shouldn't be be used _directly_ by tests. +//! Instead these utilities should be used by the Mock runtime, which in turn is used by tests. +//! +//! On the other hand, they may be used directly by the bechmarking module. 
+ +// Since this is test code it's fine that not everything is used +#![allow(dead_code)] + +use crate::finality::FinalityVotes; +use crate::validators::CHANGE_EVENT_HASH; +use crate::verification::calculate_score; +use crate::{Config, HeaderToImport, Storage}; + +use bp_eth_poa::{ + rlp_encode, + signatures::{secret_to_address, sign, SignHeader}, + Address, AuraHeader, Bloom, Receipt, SealedEmptyStep, H256, U256, +}; +use secp256k1::SecretKey; +use sp_std::prelude::*; + +/// Gas limit valid in test environment. +pub const GAS_LIMIT: u64 = 0x2000; + +/// Test header builder. +pub struct HeaderBuilder { + header: AuraHeader, + parent_header: AuraHeader, +} + +impl HeaderBuilder { + /// Creates default genesis header. + pub fn genesis() -> Self { + let current_step = 0u64; + Self { + header: AuraHeader { + gas_limit: GAS_LIMIT.into(), + seal: vec![bp_eth_poa::rlp_encode(¤t_step).to_vec(), vec![]], + ..Default::default() + }, + parent_header: Default::default(), + } + } + + /// Creates default header on top of test parent with given hash. + #[cfg(test)] + pub fn with_parent_hash(parent_hash: H256) -> Self { + Self::with_parent_hash_on_runtime::(parent_hash) + } + + /// Creates default header on top of test parent with given number. First parent is selected. + #[cfg(test)] + pub fn with_parent_number(parent_number: u64) -> Self { + Self::with_parent_number_on_runtime::(parent_number) + } + + /// Creates default header on top of parent with given hash. + pub fn with_parent_hash_on_runtime, I: crate::Instance>(parent_hash: H256) -> Self { + use crate::Headers; + use frame_support::StorageMap; + + let parent_header = Headers::::get(&parent_hash).unwrap().header; + Self::with_parent(&parent_header) + } + + /// Creates default header on top of parent with given number. First parent is selected. 
+ pub fn with_parent_number_on_runtime, I: crate::Instance>(parent_number: u64) -> Self { + use crate::HeadersByNumber; + use frame_support::StorageMap; + + let parent_hash = HeadersByNumber::::get(parent_number).unwrap()[0]; + Self::with_parent_hash_on_runtime::(parent_hash) + } + + /// Creates default header on top of non-existent parent. + #[cfg(test)] + pub fn with_number(number: u64) -> Self { + Self::with_parent(&AuraHeader { + number: number - 1, + seal: vec![bp_eth_poa::rlp_encode(&(number - 1)).to_vec(), vec![]], + ..Default::default() + }) + } + + /// Creates default header on top of given parent. + pub fn with_parent(parent_header: &AuraHeader) -> Self { + let parent_step = parent_header.step().unwrap(); + let current_step = parent_step + 1; + Self { + header: AuraHeader { + parent_hash: parent_header.compute_hash(), + number: parent_header.number + 1, + gas_limit: GAS_LIMIT.into(), + seal: vec![bp_eth_poa::rlp_encode(¤t_step).to_vec(), vec![]], + difficulty: calculate_score(parent_step, current_step, 0), + ..Default::default() + }, + parent_header: parent_header.clone(), + } + } + + /// Update step of this header. + pub fn step(mut self, step: u64) -> Self { + let parent_step = self.parent_header.step(); + self.header.seal[0] = rlp_encode(&step).to_vec(); + self.header.difficulty = parent_step + .map(|parent_step| calculate_score(parent_step, step, 0)) + .unwrap_or_default(); + self + } + + /// Adds empty steps to this header. 
+ pub fn empty_steps(mut self, empty_steps: &[(&SecretKey, u64)]) -> Self { + let sealed_empty_steps = empty_steps + .iter() + .map(|(author, step)| { + let mut empty_step = SealedEmptyStep { + step: *step, + signature: Default::default(), + }; + let message = empty_step.message(&self.header.parent_hash); + let signature: [u8; 65] = sign(author, message).into(); + empty_step.signature = signature.into(); + empty_step + }) + .collect::>(); + + // by default in test configuration headers are generated without empty steps seal + if self.header.seal.len() < 3 { + self.header.seal.push(Vec::new()); + } + + self.header.seal[2] = SealedEmptyStep::rlp_of(&sealed_empty_steps); + self + } + + /// Update difficulty field of this header. + pub fn difficulty(mut self, difficulty: U256) -> Self { + self.header.difficulty = difficulty; + self + } + + /// Update extra data field of this header. + pub fn extra_data(mut self, extra_data: Vec) -> Self { + self.header.extra_data = extra_data; + self + } + + /// Update gas limit field of this header. + pub fn gas_limit(mut self, gas_limit: U256) -> Self { + self.header.gas_limit = gas_limit; + self + } + + /// Update gas used field of this header. + pub fn gas_used(mut self, gas_used: U256) -> Self { + self.header.gas_used = gas_used; + self + } + + /// Update log bloom field of this header. + pub fn log_bloom(mut self, log_bloom: Bloom) -> Self { + self.header.log_bloom = log_bloom; + self + } + + /// Update receipts root field of this header. + pub fn receipts_root(mut self, receipts_root: H256) -> Self { + self.header.receipts_root = receipts_root; + self + } + + /// Update timestamp field of this header. + pub fn timestamp(mut self, timestamp: u64) -> Self { + self.header.timestamp = timestamp; + self + } + + /// Update transactions root field of this header. + pub fn transactions_root(mut self, transactions_root: H256) -> Self { + self.header.transactions_root = transactions_root; + self + } + + /// Signs header by given author. 
+ pub fn sign_by(self, author: &SecretKey) -> AuraHeader { + self.header.sign_by(author) + } + + /// Signs header by given authors set. + pub fn sign_by_set(self, authors: &[SecretKey]) -> AuraHeader { + self.header.sign_by_set(authors) + } +} + +/// Helper function for getting a genesis header which has been signed by an authority. +pub fn build_genesis_header(author: &SecretKey) -> AuraHeader { + let genesis = HeaderBuilder::genesis(); + genesis.header.sign_by(&author) +} + +/// Helper function for building a custom child header which has been signed by an authority. +pub fn build_custom_header(author: &SecretKey, previous: &AuraHeader, customize_header: F) -> AuraHeader +where + F: FnOnce(AuraHeader) -> AuraHeader, +{ + let new_header = HeaderBuilder::with_parent(&previous); + let custom_header = customize_header(new_header.header); + custom_header.sign_by(author) +} + +/// Insert unverified header into storage. +/// +/// This function assumes that the header is signed by validator from the current set. +pub fn insert_header(storage: &mut S, header: AuraHeader) { + let id = header.compute_id(); + let best_finalized = storage.finalized_block(); + let import_context = storage.import_context(None, &header.parent_hash).unwrap(); + let parent_finality_votes = storage.cached_finality_votes(&header.parent_id().unwrap(), &best_finalized, |_| false); + let finality_votes = crate::finality::prepare_votes( + parent_finality_votes, + best_finalized, + &import_context.validators_set().validators.iter().collect(), + id, + &header, + None, + ) + .unwrap(); + + storage.insert_header(HeaderToImport { + context: storage.import_context(None, &header.parent_hash).unwrap(), + is_best: true, + id, + header, + total_difficulty: 0.into(), + enacted_change: None, + scheduled_change: None, + finality_votes, + }); +} + +/// Insert unverified header into storage. +/// +/// No assumptions about header author are made. 
The cost is that finality votes cache +/// is filled incorrectly, so this function shall not be used if you're going to insert +/// (or import) header descendants. +pub fn insert_dummy_header(storage: &mut S, header: AuraHeader) { + storage.insert_header(HeaderToImport { + context: storage.import_context(None, &header.parent_hash).unwrap(), + is_best: true, + id: header.compute_id(), + header, + total_difficulty: 0.into(), + enacted_change: None, + scheduled_change: None, + finality_votes: FinalityVotes::default(), + }); +} + +pub fn validators_change_receipt(parent_hash: H256) -> Receipt { + use bp_eth_poa::{LogEntry, TransactionOutcome}; + + Receipt { + gas_used: 0.into(), + log_bloom: (&[0xff; 256]).into(), + outcome: TransactionOutcome::Unknown, + logs: vec![LogEntry { + address: [3; 20].into(), + topics: vec![CHANGE_EVENT_HASH.into(), parent_hash], + data: vec![ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + ], + }], + } +} + +pub mod validator_utils { + use super::*; + + /// Return key pair of given test validator. + pub fn validator(index: usize) -> SecretKey { + let mut raw_secret = [0u8; 32]; + raw_secret[..8].copy_from_slice(&(index + 1).to_le_bytes()); + SecretKey::parse(&raw_secret).unwrap() + } + + /// Return key pairs of all test validators. + pub fn validators(count: usize) -> Vec { + (0..count).map(validator).collect() + } + + /// Return address of test validator. + pub fn validator_address(index: usize) -> Address { + secret_to_address(&validator(index)) + } + + /// Return addresses of all test validators. + pub fn validators_addresses(count: usize) -> Vec
{ + (0..count).map(validator_address).collect() + } +} diff --git a/polkadot/modules/ethereum/src/validators.rs b/polkadot/modules/ethereum/src/validators.rs new file mode 100644 index 00000000000..7ec22a44391 --- /dev/null +++ b/polkadot/modules/ethereum/src/validators.rs @@ -0,0 +1,476 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::error::Error; +use crate::{ChangeToEnact, Storage}; +use bp_eth_poa::{Address, AuraHeader, HeaderId, LogEntry, Receipt, U256}; +use sp_std::prelude::*; + +/// The hash of InitiateChange event of the validators set contract. +pub(crate) const CHANGE_EVENT_HASH: &[u8; 32] = &[ + 0x55, 0x25, 0x2f, 0xa6, 0xee, 0xe4, 0x74, 0x1b, 0x4e, 0x24, 0xa7, 0x4a, 0x70, 0xe9, 0xc1, 0x1f, 0xd2, 0xc2, 0x28, + 0x1d, 0xf8, 0xd6, 0xea, 0x13, 0x12, 0x6f, 0xf8, 0x45, 0xf7, 0x82, 0x5c, 0x89, +]; + +/// Where source of validators addresses come from. This covers the chain lifetime. +pub enum ValidatorsConfiguration { + /// There's a single source for the whole chain lifetime. + Single(ValidatorsSource), + /// Validators source changes at given blocks. The blocks are ordered + /// by the block number. + Multi(Vec<(u64, ValidatorsSource)>), +} + +/// Where validators addresses come from. +/// +/// This source is valid within some blocks range. 
The blocks range could +/// cover multiple epochs - i.e. the validators that are authoring blocks +/// within this range could change, but the source itself can not. +#[cfg_attr(any(test, feature = "runtime-benchmarks"), derive(Debug, PartialEq))] +pub enum ValidatorsSource { + /// The validators addresses are hardcoded and never change. + List(Vec
), + /// The validators addresses are determined by the validators set contract + /// deployed at given address. The contract must implement the `ValidatorSet` + /// interface. Additionally, the initial validators set must be provided. + Contract(Address, Vec
), +} + +/// A short hand for optional validators change. +pub type ValidatorsChange = Option>; + +/// Validators manager. +pub struct Validators<'a> { + config: &'a ValidatorsConfiguration, +} + +impl<'a> Validators<'a> { + /// Creates new validators manager using given configuration. + pub fn new(config: &'a ValidatorsConfiguration) -> Self { + Self { config } + } + + /// Returns true if header (probabilistically) signals validators change and + /// the caller needs to provide transactions receipts to import the header. + pub fn maybe_signals_validators_change(&self, header: &AuraHeader) -> bool { + let (_, _, source) = self.source_at(header.number); + + // if we are taking validators set from the fixed list, there's always + // single epoch + // => we never require transactions receipts + let contract_address = match source { + ValidatorsSource::List(_) => return false, + ValidatorsSource::Contract(contract_address, _) => contract_address, + }; + + // else we need to check logs bloom and if it has required bits set, it means + // that the contract has (probably) emitted epoch change event + let expected_bloom = LogEntry { + address: *contract_address, + topics: vec![CHANGE_EVENT_HASH.into(), header.parent_hash], + data: Vec::new(), // irrelevant for bloom. + } + .bloom(); + + header.log_bloom.contains(&expected_bloom) + } + + /// Extracts validators change signal from the header. + /// + /// Returns tuple where first element is the change scheduled by this header + /// (i.e. this change is only applied starting from the block that has finalized + /// current block). The second element is the immediately applied change. 
+ pub fn extract_validators_change( + &self, + header: &AuraHeader, + receipts: Option>, + ) -> Result<(ValidatorsChange, ValidatorsChange), Error> { + // let's first check if new source is starting from this header + let (source_index, _, source) = self.source_at(header.number); + let (next_starts_at, next_source) = self.source_at_next_header(source_index, header.number); + if next_starts_at == header.number { + match *next_source { + ValidatorsSource::List(ref new_list) => return Ok((None, Some(new_list.clone()))), + ValidatorsSource::Contract(_, ref new_list) => return Ok((Some(new_list.clone()), None)), + } + } + + // else deal with previous source + // + // if we are taking validators set from the fixed list, there's always + // single epoch + // => we never require transactions receipts + let contract_address = match source { + ValidatorsSource::List(_) => return Ok((None, None)), + ValidatorsSource::Contract(contract_address, _) => contract_address, + }; + + // else we need to check logs bloom and if it has required bits set, it means + // that the contract has (probably) emitted epoch change event + let expected_bloom = LogEntry { + address: *contract_address, + topics: vec![CHANGE_EVENT_HASH.into(), header.parent_hash], + data: Vec::new(), // irrelevant for bloom. 
+ } + .bloom(); + + if !header.log_bloom.contains(&expected_bloom) { + return Ok((None, None)); + } + + let receipts = receipts.ok_or(Error::MissingTransactionsReceipts)?; + if header.check_receipts_root(&receipts).is_err() { + return Err(Error::TransactionsReceiptsMismatch); + } + + // iterate in reverse because only the _last_ change in a given + // block actually has any effect + Ok(( + receipts + .iter() + .rev() + .filter(|r| r.log_bloom.contains(&expected_bloom)) + .flat_map(|r| r.logs.iter()) + .filter(|l| { + l.address == *contract_address + && l.topics.len() == 2 && l.topics[0].as_fixed_bytes() == CHANGE_EVENT_HASH + && l.topics[1] == header.parent_hash + }) + .filter_map(|l| { + let data_len = l.data.len(); + if data_len < 64 { + return None; + } + + let new_validators_len_u256 = U256::from_big_endian(&l.data[32..64]); + let new_validators_len = new_validators_len_u256.low_u64(); + if new_validators_len_u256 != new_validators_len.into() { + return None; + } + + if (data_len - 64) as u64 != new_validators_len.saturating_mul(32) { + return None; + } + + Some( + l.data[64..] + .chunks(32) + .map(|chunk| { + let mut new_validator = Address::default(); + new_validator.as_mut().copy_from_slice(&chunk[12..32]); + new_validator + }) + .collect(), + ) + }) + .next(), + None, + )) + } + + /// Finalize changes when blocks are finalized. 
+ pub fn finalize_validators_change( + &self, + storage: &S, + finalized_blocks: &[(HeaderId, Option)], + ) -> Option { + // if we haven't finalized any blocks, no changes may be finalized + let newest_finalized_id = match finalized_blocks.last().map(|(id, _)| id) { + Some(last_finalized_id) => last_finalized_id, + None => return None, + }; + let oldest_finalized_id = finalized_blocks + .first() + .map(|(id, _)| id) + .expect("finalized_blocks is not empty; qed"); + + // try to directly go to the header that has scheduled last change + // + // if we're unable to create import context for some block, it means + // that the header has already been pruned => it and its ancestors had + // no scheduled changes + // + // if we're unable to find scheduled changes for some block, it means + // that these changes have been finalized already + storage + .import_context(None, &newest_finalized_id.hash) + .and_then(|context| context.last_signal_block()) + .and_then(|signal_block| { + if signal_block.number >= oldest_finalized_id.number { + Some(signal_block) + } else { + None + } + }) + .and_then(|signal_block| { + storage + .scheduled_change(&signal_block.hash) + .map(|change| ChangeToEnact { + signal_block: Some(signal_block), + validators: change.validators, + }) + }) + } + + /// Returns source of validators that should author the header. + fn source_at(&self, header_number: u64) -> (usize, u64, &ValidatorsSource) { + match self.config { + ValidatorsConfiguration::Single(ref source) => (0, 0, source), + ValidatorsConfiguration::Multi(ref sources) => sources + .iter() + .rev() + .enumerate() + .find(|(_, &(begin, _))| begin < header_number) + .map(|(i, (begin, source))| (sources.len() - 1 - i, *begin, source)) + .expect( + "there's always entry for the initial block;\ + we do not touch any headers with number < initial block number; qed", + ), + } + } + + /// Returns source of validators that should author the next header. 
+ fn source_at_next_header(&self, header_source_index: usize, header_number: u64) -> (u64, &ValidatorsSource) { + match self.config { + ValidatorsConfiguration::Single(ref source) => (0, source), + ValidatorsConfiguration::Multi(ref sources) => { + let next_source_index = header_source_index + 1; + if next_source_index < sources.len() { + let next_source = &sources[next_source_index]; + if next_source.0 < header_number + 1 { + return (next_source.0, &next_source.1); + } + } + + let source = &sources[header_source_index]; + (source.0, &source.1) + } + } + } +} + +impl ValidatorsSource { + /// Returns initial validators set. + pub fn initial_epoch_validators(&self) -> Vec
{ + match self { + ValidatorsSource::List(ref list) => list.clone(), + ValidatorsSource::Contract(_, ref list) => list.clone(), + } + } +} + +#[cfg(test)] +pub(crate) mod tests { + use super::*; + use crate::mock::{run_test, validators_addresses, validators_change_receipt, TestRuntime}; + use crate::DefaultInstance; + use crate::{AuraScheduledChange, BridgeStorage, Headers, ScheduledChanges, StoredHeader}; + use bp_eth_poa::compute_merkle_root; + use frame_support::StorageMap; + + const TOTAL_VALIDATORS: usize = 3; + + #[test] + fn source_at_works() { + let config = ValidatorsConfiguration::Multi(vec![ + (0, ValidatorsSource::List(vec![[1; 20].into()])), + (100, ValidatorsSource::List(vec![[2; 20].into()])), + (200, ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])), + ]); + let validators = Validators::new(&config); + + assert_eq!( + validators.source_at(99), + (0, 0, &ValidatorsSource::List(vec![[1; 20].into()])), + ); + assert_eq!( + validators.source_at_next_header(0, 99), + (0, &ValidatorsSource::List(vec![[1; 20].into()])), + ); + + assert_eq!( + validators.source_at(100), + (0, 0, &ValidatorsSource::List(vec![[1; 20].into()])), + ); + assert_eq!( + validators.source_at_next_header(0, 100), + (100, &ValidatorsSource::List(vec![[2; 20].into()])), + ); + + assert_eq!( + validators.source_at(200), + (1, 100, &ValidatorsSource::List(vec![[2; 20].into()])), + ); + assert_eq!( + validators.source_at_next_header(1, 200), + (200, &ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])), + ); + } + + #[test] + fn maybe_signals_validators_change_works() { + // when contract is active, but bloom has no required bits set + let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(Default::default(), Vec::new())); + let validators = Validators::new(&config); + let mut header = AuraHeader { + number: u64::max_value(), + ..Default::default() + }; + assert!(!validators.maybe_signals_validators_change(&header)); + + // when contract is 
active and bloom has required bits set + header.log_bloom = (&[0xff; 256]).into(); + assert!(validators.maybe_signals_validators_change(&header)); + + // when list is active and bloom has required bits set + let config = ValidatorsConfiguration::Single(ValidatorsSource::List(vec![[42; 20].into()])); + let validators = Validators::new(&config); + assert!(!validators.maybe_signals_validators_change(&header)); + } + + #[test] + fn extract_validators_change_works() { + let config = ValidatorsConfiguration::Multi(vec![ + (0, ValidatorsSource::List(vec![[1; 20].into()])), + (100, ValidatorsSource::List(vec![[2; 20].into()])), + (200, ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])), + ]); + let validators = Validators::new(&config); + let mut header = AuraHeader { + number: 100, + ..Default::default() + }; + + // when we're at the block that switches to list source + assert_eq!( + validators.extract_validators_change(&header, None), + Ok((None, Some(vec![[2; 20].into()]))), + ); + + // when we're inside list range + header.number = 150; + assert_eq!(validators.extract_validators_change(&header, None), Ok((None, None)),); + + // when we're at the block that switches to contract source + header.number = 200; + assert_eq!( + validators.extract_validators_change(&header, None), + Ok((Some(vec![[3; 20].into()]), None)), + ); + + // when we're inside contract range and logs bloom signals change + // but we have no receipts + header.number = 250; + header.log_bloom = (&[0xff; 256]).into(); + assert_eq!( + validators.extract_validators_change(&header, None), + Err(Error::MissingTransactionsReceipts), + ); + + // when we're inside contract range and logs bloom signals change + // but there's no change in receipts + header.receipts_root = "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" + .parse() + .unwrap(); + assert_eq!( + validators.extract_validators_change(&header, Some(Vec::new())), + Ok((None, None)), + ); + + // when we're inside 
contract range and logs bloom signals change + // and there's change in receipts + let receipts = vec![validators_change_receipt(Default::default())]; + header.receipts_root = compute_merkle_root(receipts.iter().map(|r| r.rlp())); + assert_eq!( + validators.extract_validators_change(&header, Some(receipts)), + Ok((Some(vec![[7; 20].into()]), None)), + ); + + // when incorrect receipts root passed + assert_eq!( + validators.extract_validators_change(&header, Some(Vec::new())), + Err(Error::TransactionsReceiptsMismatch), + ); + } + + fn try_finalize_with_scheduled_change(scheduled_at: Option) -> Option { + run_test(TOTAL_VALIDATORS, |_| { + let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(Default::default(), Vec::new())); + let validators = Validators::new(&config); + let storage = BridgeStorage::::new(); + + // when we're finailizing blocks 10...100 + let id10 = HeaderId { + number: 10, + hash: [10; 32].into(), + }; + let id100 = HeaderId { + number: 100, + hash: [100; 32].into(), + }; + let finalized_blocks = vec![(id10, None), (id100, None)]; + let header100 = StoredHeader:: { + submitter: None, + header: AuraHeader { + number: 100, + ..Default::default() + }, + total_difficulty: 0.into(), + next_validators_set_id: 0, + last_signal_block: scheduled_at, + }; + let scheduled_change = AuraScheduledChange { + validators: validators_addresses(1), + prev_signal_block: None, + }; + Headers::::insert(id100.hash, header100); + if let Some(scheduled_at) = scheduled_at { + ScheduledChanges::::insert(scheduled_at.hash, scheduled_change); + } + + validators.finalize_validators_change(&storage, &finalized_blocks) + }) + } + + #[test] + fn finalize_validators_change_finalizes_scheduled_change() { + let id50 = HeaderId { + number: 50, + ..Default::default() + }; + assert_eq!( + try_finalize_with_scheduled_change(Some(id50)), + Some(ChangeToEnact { + signal_block: Some(id50), + validators: validators_addresses(1), + }), + ); + } + + #[test] + fn 
finalize_validators_change_does_not_finalize_when_changes_are_not_scheduled() { + assert_eq!(try_finalize_with_scheduled_change(None), None,); + } + + #[test] + fn finalize_validators_change_does_not_finalize_changes_when_they_are_outside_of_range() { + let id5 = HeaderId { + number: 5, + ..Default::default() + }; + assert_eq!(try_finalize_with_scheduled_change(Some(id5)), None,); + } +} diff --git a/polkadot/modules/ethereum/src/verification.rs b/polkadot/modules/ethereum/src/verification.rs new file mode 100644 index 00000000000..c79242d1d4d --- /dev/null +++ b/polkadot/modules/ethereum/src/verification.rs @@ -0,0 +1,945 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::error::Error; +use crate::validators::{Validators, ValidatorsConfiguration}; +use crate::{AuraConfiguration, AuraScheduledChange, ChainTime, ImportContext, PoolConfiguration, Storage}; +use bp_eth_poa::{ + public_to_address, step_validator, Address, AuraHeader, HeaderId, Receipt, SealedEmptyStep, H256, H520, U128, U256, +}; +use codec::Encode; +use sp_io::crypto::secp256k1_ecdsa_recover; +use sp_runtime::transaction_validity::TransactionTag; +use sp_std::{vec, vec::Vec}; + +/// Pre-check to see if should try and import this header. 
+/// Returns error if we should not try to import this block. +/// Returns ID of passed header and best finalized header. +pub fn is_importable_header(storage: &S, header: &AuraHeader) -> Result<(HeaderId, HeaderId), Error> { + // we never import any header that competes with finalized header + let finalized_id = storage.finalized_block(); + if header.number <= finalized_id.number { + return Err(Error::AncientHeader); + } + // we never import any header with known hash + let id = header.compute_id(); + if storage.header(&id.hash).is_some() { + return Err(Error::KnownHeader); + } + + Ok((id, finalized_id)) +} + +/// Try accept unsigned aura header into transaction pool. +/// +/// Returns required and provided tags. +pub fn accept_aura_header_into_pool( + storage: &S, + config: &AuraConfiguration, + validators_config: &ValidatorsConfiguration, + pool_config: &PoolConfiguration, + header: &AuraHeader, + chain_time: &CT, + receipts: Option<&Vec>, +) -> Result<(Vec, Vec), Error> { + // check if we can verify further + let (header_id, _) = is_importable_header(storage, header)?; + + // we can always do contextless checks + contextless_checks(config, header, chain_time)?; + + // we want to avoid having same headers twice in the pool + // => we're strict about receipts here - if we need them, we require receipts to be Some, + // otherwise we require receipts to be None + let receipts_required = Validators::new(validators_config).maybe_signals_validators_change(header); + match (receipts_required, receipts.is_some()) { + (true, false) => return Err(Error::MissingTransactionsReceipts), + (false, true) => return Err(Error::RedundantTransactionsReceipts), + _ => (), + } + + // we do not want to have all future headers in the pool at once + // => if we see header with number > maximal ever seen header number + LIMIT, + // => we consider this transaction invalid, but only at this moment (we do not want to ban it) + // => let's mark it as Unknown transaction + let (best_id, _) = 
storage.best_block(); + let difference = header.number.saturating_sub(best_id.number); + if difference > pool_config.max_future_number_difference { + return Err(Error::UnsignedTooFarInTheFuture); + } + + // TODO: only accept new headers when we're at the tip of PoA chain + // https://github.com/paritytech/parity-bridges-common/issues/38 + + // we want to see at most one header with given number from single authority + // => every header is providing tag (block_number + authority) + // => since only one tx in the pool can provide the same tag, they're auto-deduplicated + let provides_number_and_authority_tag = (header.number, header.author).encode(); + + // we want to see several 'future' headers in the pool at once, but we may not have access to + // previous headers here + // => we can at least 'verify' that headers comprise a chain by providing and requiring + // tag (header.number, header.hash) + let provides_header_number_and_hash_tag = header_id.encode(); + + // depending on whether parent header is available, we either perform full or 'shortened' check + let context = storage.import_context(None, &header.parent_hash); + let tags = match context { + Some(context) => { + let header_step = contextual_checks(config, &context, None, header)?; + validator_checks(config, &context.validators_set().validators, header, header_step)?; + + // since our parent is already in the storage, we do not require it + // to be in the transaction pool + ( + vec![], + vec![provides_number_and_authority_tag, provides_header_number_and_hash_tag], + ) + } + None => { + // we know nothing about parent header + // => the best thing we can do is to believe that there are no forks in + // PoA chain AND that the header is produced either by previous, or next + // scheduled validators set change + let header_step = header.step().ok_or(Error::MissingStep)?; + let best_context = storage.import_context(None, &best_id.hash).expect( + "import context is None only when header is missing from the 
storage;\ + best header is always in the storage; qed", + ); + let validators_check_result = + validator_checks(config, &best_context.validators_set().validators, header, header_step); + if let Err(error) = validators_check_result { + find_next_validators_signal(storage, &best_context) + .ok_or(error) + .and_then(|next_validators| validator_checks(config, &next_validators, header, header_step))?; + } + + // since our parent is missing from the storage, we **DO** require it + // to be in the transaction pool + // (- 1 can't underflow because there's always best block in the header) + let requires_header_number_and_hash_tag = HeaderId { + number: header.number - 1, + hash: header.parent_hash, + } + .encode(); + ( + vec![requires_header_number_and_hash_tag], + vec![provides_number_and_authority_tag, provides_header_number_and_hash_tag], + ) + } + }; + + // the heaviest, but rare operation - we do not want invalid receipts in the pool + if let Some(receipts) = receipts { + log::trace!(target: "runtime", "Got receipts! {:?}", receipts); + if header.check_receipts_root(receipts).is_err() { + return Err(Error::TransactionsReceiptsMismatch); + } + } + + Ok(tags) +} + +/// Verify header by Aura rules. 
+pub fn verify_aura_header( + storage: &S, + config: &AuraConfiguration, + submitter: Option, + header: &AuraHeader, + chain_time: &CT, +) -> Result, Error> { + // let's do the lightest check first + contextless_checks(config, header, chain_time)?; + + // the rest of checks requires access to the parent header + let context = storage.import_context(submitter, &header.parent_hash).ok_or_else(|| { + log::warn!( + target: "runtime", + "Missing parent PoA block: ({:?}, {})", + header.number.checked_sub(1), + header.parent_hash, + ); + + Error::MissingParentBlock + })?; + let header_step = contextual_checks(config, &context, None, header)?; + validator_checks(config, &context.validators_set().validators, header, header_step)?; + + Ok(context) +} + +/// Perform basic checks that only require header itself. +fn contextless_checks( + config: &AuraConfiguration, + header: &AuraHeader, + chain_time: &CT, +) -> Result<(), Error> { + let expected_seal_fields = expected_header_seal_fields(config, header); + if header.seal.len() != expected_seal_fields { + return Err(Error::InvalidSealArity); + } + if header.number >= u64::max_value() { + return Err(Error::RidiculousNumber); + } + if header.gas_used > header.gas_limit { + return Err(Error::TooMuchGasUsed); + } + if header.gas_limit < config.min_gas_limit { + return Err(Error::InvalidGasLimit); + } + if header.gas_limit > config.max_gas_limit { + return Err(Error::InvalidGasLimit); + } + if header.number != 0 && header.extra_data.len() as u64 > config.maximum_extra_data_size { + return Err(Error::ExtraDataOutOfBounds); + } + + // we can't detect if block is from future in runtime + // => let's only do an overflow check + if header.timestamp > i32::max_value() as u64 { + return Err(Error::TimestampOverflow); + } + + if chain_time.is_timestamp_ahead(header.timestamp) { + return Err(Error::HeaderTimestampIsAhead); + } + + Ok(()) +} + +/// Perform checks that require access to parent header. 
+fn contextual_checks( + config: &AuraConfiguration, + context: &ImportContext, + validators_override: Option<&[Address]>, + header: &AuraHeader, +) -> Result { + let validators = validators_override.unwrap_or_else(|| &context.validators_set().validators); + let header_step = header.step().ok_or(Error::MissingStep)?; + let parent_step = context.parent_header().step().ok_or(Error::MissingStep)?; + + // Ensure header is from the step after context. + if header_step == parent_step { + return Err(Error::DoubleVote); + } + #[allow(clippy::suspicious_operation_groupings)] + if header.number >= config.validate_step_transition && header_step < parent_step { + return Err(Error::DoubleVote); + } + + // If empty step messages are enabled we will validate the messages in the seal, missing messages are not + // reported as there's no way to tell whether the empty step message was never sent or simply not included. + let empty_steps_len = match header.number >= config.empty_steps_transition { + true => { + let strict_empty_steps = header.number >= config.strict_empty_steps_transition; + let empty_steps = header.empty_steps().ok_or(Error::MissingEmptySteps)?; + let empty_steps_len = empty_steps.len(); + let mut prev_empty_step = 0; + + for empty_step in empty_steps { + if empty_step.step <= parent_step || empty_step.step >= header_step { + return Err(Error::InsufficientProof); + } + + if !verify_empty_step(&header.parent_hash, &empty_step, validators) { + return Err(Error::InsufficientProof); + } + + if strict_empty_steps { + if empty_step.step <= prev_empty_step { + return Err(Error::InsufficientProof); + } + + prev_empty_step = empty_step.step; + } + } + + empty_steps_len + } + false => 0, + }; + + // Validate chain score. 
+ if header.number >= config.validate_score_transition { + let expected_difficulty = calculate_score(parent_step, header_step, empty_steps_len as _); + if header.difficulty != expected_difficulty { + return Err(Error::InvalidDifficulty); + } + } + + Ok(header_step) +} + +/// Check that block is produced by expected validator. +fn validator_checks( + config: &AuraConfiguration, + validators: &[Address], + header: &AuraHeader, + header_step: u64, +) -> Result<(), Error> { + let expected_validator = *step_validator(validators, header_step); + if header.author != expected_validator { + return Err(Error::NotValidator); + } + + let validator_signature = header.signature().ok_or(Error::MissingSignature)?; + let header_seal_hash = header + .seal_hash(header.number >= config.empty_steps_transition) + .ok_or(Error::MissingEmptySteps)?; + let is_invalid_proposer = !verify_signature(&expected_validator, &validator_signature, &header_seal_hash); + if is_invalid_proposer { + return Err(Error::NotValidator); + } + + Ok(()) +} + +/// Returns expected number of seal fields in the header. +fn expected_header_seal_fields(config: &AuraConfiguration, header: &AuraHeader) -> usize { + if header.number != u64::max_value() && header.number >= config.empty_steps_transition { + 3 + } else { + 2 + } +} + +/// Verify single sealed empty step. 
+fn verify_empty_step(parent_hash: &H256, step: &SealedEmptyStep, validators: &[Address]) -> bool { + let expected_validator = *step_validator(validators, step.step); + let message = step.message(parent_hash); + verify_signature(&expected_validator, &step.signature, &message) +} + +/// Chain scoring: total weight is sqrt(U256::max_value())*height - step +pub(crate) fn calculate_score(parent_step: u64, current_step: u64, current_empty_steps: usize) -> U256 { + U256::from(U128::max_value()) + U256::from(parent_step) - U256::from(current_step) + U256::from(current_empty_steps) +} + +/// Verify that the signature over message has been produced by given validator. +fn verify_signature(expected_validator: &Address, signature: &H520, message: &H256) -> bool { + secp256k1_ecdsa_recover(signature.as_fixed_bytes(), message.as_fixed_bytes()) + .map(|public| public_to_address(&public)) + .map(|address| *expected_validator == address) + .unwrap_or(false) +} + +/// Find next unfinalized validators set change after finalized set. 
+fn find_next_validators_signal(storage: &S, context: &ImportContext) -> Option> { + // that's the earliest block number we may met in following loop + // it may be None if that's the first set + let best_set_signal_block = context.validators_set().signal_block; + + // if parent schedules validators set change, then it may be our set + // else we'll start with last known change + let mut current_set_signal_block = context.last_signal_block(); + let mut next_scheduled_set: Option = None; + + loop { + // if we have reached block that signals finalized change, then + // next_current_block_hash points to the block that schedules next + // change + let current_scheduled_set = match current_set_signal_block { + Some(current_set_signal_block) if Some(¤t_set_signal_block) == best_set_signal_block.as_ref() => { + return next_scheduled_set.map(|scheduled_set| scheduled_set.validators) + } + None => return next_scheduled_set.map(|scheduled_set| scheduled_set.validators), + Some(current_set_signal_block) => storage.scheduled_change(¤t_set_signal_block.hash).expect( + "header that is associated with this change is not pruned;\ + scheduled changes are only removed when header is pruned; qed", + ), + }; + + current_set_signal_block = current_scheduled_set.prev_signal_block; + next_scheduled_set = Some(current_scheduled_set); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{ + insert_header, run_test_with_genesis, test_aura_config, validator, validator_address, validators_addresses, + validators_change_receipt, AccountId, ConstChainTime, HeaderBuilder, TestRuntime, GAS_LIMIT, + }; + use crate::validators::ValidatorsSource; + use crate::DefaultInstance; + use crate::{ + pool_configuration, BridgeStorage, FinalizedBlock, Headers, HeadersByNumber, NextValidatorsSetId, + ScheduledChanges, ValidatorsSet, ValidatorsSets, + }; + use bp_eth_poa::{compute_merkle_root, rlp_encode, TransactionOutcome, H520, U256}; + use frame_support::{StorageMap, StorageValue}; + use 
hex_literal::hex; + use secp256k1::SecretKey; + use sp_runtime::transaction_validity::TransactionTag; + + const GENESIS_STEP: u64 = 42; + const TOTAL_VALIDATORS: usize = 3; + + fn genesis() -> AuraHeader { + HeaderBuilder::genesis().step(GENESIS_STEP).sign_by(&validator(0)) + } + + fn verify_with_config(config: &AuraConfiguration, header: &AuraHeader) -> Result, Error> { + run_test_with_genesis(genesis(), TOTAL_VALIDATORS, |_| { + let storage = BridgeStorage::::new(); + verify_aura_header(&storage, &config, None, header, &ConstChainTime::default()) + }) + } + + fn default_verify(header: &AuraHeader) -> Result, Error> { + verify_with_config(&test_aura_config(), header) + } + + fn default_accept_into_pool( + mut make_header: impl FnMut(&[SecretKey]) -> (AuraHeader, Option>), + ) -> Result<(Vec, Vec), Error> { + run_test_with_genesis(genesis(), TOTAL_VALIDATORS, |_| { + let validators = vec![validator(0), validator(1), validator(2)]; + let mut storage = BridgeStorage::::new(); + let block1 = HeaderBuilder::with_parent_number(0).sign_by_set(&validators); + insert_header(&mut storage, block1); + let block2 = HeaderBuilder::with_parent_number(1).sign_by_set(&validators); + let block2_id = block2.compute_id(); + insert_header(&mut storage, block2); + let block3 = HeaderBuilder::with_parent_number(2).sign_by_set(&validators); + insert_header(&mut storage, block3); + + FinalizedBlock::::put(block2_id); + + let validators_config = + ValidatorsConfiguration::Single(ValidatorsSource::Contract(Default::default(), Vec::new())); + let (header, receipts) = make_header(&validators); + accept_aura_header_into_pool( + &storage, + &test_aura_config(), + &validators_config, + &pool_configuration(), + &header, + &(), + receipts.as_ref(), + ) + }) + } + + fn change_validators_set_at(number: u64, finalized_set: Vec
, signalled_set: Option>) { + let set_id = NextValidatorsSetId::::get(); + NextValidatorsSetId::::put(set_id + 1); + ValidatorsSets::::insert( + set_id, + ValidatorsSet { + validators: finalized_set, + signal_block: None, + enact_block: HeaderId { + number: 0, + hash: HeadersByNumber::::get(&0).unwrap()[0], + }, + }, + ); + + let header_hash = HeadersByNumber::::get(&number).unwrap()[0]; + let mut header = Headers::::get(&header_hash).unwrap(); + header.next_validators_set_id = set_id; + if let Some(signalled_set) = signalled_set { + header.last_signal_block = Some(HeaderId { + number: header.header.number - 1, + hash: header.header.parent_hash, + }); + ScheduledChanges::::insert( + header.header.parent_hash, + AuraScheduledChange { + validators: signalled_set, + prev_signal_block: None, + }, + ); + } + + Headers::::insert(header_hash, header); + } + + #[test] + fn verifies_seal_count() { + // when there are no seals at all + let mut header = AuraHeader::default(); + assert_eq!(default_verify(&header), Err(Error::InvalidSealArity)); + + // when there's single seal (we expect 2 or 3 seals) + header.seal = vec![vec![]]; + assert_eq!(default_verify(&header), Err(Error::InvalidSealArity)); + + // when there's 3 seals (we expect 2 by default) + header.seal = vec![vec![], vec![], vec![]]; + assert_eq!(default_verify(&header), Err(Error::InvalidSealArity)); + + // when there's 2 seals + header.seal = vec![vec![], vec![]]; + assert_ne!(default_verify(&header), Err(Error::InvalidSealArity)); + } + + #[test] + fn verifies_header_number() { + // when number is u64::max_value() + let header = HeaderBuilder::with_number(u64::max_value()).sign_by(&validator(0)); + assert_eq!(default_verify(&header), Err(Error::RidiculousNumber)); + + // when header is < u64::max_value() + let header = HeaderBuilder::with_number(u64::max_value() - 1).sign_by(&validator(0)); + assert_ne!(default_verify(&header), Err(Error::RidiculousNumber)); + } + + #[test] + fn verifies_gas_used() { + // when 
gas used is larger than gas limit + let header = HeaderBuilder::with_number(1) + .gas_used((GAS_LIMIT + 1).into()) + .sign_by(&validator(0)); + assert_eq!(default_verify(&header), Err(Error::TooMuchGasUsed)); + + // when gas used is less than gas limit + let header = HeaderBuilder::with_number(1) + .gas_used((GAS_LIMIT - 1).into()) + .sign_by(&validator(0)); + assert_ne!(default_verify(&header), Err(Error::TooMuchGasUsed)); + } + + #[test] + fn verifies_gas_limit() { + let mut config = test_aura_config(); + config.min_gas_limit = 100.into(); + config.max_gas_limit = 200.into(); + + // when limit is lower than expected + let header = HeaderBuilder::with_number(1) + .gas_limit(50.into()) + .sign_by(&validator(0)); + assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit)); + + // when limit is larger than expected + let header = HeaderBuilder::with_number(1) + .gas_limit(250.into()) + .sign_by(&validator(0)); + assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit)); + + // when limit is within expected range + let header = HeaderBuilder::with_number(1) + .gas_limit(150.into()) + .sign_by(&validator(0)); + assert_ne!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit)); + } + + #[test] + fn verifies_extra_data_len() { + // when extra data is too large + let header = HeaderBuilder::with_number(1) + .extra_data(std::iter::repeat(42).take(1000).collect::>()) + .sign_by(&validator(0)); + assert_eq!(default_verify(&header), Err(Error::ExtraDataOutOfBounds)); + + // when extra data size is OK + let header = HeaderBuilder::with_number(1) + .extra_data(std::iter::repeat(42).take(10).collect::>()) + .sign_by(&validator(0)); + assert_ne!(default_verify(&header), Err(Error::ExtraDataOutOfBounds)); + } + + #[test] + fn verifies_timestamp() { + // when timestamp overflows i32 + let header = HeaderBuilder::with_number(1) + .timestamp(i32::max_value() as u64 + 1) + .sign_by(&validator(0)); + 
assert_eq!(default_verify(&header), Err(Error::TimestampOverflow)); + + // when timestamp doesn't overflow i32 + let header = HeaderBuilder::with_number(1) + .timestamp(i32::max_value() as u64) + .sign_by(&validator(0)); + assert_ne!(default_verify(&header), Err(Error::TimestampOverflow)); + } + + #[test] + fn verifies_chain_time() { + // expected import context after verification + let expect = ImportContext:: { + submitter: None, + parent_hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3").into(), + parent_header: genesis(), + parent_total_difficulty: U256::zero(), + parent_scheduled_change: None, + validators_set_id: 0, + validators_set: ValidatorsSet { + validators: vec![ + hex!("dc5b20847f43d67928f49cd4f85d696b5a7617b5").into(), + hex!("897df33a7b3c62ade01e22c13d48f98124b4480f").into(), + hex!("05c987b34c6ef74e0c7e69c6e641120c24164c2d").into(), + ], + signal_block: None, + enact_block: HeaderId { + number: 0, + hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3").into(), + }, + }, + last_signal_block: None, + }; + + // header is behind + let header = HeaderBuilder::with_parent(&genesis()) + .timestamp(i32::max_value() as u64 / 2 - 100) + .sign_by(&validator(1)); + assert_eq!(default_verify(&header).unwrap(), expect); + + // header is ahead + let header = HeaderBuilder::with_parent(&genesis()) + .timestamp(i32::max_value() as u64 / 2 + 100) + .sign_by(&validator(1)); + assert_eq!(default_verify(&header), Err(Error::HeaderTimestampIsAhead)); + + // header has same timestamp as ConstChainTime + let header = HeaderBuilder::with_parent(&genesis()) + .timestamp(i32::max_value() as u64 / 2) + .sign_by(&validator(1)); + assert_eq!(default_verify(&header).unwrap(), expect); + } + + #[test] + fn verifies_parent_existence() { + // when there's no parent in the storage + let header = HeaderBuilder::with_number(1).sign_by(&validator(0)); + assert_eq!(default_verify(&header), Err(Error::MissingParentBlock)); + + // when 
parent is in the storage + let header = HeaderBuilder::with_parent(&genesis()).sign_by(&validator(0)); + assert_ne!(default_verify(&header), Err(Error::MissingParentBlock)); + } + + #[test] + fn verifies_step() { + // when step is missing from seals + let mut header = AuraHeader { + seal: vec![vec![], vec![]], + gas_limit: test_aura_config().min_gas_limit, + parent_hash: genesis().compute_hash(), + ..Default::default() + }; + assert_eq!(default_verify(&header), Err(Error::MissingStep)); + + // when step is the same as for the parent block + header.seal[0] = rlp_encode(&42u64).to_vec(); + assert_eq!(default_verify(&header), Err(Error::DoubleVote)); + + // when step is OK + header.seal[0] = rlp_encode(&43u64).to_vec(); + assert_ne!(default_verify(&header), Err(Error::DoubleVote)); + + // now check with validate_step check enabled + let mut config = test_aura_config(); + config.validate_step_transition = 0; + + // when step is lesser that for the parent block + header.seal[0] = rlp_encode(&40u64).to_vec(); + header.seal = vec![vec![40], vec![]]; + assert_eq!(verify_with_config(&config, &header), Err(Error::DoubleVote)); + + // when step is OK + header.seal[0] = rlp_encode(&44u64).to_vec(); + assert_ne!(verify_with_config(&config, &header), Err(Error::DoubleVote)); + } + + #[test] + fn verifies_empty_step() { + let mut config = test_aura_config(); + config.empty_steps_transition = 0; + + // when empty step duplicates parent step + let header = HeaderBuilder::with_parent(&genesis()) + .empty_steps(&[(&validator(0), GENESIS_STEP)]) + .step(GENESIS_STEP + 3) + .sign_by(&validator(3)); + assert_eq!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); + + // when empty step signature check fails + let header = HeaderBuilder::with_parent(&genesis()) + .empty_steps(&[(&validator(100), GENESIS_STEP + 1)]) + .step(GENESIS_STEP + 3) + .sign_by(&validator(3)); + assert_eq!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); + + // when we are 
accepting strict empty steps and they come not in order + config.strict_empty_steps_transition = 0; + let header = HeaderBuilder::with_parent(&genesis()) + .empty_steps(&[(&validator(2), GENESIS_STEP + 2), (&validator(1), GENESIS_STEP + 1)]) + .step(GENESIS_STEP + 3) + .sign_by(&validator(3)); + assert_eq!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); + + // when empty steps are OK + let header = HeaderBuilder::with_parent(&genesis()) + .empty_steps(&[(&validator(1), GENESIS_STEP + 1), (&validator(2), GENESIS_STEP + 2)]) + .step(GENESIS_STEP + 3) + .sign_by(&validator(3)); + assert_ne!(verify_with_config(&config, &header), Err(Error::InsufficientProof)); + } + + #[test] + fn verifies_chain_score() { + let mut config = test_aura_config(); + config.validate_score_transition = 0; + + // when chain score is invalid + let header = HeaderBuilder::with_parent(&genesis()) + .difficulty(100.into()) + .sign_by(&validator(0)); + assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidDifficulty)); + + // when chain score is accepted + let header = HeaderBuilder::with_parent(&genesis()).sign_by(&validator(0)); + assert_ne!(verify_with_config(&config, &header), Err(Error::InvalidDifficulty)); + } + + #[test] + fn verifies_validator() { + let good_header = HeaderBuilder::with_parent(&genesis()).sign_by(&validator(1)); + + // when header author is invalid + let mut header = good_header.clone(); + header.author = Default::default(); + assert_eq!(default_verify(&header), Err(Error::NotValidator)); + + // when header signature is invalid + let mut header = good_header.clone(); + header.seal[1] = rlp_encode(&H520::default()).to_vec(); + assert_eq!(default_verify(&header), Err(Error::NotValidator)); + + // when everything is OK + assert_eq!(default_verify(&good_header).map(|_| ()), Ok(())); + } + + #[test] + fn pool_verifies_known_blocks() { + // when header is known + assert_eq!( + default_accept_into_pool(|validators| 
(HeaderBuilder::with_parent_number(2).sign_by_set(validators), None)), + Err(Error::KnownHeader), + ); + } + + #[test] + fn pool_verifies_ancient_blocks() { + // when header number is less than finalized + assert_eq!( + default_accept_into_pool(|validators| ( + HeaderBuilder::with_parent_number(1) + .gas_limit((GAS_LIMIT + 1).into()) + .sign_by_set(validators), + None, + ),), + Err(Error::AncientHeader), + ); + } + + #[test] + fn pool_rejects_headers_without_required_receipts() { + assert_eq!( + default_accept_into_pool(|_| ( + AuraHeader { + number: 20_000_000, + seal: vec![vec![], vec![]], + gas_limit: test_aura_config().min_gas_limit, + log_bloom: (&[0xff; 256]).into(), + ..Default::default() + }, + None, + ),), + Err(Error::MissingTransactionsReceipts), + ); + } + + #[test] + fn pool_rejects_headers_with_redundant_receipts() { + assert_eq!( + default_accept_into_pool(|validators| ( + HeaderBuilder::with_parent_number(3).sign_by_set(validators), + Some(vec![Receipt { + gas_used: 1.into(), + log_bloom: (&[0xff; 256]).into(), + logs: vec![], + outcome: TransactionOutcome::Unknown, + }]), + ),), + Err(Error::RedundantTransactionsReceipts), + ); + } + + #[test] + fn pool_verifies_future_block_number() { + // when header is too far from the future + assert_eq!( + default_accept_into_pool(|validators| (HeaderBuilder::with_number(100).sign_by_set(&validators), None),), + Err(Error::UnsignedTooFarInTheFuture), + ); + } + + #[test] + fn pool_performs_full_verification_when_parent_is_known() { + // if parent is known, then we'll execute contextual_checks, which + // checks for DoubleVote + assert_eq!( + default_accept_into_pool(|validators| ( + HeaderBuilder::with_parent_number(3) + .step(GENESIS_STEP + 3) + .sign_by_set(&validators), + None, + ),), + Err(Error::DoubleVote), + ); + } + + #[test] + fn pool_performs_validators_checks_when_parent_is_unknown() { + // if parent is unknown, then we still need to check if header has required signature + // (even if header will 
be considered invalid/duplicate later, we can use this signature + // as a proof of malicious action by this validator) + assert_eq!( + default_accept_into_pool(|_| (HeaderBuilder::with_number(8).step(8).sign_by(&validator(1)), None,)), + Err(Error::NotValidator), + ); + } + + #[test] + fn pool_verifies_header_with_known_parent() { + let mut hash = None; + assert_eq!( + default_accept_into_pool(|validators| { + let header = HeaderBuilder::with_parent_number(3).sign_by_set(validators); + hash = Some(header.compute_hash()); + (header, None) + }), + Ok(( + // no tags are required + vec![], + // header provides two tags + vec![ + (4u64, validators_addresses(3)[1]).encode(), + (4u64, hash.unwrap()).encode(), + ], + )), + ); + } + + #[test] + fn pool_verifies_header_with_unknown_parent() { + let mut id = None; + let mut parent_id = None; + assert_eq!( + default_accept_into_pool(|validators| { + let header = HeaderBuilder::with_number(5) + .step(GENESIS_STEP + 5) + .sign_by_set(validators); + id = Some(header.compute_id()); + parent_id = header.parent_id(); + (header, None) + }), + Ok(( + // parent tag required + vec![parent_id.unwrap().encode()], + // header provides two tags + vec![(5u64, validator_address(2)).encode(), id.unwrap().encode(),], + )), + ); + } + + #[test] + fn pool_uses_next_validators_set_when_finalized_fails() { + assert_eq!( + default_accept_into_pool(|actual_validators| { + // change finalized set at parent header + change_validators_set_at(3, validators_addresses(1), None); + + // header is signed using wrong set + let header = HeaderBuilder::with_number(5) + .step(GENESIS_STEP + 2) + .sign_by_set(actual_validators); + + (header, None) + }), + Err(Error::NotValidator), + ); + + let mut id = None; + let mut parent_id = None; + assert_eq!( + default_accept_into_pool(|actual_validators| { + // change finalized set at parent header + signal valid set at parent block + change_validators_set_at(3, validators_addresses(10), Some(validators_addresses(3))); + 
+ // header is signed using wrong set + let header = HeaderBuilder::with_number(5) + .step(GENESIS_STEP + 2) + .sign_by_set(actual_validators); + id = Some(header.compute_id()); + parent_id = header.parent_id(); + + (header, None) + }), + Ok(( + // parent tag required + vec![parent_id.unwrap().encode(),], + // header provides two tags + vec![(5u64, validator_address(2)).encode(), id.unwrap().encode(),], + )), + ); + } + + #[test] + fn pool_rejects_headers_with_invalid_receipts() { + assert_eq!( + default_accept_into_pool(|validators| { + let header = HeaderBuilder::with_parent_number(3) + .log_bloom((&[0xff; 256]).into()) + .sign_by_set(validators); + (header, Some(vec![validators_change_receipt(Default::default())])) + }), + Err(Error::TransactionsReceiptsMismatch), + ); + } + + #[test] + fn pool_accepts_headers_with_valid_receipts() { + let mut hash = None; + let receipts = vec![validators_change_receipt(Default::default())]; + let receipts_root = compute_merkle_root(receipts.iter().map(|r| r.rlp())); + + assert_eq!( + default_accept_into_pool(|validators| { + let header = HeaderBuilder::with_parent_number(3) + .log_bloom((&[0xff; 256]).into()) + .receipts_root(receipts_root) + .sign_by_set(validators); + hash = Some(header.compute_hash()); + (header, Some(receipts.clone())) + }), + Ok(( + // no tags are required + vec![], + // header provides two tags + vec![ + (4u64, validators_addresses(3)[1]).encode(), + (4u64, hash.unwrap()).encode(), + ], + )), + ); + } +} diff --git a/polkadot/modules/grandpa/Cargo.toml b/polkadot/modules/grandpa/Cargo.toml new file mode 100644 index 00000000000..810dce3dd5a --- /dev/null +++ b/polkadot/modules/grandpa/Cargo.toml @@ -0,0 +1,59 @@ +[package] +name = "pallet-bridge-grandpa" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] 
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +finality-grandpa = { version = "0.14.0", default-features = false } +log = { version = "0.4.14", default-features = false } +num-traits = { version = "0.2", default-features = false } +serde = { version = "1.0", optional = true } + +# Bridge Dependencies + +bp-runtime = { path = "../../primitives/runtime", default-features = false } +bp-header-chain = { path = "../../primitives/header-chain", default-features = false } + +# Substrate Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + +# Optional Benchmarking Dependencies +bp-test-utils = { path = "../../primitives/test-utils", default-features = false, optional = true } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } + +[dev-dependencies] +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[features] +default = ["std"] +std = [ + "bp-header-chain/std", + "bp-runtime/std", + "bp-test-utils/std", + "codec/std", + "finality-grandpa/std", + "frame-support/std", + "frame-system/std", + "log/std", + "num-traits/std", + "serde", + "sp-finality-grandpa/std", + "sp-runtime/std", + "sp-std/std", + "sp-trie/std", +] +runtime-benchmarks = [ + "bp-test-utils", + "frame-benchmarking", +] 
diff --git a/polkadot/modules/grandpa/src/benchmarking.rs b/polkadot/modules/grandpa/src/benchmarking.rs new file mode 100644 index 00000000000..cb170fdc8b1 --- /dev/null +++ b/polkadot/modules/grandpa/src/benchmarking.rs @@ -0,0 +1,272 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Benchmarks for the GRANDPA Pallet. +//! +//! The main dispatchable for the GRANDPA pallet is `submit_finality_proof`, so these benchmarks are +//! based around that. There are to main factors which affect finality proof verification: +//! +//! 1. The number of `votes-ancestries` in the justification +//! 2. The number of `pre-commits` in the justification +//! +//! Vote ancestries are the headers between (`finality_target`, `head_of_chain`], where +//! `header_of_chain` is a decendant of `finality_target`. +//! +//! Pre-commits are messages which are signed by validators at the head of the chain they think is +//! the best. +//! +//! Consider the following: +//! +//! / [B'] <- [C'] +//! [A] <- [B] <- [C] +//! +//! The common ancestor of both forks is block A, so this is what GRANDPA will finalize. In order to +//! verify this we will have vote ancestries of [B, C, B', C'] and pre-commits [C, C']. +//! +//! 
Note that the worst case scenario here would be a justification where each validator has it's +//! own fork which is `SESSION_LENGTH` blocks long. +//! +//! As far as benchmarking results go, the only benchmark that should be used in +//! `pallet-bridge-grandpa` to annotate weights is the `submit_finality_proof` one. The others are +//! looking at the effects of specific code paths and do not actually reflect the overall worst case +//! scenario. + +use crate::*; + +use bp_test_utils::{ + accounts, authority_list, make_justification_for_header, test_keyring, JustificationGeneratorParams, ALICE, + TEST_GRANDPA_ROUND, TEST_GRANDPA_SET_ID, +}; +use frame_benchmarking::{benchmarks_instance_pallet, whitelisted_caller}; +use frame_system::RawOrigin; +use sp_finality_grandpa::AuthorityId; +use sp_runtime::traits::{One, Zero}; +use sp_std::{vec, vec::Vec}; + +// The maximum number of vote ancestries to include in a justification. +// +// In practice this would be limited by the session length (number of blocks a single authority set +// can produce) of a given chain. +const MAX_VOTE_ANCESTRIES: u32 = 1000; + +// The maximum number of pre-commits to include in a justification. In practice this scales with the +// number of validators. +const MAX_VALIDATOR_SET_SIZE: u32 = 1024; + +benchmarks_instance_pallet! { + // This is the "gold standard" benchmark for this extrinsic, and it's what should be used to + // annotate the weight in the pallet. + // + // The other benchmarks related to `submit_finality_proof` are looking at the effect of specific + // parameters and are there mostly for seeing how specific codepaths behave. 
+ submit_finality_proof { + let v in 1..MAX_VOTE_ANCESTRIES; + let p in 1..MAX_VALIDATOR_SET_SIZE; + + let caller: T::AccountId = whitelisted_caller(); + + let authority_list = accounts(p as u16) + .iter() + .map(|id| (AuthorityId::from(*id), 1)) + .collect::>(); + + let init_data = InitializationData { + header: bp_test_utils::test_header(Zero::zero()), + authority_list, + set_id: TEST_GRANDPA_SET_ID, + is_halted: false, + }; + + initialize_bridge::(init_data); + let header: BridgedHeader = bp_test_utils::test_header(One::one()); + + let params = JustificationGeneratorParams { + header: header.clone(), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: accounts(p as u16).iter().map(|k| (*k, 1)).collect::>(), + votes: v, + forks: 1, + }; + + let justification = make_justification_for_header(params); + + }: _(RawOrigin::Signed(caller), header, justification) + verify { + let header: BridgedHeader = bp_test_utils::test_header(One::one()); + let expected_hash = header.hash(); + + assert_eq!(>::get(), expected_hash); + assert!(>::contains_key(expected_hash)); + } + + // What we want to check here is the effect of vote ancestries on justification verification + // do this by varying the number of headers between `finality_target` and `header_of_chain`. 
+ submit_finality_proof_on_single_fork { + let v in 1..MAX_VOTE_ANCESTRIES; + + let caller: T::AccountId = whitelisted_caller(); + + let init_data = InitializationData { + header: bp_test_utils::test_header(Zero::zero()), + authority_list: authority_list(), + set_id: TEST_GRANDPA_SET_ID, + is_halted: false, + }; + + initialize_bridge::(init_data); + let header: BridgedHeader = bp_test_utils::test_header(One::one()); + + let params = JustificationGeneratorParams { + header: header.clone(), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: test_keyring(), + votes: v, + forks: 1, + }; + + let justification = make_justification_for_header(params); + + }: submit_finality_proof(RawOrigin::Signed(caller), header, justification) + verify { + let header: BridgedHeader = bp_test_utils::test_header(One::one()); + let expected_hash = header.hash(); + + assert_eq!(>::get(), expected_hash); + assert!(>::contains_key(expected_hash)); + } + + // What we want to check here is the effect of many pre-commits on justification verification. + // We do this by creating many forks, whose head will be used as a signed pre-commit in the + // final justification. 
+ submit_finality_proof_on_many_forks { + let p in 1..MAX_VALIDATOR_SET_SIZE; + + let caller: T::AccountId = whitelisted_caller(); + + let authority_list = accounts(p as u16) + .iter() + .map(|id| (AuthorityId::from(*id), 1)) + .collect::>(); + + let init_data = InitializationData { + header: bp_test_utils::test_header(Zero::zero()), + authority_list, + set_id: TEST_GRANDPA_SET_ID, + is_halted: false, + }; + + initialize_bridge::(init_data); + let header: BridgedHeader = bp_test_utils::test_header(One::one()); + + let params = JustificationGeneratorParams { + header: header.clone(), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: accounts(p as u16).iter().map(|k| (*k, 1)).collect::>(), + votes: p, + forks: p, + }; + + let justification = make_justification_for_header(params); + + }: submit_finality_proof(RawOrigin::Signed(caller), header, justification) + verify { + let header: BridgedHeader = bp_test_utils::test_header(One::one()); + let expected_hash = header.hash(); + + assert_eq!(>::get(), expected_hash); + assert!(>::contains_key(expected_hash)); + } + + // Here we want to find out the overheaded of looking through consensus digests found in a + // header. As the number of logs in a header grows, how much more work do we require to look + // through them? + // + // Note that this should be the same for looking through scheduled changes and forces changes, + // which is why we only have one benchmark for this. + find_scheduled_change { + // Not really sure what a good bound for this is. 
+ let n in 1..1000; + + let mut logs = vec![]; + for i in 0..n { + // We chose a non-consensus log on purpose since that way we have to look through all + // the logs in the header + logs.push(sp_runtime::DigestItem::Other(vec![])); + } + + let mut header: BridgedHeader = bp_test_utils::test_header(Zero::zero()); + let digest = header.digest_mut(); + *digest = sp_runtime::Digest { + logs, + }; + + }: { + crate::find_scheduled_change(&header) + } + + // What we want to check here is how long it takes to read and write the authority set tracked + // by the pallet as the number of authorities grows. + read_write_authority_sets { + // The current max target number of validators on Polkadot/Kusama + let n in 1..1000; + + let mut authorities = vec![]; + for i in 0..n { + authorities.push((ALICE, 1)); + } + + let authority_set = bp_header_chain::AuthoritySet { + authorities: authorities.iter().map(|(id, w)| (AuthorityId::from(*id), *w)).collect(), + set_id: 0 + }; + + >::put(&authority_set); + + }: { + let authority_set = >::get(); + >::put(&authority_set); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use frame_support::assert_ok; + + #[test] + fn finality_proof_is_valid() { + mock::run_test(|| { + assert_ok!(test_benchmark_submit_finality_proof::()); + }); + } + + #[test] + fn single_fork_finality_proof_is_valid() { + mock::run_test(|| { + assert_ok!(test_benchmark_submit_finality_proof_on_single_fork::()); + }); + } + + #[test] + fn multi_fork_finality_proof_is_valid() { + mock::run_test(|| { + assert_ok!(test_benchmark_submit_finality_proof_on_many_forks::()); + }); + } +} diff --git a/polkadot/modules/grandpa/src/lib.rs b/polkadot/modules/grandpa/src/lib.rs new file mode 100644 index 00000000000..9fb7372b020 --- /dev/null +++ b/polkadot/modules/grandpa/src/lib.rs @@ -0,0 +1,1036 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Substrate GRANDPA Pallet +//! +//! This pallet is an on-chain GRANDPA light client for Substrate based chains. +//! +//! This pallet achieves this by trustlessly verifying GRANDPA finality proofs on-chain. Once +//! verified, finalized headers are stored in the pallet, thereby creating a sparse header chain. +//! This sparse header chain can be used as a source of truth for other higher-level applications. +//! +//! The pallet is responsible for tracking GRANDPA validator set hand-offs. We only import headers +//! with justifications signed by the current validator set we know of. The header is inspected for +//! a `ScheduledChanges` digest item, which is then used to update to next validator set. +//! +//! Since this pallet only tracks finalized headers it does not deal with forks. Forks can only +//! occur if the GRANDPA validator set on the bridged chain is either colluding or there is a severe +//! bug causing resulting in an equivocation. Such events are outside of the scope of this pallet. +//! Shall the fork occur on the bridged chain governance intervention will be required to +//! re-initialize the bridge and track the right fork. 
+ +#![cfg_attr(not(feature = "std"), no_std)] +// Runtime-generated enums +#![allow(clippy::large_enum_variant)] + +use crate::weights::WeightInfo; + +use bp_header_chain::justification::GrandpaJustification; +use bp_header_chain::InitializationData; +use bp_runtime::{BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf}; +use finality_grandpa::voter_set::VoterSet; +use frame_support::ensure; +use frame_system::{ensure_signed, RawOrigin}; +use sp_finality_grandpa::{ConsensusLog, GRANDPA_ENGINE_ID}; +use sp_runtime::traits::{BadOrigin, Header as HeaderT, Zero}; + +#[cfg(test)] +mod mock; + +/// Pallet containing weights for this pallet. +pub mod weights; + +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarking; + +// Re-export in crate namespace for `construct_runtime!` +pub use pallet::*; + +/// Block number of the bridged chain. +pub type BridgedBlockNumber = BlockNumberOf<>::BridgedChain>; +/// Block hash of the bridged chain. +pub type BridgedBlockHash = HashOf<>::BridgedChain>; +/// Hasher of the bridged chain. +pub type BridgedBlockHasher = HasherOf<>::BridgedChain>; +/// Header of the bridged chain. +pub type BridgedHeader = HeaderOf<>::BridgedChain>; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The chain we are bridging to here. + type BridgedChain: Chain; + + /// The upper bound on the number of requests allowed by the pallet. + /// + /// A request refers to an action which writes a header to storage. + /// + /// Once this bound is reached the pallet will not allow any dispatchables to be called + /// until the request count has decreased. + #[pallet::constant] + type MaxRequests: Get; + + /// Maximal number of finalized headers to keep in the storage. + /// + /// The setting is there to prevent growing the on-chain state indefinitely. 
Note + /// the setting does not relate to block numbers - we will simply keep as much items + /// in the storage, so it doesn't guarantee any fixed timeframe for finality headers. + #[pallet::constant] + type HeadersToKeep: Get; + + /// Weights gathered through benchmarking. + type WeightInfo: WeightInfo; + } + + #[pallet::pallet] + pub struct Pallet(PhantomData<(T, I)>); + + #[pallet::hooks] + impl, I: 'static> Hooks> for Pallet { + fn on_initialize(_n: T::BlockNumber) -> frame_support::weights::Weight { + >::mutate(|count| *count = count.saturating_sub(1)); + + (0_u64) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + } + + #[pallet::call] + impl, I: 'static> Pallet { + /// Verify a target header is finalized according to the given finality proof. + /// + /// It will use the underlying storage pallet to fetch information about the current + /// authorities and best finalized header in order to verify that the header is finalized. + /// + /// If successful in verification, it will write the target header to the underlying storage + /// pallet. + #[pallet::weight(T::WeightInfo::submit_finality_proof( + justification.votes_ancestries.len() as u32, + justification.commit.precommits.len() as u32, + ))] + pub fn submit_finality_proof( + origin: OriginFor, + finality_target: BridgedHeader, + justification: GrandpaJustification>, + ) -> DispatchResultWithPostInfo { + ensure_operational::()?; + let _ = ensure_signed(origin)?; + + ensure!( + Self::request_count() < T::MaxRequests::get(), + >::TooManyRequests + ); + + let (hash, number) = (finality_target.hash(), finality_target.number()); + log::trace!(target: "runtime::bridge-grandpa", "Going to try and finalize header {:?}", finality_target); + + let best_finalized = >::get(>::get()).expect( + "In order to reach this point the bridge must have been initialized. Afterwards, + every time `BestFinalized` is updated `ImportedHeaders` is also updated. 
Therefore + `ImportedHeaders` must contain an entry for `BestFinalized`.", + ); + + // We do a quick check here to ensure that our header chain is making progress and isn't + // "travelling back in time" (which could be indicative of something bad, e.g a hard-fork). + ensure!(best_finalized.number() < number, >::OldHeader); + + let authority_set = >::get(); + let set_id = authority_set.set_id; + verify_justification::(&justification, hash, *number, authority_set)?; + + let _enacted = try_enact_authority_change::(&finality_target, set_id)?; + let index = >::get(); + let pruning = >::try_get(index); + >::put(hash); + >::insert(hash, finality_target); + >::insert(index, hash); + >::mutate(|count| *count += 1); + + // Update ring buffer pointer and remove old header. + >::put((index + 1) % T::HeadersToKeep::get()); + if let Ok(hash) = pruning { + log::debug!(target: "runtime::bridge-grandpa", "Pruning old header: {:?}.", hash); + >::remove(hash); + } + + log::info!(target: "runtime::bridge-grandpa", "Succesfully imported finalized header with hash {:?}!", hash); + + Ok(().into()) + } + + /// Bootstrap the bridge pallet with an initial header and authority set from which to sync. + /// + /// The initial configuration provided does not need to be the genesis header of the bridged + /// chain, it can be any arbirary header. You can also provide the next scheduled set change + /// if it is already know. + /// + /// This function is only allowed to be called from a trusted origin and writes to storage + /// with practically no checks in terms of the validity of the data. It is important that + /// you ensure that valid data is being passed in. 
+ #[pallet::weight((T::DbWeight::get().reads_writes(2, 5), DispatchClass::Operational))] + pub fn initialize( + origin: OriginFor, + init_data: super::InitializationData>, + ) -> DispatchResultWithPostInfo { + ensure_owner_or_root::(origin)?; + + let init_allowed = !>::exists(); + ensure!(init_allowed, >::AlreadyInitialized); + initialize_bridge::(init_data.clone()); + + log::info!( + target: "runtime::bridge-grandpa", + "Pallet has been initialized with the following parameters: {:?}", + init_data + ); + + Ok(().into()) + } + + /// Change `PalletOwner`. + /// + /// May only be called either by root, or by `PalletOwner`. + #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] + pub fn set_owner(origin: OriginFor, new_owner: Option) -> DispatchResultWithPostInfo { + ensure_owner_or_root::(origin)?; + match new_owner { + Some(new_owner) => { + PalletOwner::::put(&new_owner); + log::info!(target: "runtime::bridge-grandpa", "Setting pallet Owner to: {:?}", new_owner); + } + None => { + PalletOwner::::kill(); + log::info!(target: "runtime::bridge-grandpa", "Removed Owner of pallet."); + } + } + + Ok(().into()) + } + + /// Halt or resume all pallet operations. + /// + /// May only be called either by root, or by `PalletOwner`. + #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] + pub fn set_operational(origin: OriginFor, operational: bool) -> DispatchResultWithPostInfo { + ensure_owner_or_root::(origin)?; + >::put(operational); + + if operational { + log::info!(target: "runtime::bridge-grandpa", "Resuming pallet operations."); + } else { + log::warn!(target: "runtime::bridge-grandpa", "Stopping pallet operations."); + } + + Ok(().into()) + } + } + + /// The current number of requests which have written to storage. + /// + /// If the `RequestCount` hits `MaxRequests`, no more calls will be allowed to the pallet until + /// the request capacity is increased. 
+ /// + /// The `RequestCount` is decreased by one at the beginning of every block. This is to ensure + /// that the pallet can always make progress. + #[pallet::storage] + #[pallet::getter(fn request_count)] + pub(super) type RequestCount, I: 'static = ()> = StorageValue<_, u32, ValueQuery>; + + /// Hash of the header used to bootstrap the pallet. + #[pallet::storage] + pub(super) type InitialHash, I: 'static = ()> = StorageValue<_, BridgedBlockHash, ValueQuery>; + + /// Hash of the best finalized header. + #[pallet::storage] + pub(super) type BestFinalized, I: 'static = ()> = StorageValue<_, BridgedBlockHash, ValueQuery>; + + /// A ring buffer of imported hashes. Ordered by the insertion time. + #[pallet::storage] + pub(super) type ImportedHashes, I: 'static = ()> = + StorageMap<_, Identity, u32, BridgedBlockHash>; + + /// Current ring buffer position. + #[pallet::storage] + pub(super) type ImportedHashesPointer, I: 'static = ()> = StorageValue<_, u32, ValueQuery>; + + /// Headers which have been imported into the pallet. + #[pallet::storage] + pub(super) type ImportedHeaders, I: 'static = ()> = + StorageMap<_, Identity, BridgedBlockHash, BridgedHeader>; + + /// The current GRANDPA Authority set. + #[pallet::storage] + pub(super) type CurrentAuthoritySet, I: 'static = ()> = + StorageValue<_, bp_header_chain::AuthoritySet, ValueQuery>; + + /// Optional pallet owner. + /// + /// Pallet owner has a right to halt all pallet operations and then resume it. If it is + /// `None`, then there are no direct ways to halt/resume pallet operations, but other + /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt + /// flag directly or call the `halt_operations`). + #[pallet::storage] + pub(super) type PalletOwner, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>; + + /// If true, all pallet transactions are failed immediately. 
+ #[pallet::storage] + pub(super) type IsHalted, I: 'static = ()> = StorageValue<_, bool, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig, I: 'static = ()> { + /// Optional module owner account. + pub owner: Option, + /// Optional module initialization data. + pub init_data: Option>>, + } + + #[cfg(feature = "std")] + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + Self { + owner: None, + init_data: None, + } + } + } + + #[pallet::genesis_build] + impl, I: 'static> GenesisBuild for GenesisConfig { + fn build(&self) { + if let Some(ref owner) = self.owner { + >::put(owner); + } + + if let Some(init_data) = self.init_data.clone() { + initialize_bridge::(init_data); + } else { + // Since the bridge hasn't been initialized we shouldn't allow anyone to perform + // transactions. + >::put(true); + } + } + } + + #[pallet::error] + pub enum Error { + /// The given justification is invalid for the given header. + InvalidJustification, + /// The authority set from the underlying header chain is invalid. + InvalidAuthoritySet, + /// There are too many requests for the current window to handle. + TooManyRequests, + /// The header being imported is older than the best finalized header known to the pallet. + OldHeader, + /// The header is unknown to the pallet. + UnknownHeader, + /// The scheduled authority set change found in the header is unsupported by the pallet. + /// + /// This is the case for non-standard (e.g forced) authority set changes. + UnsupportedScheduledChange, + /// The pallet has already been initialized. + AlreadyInitialized, + /// All pallet operations are halted. + Halted, + /// The storage proof doesn't contains storage root. So it is invalid for given header. + StorageRootMismatch, + } + + /// Check the given header for a GRANDPA scheduled authority set change. If a change + /// is found it will be enacted immediately. 
+ /// + /// This function does not support forced changes, or scheduled changes with delays + /// since these types of changes are indicitive of abnormal behaviour from GRANDPA. + /// + /// Returned value will indicate if a change was enacted or not. + pub(crate) fn try_enact_authority_change, I: 'static>( + header: &BridgedHeader, + current_set_id: sp_finality_grandpa::SetId, + ) -> Result { + let mut change_enacted = false; + + // We don't support forced changes - at that point governance intervention is required. + ensure!( + super::find_forced_change(header).is_none(), + >::UnsupportedScheduledChange + ); + + if let Some(change) = super::find_scheduled_change(header) { + // GRANDPA only includes a `delay` for forced changes, so this isn't valid. + ensure!(change.delay == Zero::zero(), >::UnsupportedScheduledChange); + + // TODO [#788]: Stop manually increasing the `set_id` here. + let next_authorities = bp_header_chain::AuthoritySet { + authorities: change.next_authorities, + set_id: current_set_id + 1, + }; + + // Since our header schedules a change and we know the delay is 0, it must also enact + // the change. + >::put(&next_authorities); + change_enacted = true; + + log::info!( + target: "runtime::bridge-grandpa", + "Transitioned from authority set {} to {}! New authorities are: {:?}", + current_set_id, + current_set_id + 1, + next_authorities, + ); + }; + + Ok(change_enacted) + } + + /// Verify a GRANDPA justification (finality proof) for a given header. + /// + /// Will use the GRANDPA current authorities known to the pallet. + /// + /// If succesful it returns the decoded GRANDPA justification so we can refund any weight which + /// was overcharged in the initial call. 
+ pub(crate) fn verify_justification, I: 'static>( + justification: &GrandpaJustification>, + hash: BridgedBlockHash, + number: BridgedBlockNumber, + authority_set: bp_header_chain::AuthoritySet, + ) -> Result<(), sp_runtime::DispatchError> { + use bp_header_chain::justification::verify_justification; + + let voter_set = VoterSet::new(authority_set.authorities).ok_or(>::InvalidAuthoritySet)?; + let set_id = authority_set.set_id; + + Ok( + verify_justification::>((hash, number), set_id, &voter_set, &justification).map_err( + |e| { + log::error!(target: "runtime::bridge-grandpa", "Received invalid justification for {:?}: {:?}", hash, e); + >::InvalidJustification + }, + )?, + ) + } + + /// Since this writes to storage with no real checks this should only be used in functions that + /// were called by a trusted origin. + pub(crate) fn initialize_bridge, I: 'static>( + init_params: super::InitializationData>, + ) { + let super::InitializationData { + header, + authority_list, + set_id, + is_halted, + } = init_params; + + let initial_hash = header.hash(); + >::put(initial_hash); + >::put(initial_hash); + >::insert(initial_hash, header); + + let authority_set = bp_header_chain::AuthoritySet::new(authority_list, set_id); + >::put(authority_set); + + >::put(is_halted); + } + + /// Ensure that the origin is either root, or `PalletOwner`. + fn ensure_owner_or_root, I: 'static>(origin: T::Origin) -> Result<(), BadOrigin> { + match origin.into() { + Ok(RawOrigin::Root) => Ok(()), + Ok(RawOrigin::Signed(ref signer)) if Some(signer) == >::get().as_ref() => Ok(()), + _ => Err(BadOrigin), + } + } + + /// Ensure that the pallet is in operational mode (not halted). + fn ensure_operational, I: 'static>() -> Result<(), Error> { + if >::get() { + Err(>::Halted) + } else { + Ok(()) + } + } +} + +impl, I: 'static> Pallet { + /// Get the best finalized header the pallet knows of. + /// + /// Returns a dummy header if there is no best header. 
This can only happen + /// if the pallet has not been initialized yet. + pub fn best_finalized() -> BridgedHeader { + let hash = >::get(); + >::get(hash).unwrap_or_else(|| { + >::new( + Default::default(), + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ) + }) + } + + /// Check if a particular header is known to the bridge pallet. + pub fn is_known_header(hash: BridgedBlockHash) -> bool { + >::contains_key(hash) + } + + /// Verify that the passed storage proof is valid, given it is crafted using + /// known finalized header. If the proof is valid, then the `parse` callback + /// is called and the function returns its result. + pub fn parse_finalized_storage_proof( + hash: BridgedBlockHash, + storage_proof: sp_trie::StorageProof, + parse: impl FnOnce(bp_runtime::StorageProofChecker>) -> R, + ) -> Result { + let header = >::get(hash).ok_or(Error::::UnknownHeader)?; + let storage_proof_checker = bp_runtime::StorageProofChecker::new(*header.state_root(), storage_proof) + .map_err(|_| Error::::StorageRootMismatch)?; + + Ok(parse(storage_proof_checker)) + } +} + +pub(crate) fn find_scheduled_change(header: &H) -> Option> { + use sp_runtime::generic::OpaqueDigestItemId; + + let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); + + let filter_log = |log: ConsensusLog| match log { + ConsensusLog::ScheduledChange(change) => Some(change), + _ => None, + }; + + // find the first consensus digest with the right ID which converts to + // the right kind of consensus log. + header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) +} + +/// Checks the given header for a consensus digest signalling a **forced** scheduled change and +/// extracts it. 
+pub(crate) fn find_forced_change( + header: &H, +) -> Option<(H::Number, sp_finality_grandpa::ScheduledChange)> { + use sp_runtime::generic::OpaqueDigestItemId; + + let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); + + let filter_log = |log: ConsensusLog| match log { + ConsensusLog::ForcedChange(delay, change) => Some((delay, change)), + _ => None, + }; + + // find the first consensus digest with the right ID which converts to + // the right kind of consensus log. + header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) +} + +/// (Re)initialize bridge with given header for using it in `pallet-bridge-messages` benchmarks. +#[cfg(feature = "runtime-benchmarks")] +pub fn initialize_for_benchmarks, I: 'static>(header: BridgedHeader) { + initialize_bridge::(InitializationData { + header, + authority_list: sp_std::vec::Vec::new(), // we don't verify any proofs in external benchmarks + set_id: 0, + is_halted: false, + }); +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{run_test, test_header, Origin, TestHash, TestHeader, TestNumber, TestRuntime}; + use bp_test_utils::{ + authority_list, make_default_justification, make_justification_for_header, JustificationGeneratorParams, ALICE, + BOB, + }; + use codec::Encode; + use frame_support::weights::PostDispatchInfo; + use frame_support::{assert_err, assert_noop, assert_ok}; + use sp_runtime::{Digest, DigestItem, DispatchError}; + + fn initialize_substrate_bridge() { + assert_ok!(init_with_origin(Origin::root())); + } + + fn init_with_origin( + origin: Origin, + ) -> Result, sp_runtime::DispatchErrorWithPostInfo> { + let genesis = test_header(0); + + let init_data = InitializationData { + header: genesis, + authority_list: authority_list(), + set_id: 1, + is_halted: false, + }; + + Pallet::::initialize(origin, init_data.clone()).map(|_| init_data) + } + + fn submit_finality_proof(header: u8) -> frame_support::dispatch::DispatchResultWithPostInfo { + let header = 
test_header(header.into()); + let justification = make_default_justification(&header); + + Pallet::::submit_finality_proof(Origin::signed(1), header, justification) + } + + fn next_block() { + use frame_support::traits::OnInitialize; + + let current_number = frame_system::Pallet::::block_number(); + frame_system::Pallet::::set_block_number(current_number + 1); + let _ = Pallet::::on_initialize(current_number); + } + + fn change_log(delay: u64) -> Digest { + let consensus_log = ConsensusLog::::ScheduledChange(sp_finality_grandpa::ScheduledChange { + next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)], + delay, + }); + + Digest:: { + logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())], + } + } + + fn forced_change_log(delay: u64) -> Digest { + let consensus_log = ConsensusLog::::ForcedChange( + delay, + sp_finality_grandpa::ScheduledChange { + next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)], + delay, + }, + ); + + Digest:: { + logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())], + } + } + + #[test] + fn init_root_or_owner_origin_can_initialize_pallet() { + run_test(|| { + assert_noop!(init_with_origin(Origin::signed(1)), DispatchError::BadOrigin); + assert_ok!(init_with_origin(Origin::root())); + + // Reset storage so we can initialize the pallet again + BestFinalized::::kill(); + PalletOwner::::put(2); + assert_ok!(init_with_origin(Origin::signed(2))); + }) + } + + #[test] + fn init_storage_entries_are_correctly_initialized() { + run_test(|| { + assert_eq!( + BestFinalized::::get(), + BridgedBlockHash::::default() + ); + assert_eq!(Pallet::::best_finalized(), test_header(0)); + + let init_data = init_with_origin(Origin::root()).unwrap(); + + assert!(>::contains_key(init_data.header.hash())); + assert_eq!(BestFinalized::::get(), init_data.header.hash()); + assert_eq!( + CurrentAuthoritySet::::get().authorities, + init_data.authority_list + ); + assert_eq!(IsHalted::::get(), false); + }) + } + + 
#[test] + fn init_can_only_initialize_pallet_once() { + run_test(|| { + initialize_substrate_bridge(); + assert_noop!( + init_with_origin(Origin::root()), + >::AlreadyInitialized + ); + }) + } + + #[test] + fn pallet_owner_may_change_owner() { + run_test(|| { + PalletOwner::::put(2); + + assert_ok!(Pallet::::set_owner(Origin::root(), Some(1))); + assert_noop!( + Pallet::::set_operational(Origin::signed(2), false), + DispatchError::BadOrigin, + ); + assert_ok!(Pallet::::set_operational(Origin::root(), false)); + + assert_ok!(Pallet::::set_owner(Origin::signed(1), None)); + assert_noop!( + Pallet::::set_operational(Origin::signed(1), true), + DispatchError::BadOrigin, + ); + assert_noop!( + Pallet::::set_operational(Origin::signed(2), true), + DispatchError::BadOrigin, + ); + assert_ok!(Pallet::::set_operational(Origin::root(), true)); + }); + } + + #[test] + fn pallet_may_be_halted_by_root() { + run_test(|| { + assert_ok!(Pallet::::set_operational(Origin::root(), false)); + assert_ok!(Pallet::::set_operational(Origin::root(), true)); + }); + } + + #[test] + fn pallet_may_be_halted_by_owner() { + run_test(|| { + PalletOwner::::put(2); + + assert_ok!(Pallet::::set_operational(Origin::signed(2), false)); + assert_ok!(Pallet::::set_operational(Origin::signed(2), true)); + + assert_noop!( + Pallet::::set_operational(Origin::signed(1), false), + DispatchError::BadOrigin, + ); + assert_noop!( + Pallet::::set_operational(Origin::signed(1), true), + DispatchError::BadOrigin, + ); + + assert_ok!(Pallet::::set_operational(Origin::signed(2), false)); + assert_noop!( + Pallet::::set_operational(Origin::signed(1), true), + DispatchError::BadOrigin, + ); + }); + } + + #[test] + fn pallet_rejects_transactions_if_halted() { + run_test(|| { + >::put(true); + + assert_noop!(submit_finality_proof(1), Error::::Halted,); + }) + } + + #[test] + fn succesfully_imports_header_with_valid_finality() { + run_test(|| { + initialize_substrate_bridge(); + assert_ok!(submit_finality_proof(1)); + + 
let header = test_header(1); + assert_eq!(>::get(), header.hash()); + assert!(>::contains_key(header.hash())); + }) + } + + #[test] + fn rejects_justification_that_skips_authority_set_transition() { + run_test(|| { + initialize_substrate_bridge(); + + let header = test_header(1); + + let params = JustificationGeneratorParams:: { + set_id: 2, + ..Default::default() + }; + let justification = make_justification_for_header(params); + + assert_err!( + Pallet::::submit_finality_proof(Origin::signed(1), header, justification,), + >::InvalidJustification + ); + }) + } + + #[test] + fn does_not_import_header_with_invalid_finality_proof() { + run_test(|| { + initialize_substrate_bridge(); + + let header = test_header(1); + let mut justification = make_default_justification(&header); + justification.round = 42; + + assert_err!( + Pallet::::submit_finality_proof(Origin::signed(1), header, justification,), + >::InvalidJustification + ); + }) + } + + #[test] + fn disallows_invalid_authority_set() { + run_test(|| { + let genesis = test_header(0); + + let invalid_authority_list = vec![(ALICE.into(), u64::MAX), (BOB.into(), u64::MAX)]; + let init_data = InitializationData { + header: genesis, + authority_list: invalid_authority_list, + set_id: 1, + is_halted: false, + }; + + assert_ok!(Pallet::::initialize(Origin::root(), init_data)); + + let header = test_header(1); + let justification = make_default_justification(&header); + + assert_err!( + Pallet::::submit_finality_proof(Origin::signed(1), header, justification,), + >::InvalidAuthoritySet + ); + }) + } + + #[test] + fn importing_header_ensures_that_chain_is_extended() { + run_test(|| { + initialize_substrate_bridge(); + + assert_ok!(submit_finality_proof(4)); + assert_err!(submit_finality_proof(3), Error::::OldHeader); + assert_ok!(submit_finality_proof(5)); + }) + } + + #[test] + fn importing_header_enacts_new_authority_set() { + run_test(|| { + initialize_substrate_bridge(); + + let next_set_id = 2; + let next_authorities = 
vec![(ALICE.into(), 1), (BOB.into(), 1)]; + + // Need to update the header digest to indicate that our header signals an authority set + // change. The change will be enacted when we import our header. + let mut header = test_header(2); + header.digest = change_log(0); + + // Create a valid justification for the header + let justification = make_default_justification(&header); + + // Let's import our test header + assert_ok!(Pallet::::submit_finality_proof( + Origin::signed(1), + header.clone(), + justification + )); + + // Make sure that our header is the best finalized + assert_eq!(>::get(), header.hash()); + assert!(>::contains_key(header.hash())); + + // Make sure that the authority set actually changed upon importing our header + assert_eq!( + >::get(), + bp_header_chain::AuthoritySet::new(next_authorities, next_set_id), + ); + }) + } + + #[test] + fn importing_header_rejects_header_with_scheduled_change_delay() { + run_test(|| { + initialize_substrate_bridge(); + + // Need to update the header digest to indicate that our header signals an authority set + // change. However, the change doesn't happen until the next block. + let mut header = test_header(2); + header.digest = change_log(1); + + // Create a valid justification for the header + let justification = make_default_justification(&header); + + // Should not be allowed to import this header + assert_err!( + Pallet::::submit_finality_proof(Origin::signed(1), header, justification), + >::UnsupportedScheduledChange + ); + }) + } + + #[test] + fn importing_header_rejects_header_with_forced_changes() { + run_test(|| { + initialize_substrate_bridge(); + + // Need to update the header digest to indicate that it signals a forced authority set + // change. 
+ let mut header = test_header(2); + header.digest = forced_change_log(0); + + // Create a valid justification for the header + let justification = make_default_justification(&header); + + // Should not be allowed to import this header + assert_err!( + Pallet::::submit_finality_proof(Origin::signed(1), header, justification), + >::UnsupportedScheduledChange + ); + }) + } + + #[test] + fn parse_finalized_storage_proof_rejects_proof_on_unknown_header() { + run_test(|| { + assert_noop!( + Pallet::::parse_finalized_storage_proof( + Default::default(), + sp_trie::StorageProof::new(vec![]), + |_| (), + ), + Error::::UnknownHeader, + ); + }); + } + + #[test] + fn parse_finalized_storage_accepts_valid_proof() { + run_test(|| { + let (state_root, storage_proof) = bp_runtime::craft_valid_storage_proof(); + + let mut header = test_header(2); + header.set_state_root(state_root); + + let hash = header.hash(); + >::put(hash); + >::insert(hash, header); + + assert_ok!( + Pallet::::parse_finalized_storage_proof(hash, storage_proof, |_| (),), + (), + ); + }); + } + + #[test] + fn rate_limiter_disallows_imports_once_limit_is_hit_in_single_block() { + run_test(|| { + initialize_substrate_bridge(); + + assert_ok!(submit_finality_proof(1)); + assert_ok!(submit_finality_proof(2)); + assert_err!(submit_finality_proof(3), >::TooManyRequests); + }) + } + + #[test] + fn rate_limiter_invalid_requests_do_not_count_towards_request_count() { + run_test(|| { + let submit_invalid_request = || { + let header = test_header(1); + let mut invalid_justification = make_default_justification(&header); + invalid_justification.round = 42; + + Pallet::::submit_finality_proof(Origin::signed(1), header, invalid_justification) + }; + + initialize_substrate_bridge(); + + for _ in 0..::MaxRequests::get() + 1 { + // Notice that the error here *isn't* `TooManyRequests` + assert_err!(submit_invalid_request(), >::InvalidJustification); + } + + // Can still submit `MaxRequests` requests afterwards + 
assert_ok!(submit_finality_proof(1)); + assert_ok!(submit_finality_proof(2)); + assert_err!(submit_finality_proof(3), >::TooManyRequests); + }) + } + + #[test] + fn rate_limiter_allows_request_after_new_block_has_started() { + run_test(|| { + initialize_substrate_bridge(); + assert_ok!(submit_finality_proof(1)); + assert_ok!(submit_finality_proof(2)); + + next_block(); + assert_ok!(submit_finality_proof(3)); + }) + } + + #[test] + fn rate_limiter_disallows_imports_once_limit_is_hit_across_different_blocks() { + run_test(|| { + initialize_substrate_bridge(); + assert_ok!(submit_finality_proof(1)); + assert_ok!(submit_finality_proof(2)); + + next_block(); + assert_ok!(submit_finality_proof(3)); + assert_err!(submit_finality_proof(4), >::TooManyRequests); + }) + } + + #[test] + fn rate_limiter_allows_max_requests_after_long_time_with_no_activity() { + run_test(|| { + initialize_substrate_bridge(); + assert_ok!(submit_finality_proof(1)); + assert_ok!(submit_finality_proof(2)); + + next_block(); + next_block(); + + next_block(); + assert_ok!(submit_finality_proof(5)); + assert_ok!(submit_finality_proof(7)); + }) + } + + #[test] + fn should_prune_headers_over_headers_to_keep_parameter() { + run_test(|| { + initialize_substrate_bridge(); + assert_ok!(submit_finality_proof(1)); + let first_header = Pallet::::best_finalized(); + next_block(); + + assert_ok!(submit_finality_proof(2)); + next_block(); + assert_ok!(submit_finality_proof(3)); + next_block(); + assert_ok!(submit_finality_proof(4)); + next_block(); + assert_ok!(submit_finality_proof(5)); + next_block(); + + assert_ok!(submit_finality_proof(6)); + + assert!( + !Pallet::::is_known_header(first_header.hash()), + "First header should be pruned." 
+ ); + }) + } +} diff --git a/polkadot/modules/grandpa/src/mock.rs b/polkadot/modules/grandpa/src/mock.rs new file mode 100644 index 00000000000..20f5ea7bdf7 --- /dev/null +++ b/polkadot/modules/grandpa/src/mock.rs @@ -0,0 +1,113 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +// From construct_runtime macro +#![allow(clippy::from_over_into)] + +use bp_runtime::Chain; +use frame_support::{construct_runtime, parameter_types, weights::Weight}; +use sp_runtime::{ + testing::{Header, H256}, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, +}; + +pub type AccountId = u64; +pub type TestHeader = crate::BridgedHeader; +pub type TestNumber = crate::BridgedBlockNumber; +pub type TestHash = crate::BridgedBlockHash; + +type Block = frame_system::mocking::MockBlock; +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + +use crate as grandpa; + +construct_runtime! { + pub enum TestRuntime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Grandpa: grandpa::{Pallet}, + } +} + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} + +impl frame_system::Config for TestRuntime { + type Origin = Origin; + type Index = u64; + type Call = Call; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type BaseCallFilter = (); + type SystemWeightInfo = (); + type DbWeight = (); + type BlockWeights = (); + type BlockLength = (); + type SS58Prefix = (); + type OnSetCode = (); +} + +parameter_types! { + pub const MaxRequests: u32 = 2; + pub const HeadersToKeep: u32 = 5; + pub const SessionLength: u64 = 5; + pub const NumValidators: u32 = 5; +} + +impl grandpa::Config for TestRuntime { + type BridgedChain = TestBridgedChain; + type MaxRequests = MaxRequests; + type HeadersToKeep = HeadersToKeep; + type WeightInfo = (); +} + +#[derive(Debug)] +pub struct TestBridgedChain; + +impl Chain for TestBridgedChain { + type BlockNumber = ::BlockNumber; + type Hash = ::Hash; + type Hasher = ::Hashing; + type Header = ::Header; +} + +pub fn run_test(test: impl FnOnce() -> T) -> T { + sp_io::TestExternalities::new(Default::default()).execute_with(test) +} + +pub fn test_header(num: TestNumber) -> TestHeader { + // We wrap the call to avoid explicit type annotations in our tests + bp_test_utils::test_header(num) +} diff --git a/polkadot/modules/grandpa/src/weights.rs b/polkadot/modules/grandpa/src/weights.rs new file mode 100644 index 00000000000..a548534a20b --- /dev/null +++ b/polkadot/modules/grandpa/src/weights.rs @@ -0,0 +1,121 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
+// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Autogenerated weights for pallet_bridge_grandpa +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-04-14, STEPS: [50, ], REPEAT: 20 +//! LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled +//! CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/rialto-bridge-node +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_bridge_grandpa +// --extrinsic=* +// --execution=wasm +// --wasm-execution=Compiled +// --heap-pages=4096 +// --output=./modules/grandpa/src/weights.rs +// --template=./.maintain/rialto-weight-template.hbs + +#![allow(clippy::all)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{ + traits::Get, + weights::{constants::RocksDbWeight, Weight}, +}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_bridge_grandpa. 
+pub trait WeightInfo { + fn submit_finality_proof(v: u32, p: u32) -> Weight; + fn submit_finality_proof_on_single_fork(v: u32) -> Weight; + fn submit_finality_proof_on_many_forks(p: u32) -> Weight; + fn find_scheduled_change(n: u32) -> Weight; + fn read_write_authority_sets(n: u32) -> Weight; +} + +/// Weights for pallet_bridge_grandpa using the Rialto node and recommended hardware. +pub struct RialtoWeight(PhantomData); +impl WeightInfo for RialtoWeight { + fn submit_finality_proof(v: u32, p: u32) -> Weight { + (0 as Weight) + .saturating_add((837_084_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((874_929_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } + fn submit_finality_proof_on_single_fork(v: u32) -> Weight { + (276_463_000 as Weight) + .saturating_add((14_149_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } + fn submit_finality_proof_on_many_forks(p: u32) -> Weight { + (10_676_019_000 as Weight) + .saturating_add((97_598_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } + fn find_scheduled_change(n: u32) -> Weight { + (618_000 as Weight).saturating_add((8_000 as Weight).saturating_mul(n as Weight)) + } + fn read_write_authority_sets(n: u32) -> Weight { + (8_582_000 as Weight) + .saturating_add((234_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn submit_finality_proof(v: u32, p: u32) -> Weight { + (0 as Weight) + .saturating_add((837_084_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((874_929_000 as 
Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } + fn submit_finality_proof_on_single_fork(v: u32) -> Weight { + (276_463_000 as Weight) + .saturating_add((14_149_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } + fn submit_finality_proof_on_many_forks(p: u32) -> Weight { + (10_676_019_000 as Weight) + .saturating_add((97_598_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } + fn find_scheduled_change(n: u32) -> Weight { + (618_000 as Weight).saturating_add((8_000 as Weight).saturating_mul(n as Weight)) + } + fn read_write_authority_sets(n: u32) -> Weight { + (8_582_000 as Weight) + .saturating_add((234_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } +} diff --git a/polkadot/modules/messages/Cargo.toml b/polkadot/modules/messages/Cargo.toml new file mode 100644 index 00000000000..4a75fa8181f --- /dev/null +++ b/polkadot/modules/messages/Cargo.toml @@ -0,0 +1,54 @@ +[package] +name = "pallet-bridge-messages" +description = "Module that allows bridged chains to exchange messages using lane concept." 
+version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +log = { version = "0.4.14", default-features = false } +num-traits = { version = "0.2", default-features = false } +serde = { version = "1.0.101", optional = true, features = ["derive"] } + +# Bridge dependencies + +bp-messages = { path = "../../primitives/messages", default-features = false } +bp-rialto = { path = "../../primitives/chain-rialto", default-features = false } +bp-runtime = { path = "../../primitives/runtime", default-features = false } + +# Substrate Dependencies + +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + +[dev-dependencies] +hex = "0.4" +hex-literal = "0.3" +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[features] +default = ["std"] +std = [ + "bp-messages/std", + "bp-runtime/std", + "bp-rialto/std", + "codec/std", + "frame-support/std", + "frame-system/std", + "log/std", + "num-traits/std", + "serde", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] +runtime-benchmarks = [ + "frame-benchmarking", +] diff --git 
a/polkadot/modules/messages/README.md b/polkadot/modules/messages/README.md new file mode 100644 index 00000000000..eda5e28a6c8 --- /dev/null +++ b/polkadot/modules/messages/README.md @@ -0,0 +1,391 @@ +# Messages Module + +The messages module is used to deliver messages from source chain to target chain. Message is +(almost) opaque to the module and the final goal is to hand the message to the message dispatch +mechanism. + +## Contents +- [Overview](#overview) +- [Message Workflow](#message-workflow) +- [Integrating Message Lane Module into Runtime](#integrating-messages-module-into-runtime) +- [Non-Essential Functionality](#non-essential-functionality) +- [Weights of Module Extrinsics](#weights-of-module-extrinsics) + +## Overview + +Message lane is a unidirectional channel, where messages are sent from source chain to the target +chain. At the same time, a single instance of messages module supports both outbound lanes and +inbound lanes. So the chain where the module is deployed (this chain), may act as a source chain for +outbound messages (heading to a bridged chain) and as a target chain for inbound messages (coming +from a bridged chain). + +Messages module supports multiple message lanes. Every message lane is identified with a 4-byte +identifier. Messages sent through the lane are assigned unique (for this lane) increasing integer +value that is known as nonce ("number that can only be used once"). Messages that are sent over the +same lane are guaranteed to be delivered to the target chain in the same order they're sent from +the source chain. In other words, message with nonce `N` will be delivered right before delivering a +message with nonce `N+1`. + +Single message lane may be seen as a transport channel for single application (onchain, offchain or +mixed). At the same time the module itself never dictates any lane or message rules. In the end, it +is the runtime developer who defines what message lane and message mean for this runtime.
+ +## Message Workflow + +The message "appears" when its submitter calls the `send_message()` function of the module. The +submitter specifies the lane that he's willing to use, the message itself and the fee that he's +willing to pay for the message delivery and dispatch. If a message passes all checks, the nonce is +assigned and the message is stored in the module storage. The message is in an "undelivered" state +now. + +We assume that there are external, offchain actors, called relayers, that are submitting module +related transactions to both target and source chains. The pallet itself has no assumptions about +relayers incentivization scheme, but it has some callbacks for paying rewards. See +[Integrating Messages Module into runtime](#Integrating-Messages-Module-into-runtime) +for details. + +Eventually, some relayer would notice this message in the "undelivered" state and it would decide to +deliver this message. Relayer then crafts `receive_messages_proof()` transaction (aka delivery +transaction) for the messages module instance, deployed at the target chain. Relayer provides +his account id at the source chain, the proof of message (or several messages), the number of +messages in the transaction and their cumulative dispatch weight. Once a transaction is mined, the +message is considered "delivered". + +Once a message is delivered, the relayer may want to confirm delivery back to the source chain. +There are two reasons why he would want to do that. The first is that we intentionally limit number +of "delivered", but not yet "confirmed" messages at inbound lanes +(see [What about other Constants in the Messages Module Configuration Trait](#What-about-other-Constants-in-the-Messages-Module-Configuration-Trait) for explanation). +So at some point, the target chain may stop accepting new messages until relayers confirm some of +these. 
The second is that if the relayer wants to be rewarded for delivery, he must prove the fact +that he has actually delivered the message. And this proof may only be generated after the delivery +transaction is mined. So relayer crafts the `receive_messages_delivery_proof()` transaction (aka +confirmation transaction) for the messages module instance, deployed at the source chain. Once +this transaction is mined, the message is considered "confirmed". + +The "confirmed" state is the final state of the message. But there's one last thing related to the +message - the fact that it is now "confirmed" and reward has been paid to the relayer (or at least +callback for this has been called), must be confirmed to the target chain. Otherwise, we may reach +the limit of "unconfirmed" messages at the target chain and it will stop accepting new messages. So +relayer sometimes includes a nonce of the latest "confirmed" message in the next +`receive_messages_proof()` transaction, proving that some messages have been confirmed. + +## Integrating Messages Module into Runtime + +As it has been said above, the messages module supports both outbound and inbound message lanes. +So if we will integrate a module in some runtime, it may act as the source chain runtime for +outbound messages and as the target chain runtime for inbound messages. In this section, we'll +sometimes refer to the chain we're currently integrating with, as this chain and the other chain as +bridged chain. + +Messages module doesn't simply accept transactions that are claiming that the bridged chain has +some updated data for us. Instead of this, the module assumes that the bridged chain is able to +prove that updated data in some way. The proof is abstracted from the module and may be of any kind. +In our Substrate-to-Substrate bridge we're using runtime storage proofs. Other bridges may use +transaction proofs, Substrate header digests or anything else that may be proved. 
+ +**IMPORTANT NOTE**: everything below in this chapter describes details of the messages module +configuration. But if you are interested in well-probed and relatively easy integration of two +Substrate-based chains, you may want to look at the +[bridge-runtime-common](../../bin/runtime-common/README.md) crate. This crate is providing a lot of +helpers for integration, which may be directly used from within your runtime. Then if you'll decide +to change something in this scheme, get back here for detailed information. + +### General Information + +The messages module supports instances. Every module instance is supposed to bridge this chain +and some bridged chain. To bridge with another chain, using another instance is suggested (this +isn't forced anywhere in the code, though). + +Message submitters may track message progress by inspecting module events. When a message is accepted, +the `MessageAccepted` event is emitted in the `send_message()` transaction. The event contains both +message lane identifier and nonce that has been assigned to the message. When a message is delivered +to the target chain, the `MessagesDelivered` event is emitted from the +`receive_messages_delivery_proof()` transaction. The `MessagesDelivered` contains the message lane +identifier and inclusive range of delivered message nonces. + +### How to plug-in Messages Module to Send Messages to the Bridged Chain? + +The `pallet_bridge_messages::Config` trait has 3 main associated types that are used to work with +outbound messages. The `pallet_bridge_messages::Config::TargetHeaderChain` defines how we see the +bridged chain as the target for our outbound messages. It must be able to check that the bridged +chain may accept our message - like that the message has size below maximal possible transaction +size of the chain and so on. And when the relayer sends us a confirmation transaction, this +implementation must be able to parse and verify the proof of messages delivery.
Normally, you would +reuse the same (configurable) type on all chains that are sending messages to the same bridged +chain. + +The `pallet_bridge_messages::Config::LaneMessageVerifier` defines a single callback to verify outbound +messages. The simplest callback may just accept all messages. But in this case you'll need to answer +many questions first. Who will pay for the delivery and confirmation transaction? Are we sure that +someone will ever deliver this message to the bridged chain? Are we sure that we don't bloat our +runtime storage by accepting this message? What if the message is improperly encoded or has some +fields set to invalid values? Answering all those (and similar) questions would lead to correct +implementation. + +There's another thing to consider when implementing type for use in +`pallet_bridge_messages::Config::LaneMessageVerifier`. It is whether we treat all message lanes +identically, or they'll have different sets of verification rules? For example, you may reserve +lane#1 for messages coming from some 'wrapped-token' pallet - then you may verify in your +implementation that the origin is associated with this pallet. Lane#2 may be reserved for 'system' +messages and you may charge zero fee for such messages. You may have some rate limiting for messages +sent over the lane#3. Or you may just verify the same rules set for all outbound messages - it is +all up to the `pallet_bridge_messages::Config::LaneMessageVerifier` implementation. + +The last type is the `pallet_bridge_messages::Config::MessageDeliveryAndDispatchPayment`. When all +checks are made and we have decided to accept the message, we're calling the +`pay_delivery_and_dispatch_fee()` callback, passing the corresponding argument of the `send_message` +function. Later, when message delivery is confirmed, we're calling `pay_relayers_rewards()` +callback, passing accounts of relayers and messages that they have delivered. 
The simplest +implementation of this trait is in the [`instant_payments.rs`](./src/instant_payments.rs) module and +simply calls `Currency::transfer()` when those callbacks are called. So `Currency` units are +transferred between submitter, 'relayers fund' and relayers accounts. Other implementations may use +more or less sophisticated techniques - the whole relayers incentivization scheme is not a part of +the messages module. + +### I have a Messages Module in my Runtime, but I Want to Reject all Outbound Messages. What shall I do? + +You should be looking at the `bp_messages::source_chain::ForbidOutboundMessages` structure from +[`bp_messages::source_chain`](../../primitives/messages/src/source_chain.rs). It implements +all required traits and will simply reject all transactions, related to outbound messages. + +### How to plug-in Messages Module to Receive Messages from the Bridged Chain? + +The `pallet_bridge_messages::Config` trait has 2 main associated types that are used to work with +inbound messages. The `pallet_bridge_messages::Config::SourceHeaderChain` defines how we see the +bridged chain as the source of our inbound messages. When relayer sends us a delivery transaction, +this implementation must be able to parse and verify the proof of messages wrapped in this +transaction. Normally, you would reuse the same (configurable) type on all chains that are sending +messages to the same bridged chain. + +The `pallet_bridge_messages::Config::MessageDispatch` defines a way to dispatch delivered +messages. Apart from actually dispatching the message, the implementation must return the correct +dispatch weight of the message before dispatch is called. + +### I have a Messages Module in my Runtime, but I Want to Reject all Inbound Messages. What +shall I do? + +You should be looking at the `bp_messages::target_chain::ForbidInboundMessages` structure from +the [`bp_messages::target_chain`](../../primitives/messages/src/target_chain.rs) module.
It +implements all required traits and will simply reject all transactions, related to inbound messages. + +### What about other Constants in the Messages Module Configuration Trait? + +Message is being stored in the source chain storage until its delivery is confirmed. After +that, we may safely remove the message from the storage. Lane messages are removed (pruned) when +someone sends a new message using the same lane. So the message submitter pays for that pruning. To +avoid pruning too many messages in a single transaction, there's +`pallet_bridge_messages::Config::MaxMessagesToPruneAtOnce` configuration parameter. We will never prune +more than this number of messages in the single transaction. That said, the value should not be too +big to avoid waste of resources when there are no messages to prune. + +To be able to reward the relayer for delivering messages, we store a map of message nonces range => +identifier of the relayer that has delivered this range at the target chain runtime storage. If a +relayer delivers multiple consequent ranges, they're merged into single entry. So there may be more +than one entry for the same relayer. Eventually, this whole map must be delivered back to the source +chain to confirm delivery and pay rewards. So to make sure we are able to craft this confirmation +transaction, we need to: (1) keep the size of this map below a certain limit and (2) make sure that +the weight of processing this map is below a certain limit. Both size and processing weight mostly +depend on the number of entries. The number of entries is limited with the +`pallet_bridge_messages::Config::MaxUnrewardedRelayerEntriesAtInboundLane` parameter. Processing weight +also depends on the total number of messages that are being confirmed, because every confirmed +message needs to be read. So there's another +`pallet_bridge_messages::Config::MaxUnconfirmedMessagesAtInboundLane` parameter for that.
+ +When choosing values for these parameters, you must also keep in mind that if proof in your scheme +is based on finality of headers (and it is the most obvious option for Substrate-based chains with +finality notion), then choosing too small values for these parameters may cause significant delays +in message delivery. That's because there are too many actors involved in this scheme: 1) authorities +that are finalizing headers of the target chain need to finalize header with non-empty map; 2) the +headers relayer then needs to submit this header and its finality proof to the source chain; 3) the +messages relayer must then send confirmation transaction (storage proof of this map) to the source +chain; 4) when the confirmation transaction will be mined at some header, source chain authorities +must finalize this header; 5) the headers relay then needs to submit this header and its finality +proof to the target chain; 6) only now the messages relayer may submit new messages from the source +to target chain and prune the entry from the map. + +Delivery transaction requires the relayer to provide both number of entries and total number of +messages in the map. This means that the module never charges an extra cost for delivering a map - +the relayer would need to pay exactly for the number of entries+messages it has delivered. So the +best guess for values of these parameters would be the pair that would occupy `N` percent of the +maximal transaction size and weight of the source chain. The `N` should be large enough to process +large maps, at the same time keeping reserve for future source chain upgrades. + +## Non-Essential Functionality + +Apart from the message related calls, the module exposes a set of auxiliary calls. They fall in two +groups, described in the next two paragraphs. + +There may be a special account in every runtime where the messages module is deployed.
This +account, named 'module owner', is like a module-level sudo account - he's able to halt all and +resume all module operations without requiring runtime upgrade. The module may have no module +owner, but we suggest using it at least for initial deployment. The calls that are related to this +account are: +- `fn set_owner()`: current module owner may call it to transfer "ownership" to another account; +- `fn halt_operations()`: the module owner (or sudo account) may call this function to stop all + module operations. After this call, all message-related transactions will be rejected until + further `resume_operations` call. This call may be used when something extraordinary happens with + the bridge; +- `fn resume_operations()`: module owner may call this function to resume bridge operations. The + module will resume its regular operations after this call. + +Apart from halting and resuming the bridge, the module owner may also tune module configuration +parameters without runtime upgrades. The set of parameters needs to be designed in advance, though. +The module configuration trait has associated `Parameter` type, which may be e.g. enum and represent +a set of parameters that may be updated by the module owner. For example, if your bridge needs to +convert sums between different tokens, you may define a 'conversion rate' parameter and let the +module owner update this parameter when there are significant changes in the rate. The corresponding +module call is `fn update_pallet_parameter()`. + +## Weights of Module Extrinsics + +The main assumptions behind weight formulas are: +- all possible costs are paid in advance by the message submitter; +- whenever possible, relayer tries to minimize cost of its transactions. So e.g.
even though sender + always pays for delivering outbound lane state proof, relayer may not include it in the delivery + transaction (unless messages module on target chain requires that); +- weight formula should incentivize relayer to not to submit any redundant data in the extrinsics + arguments; +- the extrinsic shall never be executing slower (i.e. has larger actual weight) than defined by the + formula. + +### Weight of `send_message` call + +#### Related benchmarks + +| Benchmark | Description | +|-----------------------------------|-----------------------------------------------------| +`send_minimal_message_worst_case` | Sends 0-size message with worst possible conditions | +`send_1_kb_message_worst_case` | Sends 1KB-size message with worst possible conditions | +`send_16_kb_message_worst_case` | Sends 16KB-size message with worst possible conditions | + +#### Weight formula + +The weight formula is: +``` +Weight = BaseWeight + MessageSizeInKilobytes * MessageKiloByteSendWeight +``` + +Where: + +| Component | How it is computed? 
| Description | +|-----------------------------|------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------| +| `SendMessageOverhead` | `send_minimal_message_worst_case` | Weight of sending minimal (0 bytes) message | +| `MessageKiloByteSendWeight` | `(send_16_kb_message_worst_case - send_1_kb_message_worst_case)/15` | Weight of sending every additional kilobyte of the message | + +### Weight of `receive_messages_proof` call + +#### Related benchmarks + +| Benchmark | Description* | +|---------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------| +| `receive_single_message_proof` | Receives proof of single `EXPECTED_DEFAULT_MESSAGE_LENGTH` message | +| `receive_two_messages_proof` | Receives proof of two identical `EXPECTED_DEFAULT_MESSAGE_LENGTH` messages | +| `receive_single_message_proof_with_outbound_lane_state` | Receives proof of single `EXPECTED_DEFAULT_MESSAGE_LENGTH` message and proof of outbound lane state at the source chain | +| `receive_single_message_proof_1_kb` | Receives proof of single message. The proof has size of approximately 1KB** | +| `receive_single_message_proof_16_kb` | Receives proof of single message. The proof has size of approximately 16KB** | + +*\* - In all benchmarks all received messages are dispatched and their dispatch cost is near to zero* + +*\*\* - Trie leafs are assumed to have minimal values. The proof is derived from the minimal proof +by including more trie nodes. That's because according to `receive_message_proofs_with_large_leaf` +and `receive_message_proofs_with_extra_nodes` benchmarks, increasing proof by including more nodes +has slightly larger impact on performance than increasing values stored in leafs*. 
+ +#### Weight formula + +The weight formula is: +``` +Weight = BaseWeight + OutboundStateDeliveryWeight + + MessagesCount * MessageDeliveryWeight + + MessagesDispatchWeight + + Max(0, ActualProofSize - ExpectedProofSize) * ProofByteDeliveryWeight +``` + +Where: + +| Component | How it is computed? | Description | +|-------------------------------|------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `BaseWeight` | `2*receive_single_message_proof - receive_two_messages_proof` | Weight of receiving and parsing minimal proof | +| `OutboundStateDeliveryWeight` | `receive_single_message_proof_with_outbound_lane_state - receive_single_message_proof` | Additional weight when proof includes outbound lane state | +| `MessageDeliveryWeight` | `receive_two_messages_proof - receive_single_message_proof` | Weight of of parsing and dispatching (without actual dispatch cost) of every message | +| `MessagesCount` | | Provided by relayer | +| `MessagesDispatchWeight` | | Provided by relayer | +| `ActualProofSize` | | Provided by relayer | +| `ExpectedProofSize` | `EXPECTED_DEFAULT_MESSAGE_LENGTH * MessagesCount + EXTRA_STORAGE_PROOF_SIZE` | Size of proof that we are expecting. This only includes `EXTRA_STORAGE_PROOF_SIZE` once, because we assume that intermediate nodes likely to be included in the proof only once. 
This may be wrong, but since weight of processing proof with many nodes is almost equal to processing proof with large leafs, additional cost will be covered because we're charging for extra proof bytes anyway | +| `ProofByteDeliveryWeight` | `(receive_single_message_proof_16_kb - receive_single_message_proof_1_kb) / (15 * 1024)` | Weight of processing every additional proof byte over `ExpectedProofSize` limit | + +#### Why for every message sent using `send_message` we will be able to craft `receive_messages_proof` transaction? + +We have following checks in `send_message` transaction on the source chain: +- message size should be less than or equal to `2/3` of maximal extrinsic size on the target chain; +- message dispatch weight should be less than or equal to the `1/2` of maximal extrinsic dispatch + weight on the target chain. + +Delivery transaction is an encoded delivery call and signed extensions. So we have `1/3` of maximal +extrinsic size reserved for: +- storage proof, excluding the message itself. Currently, on our test chains, the overhead is always + within `EXTRA_STORAGE_PROOF_SIZE` limits (1024 bytes); +- signed extras and other call arguments (`relayer_id: SourceChain::AccountId`, `messages_count: + u32`, `dispatch_weight: u64`). + +On Millau chain, maximal extrinsic size is `0.75 * 2MB`, so `1/3` is `512KB` (`524_288` bytes). This +should be enough to cover these extra arguments and signed extensions. + +Let's exclude message dispatch cost from single message delivery transaction weight formula: +``` +Weight = BaseWeight + OutboundStateDeliveryWeight + MessageDeliveryWeight + + Max(0, ActualProofSize - ExpectedProofSize) * ProofByteDeliveryWeight +``` + +So we have `1/2` of maximal extrinsic weight to cover these components. `BaseWeight`, +`OutboundStateDeliveryWeight` and `MessageDeliveryWeight` are determined using benchmarks and are +hardcoded into runtime. Adequate relayer would only include required trie nodes into the proof. 
So +if message size would be maximal (`2/3` of `MaximalExtrinsicSize`), then the extra proof size would +be `MaximalExtrinsicSize / 3 * 2 - EXPECTED_DEFAULT_MESSAGE_LENGTH`. + +Both conditions are verified by `pallet_bridge_messages::ensure_weights_are_correct` and +`pallet_bridge_messages::ensure_able_to_receive_messages` functions, which must be called from every +runtime's tests. + +### Weight of `receive_messages_delivery_proof` call + +#### Related benchmarks + +| Benchmark | Description | +|-------------------------------------------------------------|------------------------------------------------------------------------------------------| +| `receive_delivery_proof_for_single_message` | Receives proof of single message delivery | +| `receive_delivery_proof_for_two_messages_by_single_relayer` | Receives proof of two messages delivery. Both messages are delivered by the same relayer | +| `receive_delivery_proof_for_two_messages_by_two_relayers` | Receives proof of two messages delivery. Messages are delivered by different relayers | + +#### Weight formula + +The weight formula is: +``` +Weight = BaseWeight + MessagesCount * MessageConfirmationWeight + + RelayersCount * RelayerRewardWeight + + Max(0, ActualProofSize - ExpectedProofSize) * ProofByteDeliveryWeight +``` + +Where: + +| Component | How it is computed? 
| Description | +|---------------------------|-----------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `BaseWeight` | `2*receive_delivery_proof_for_single_message - receive_delivery_proof_for_two_messages_by_single_relayer` | Weight of receiving and parsing minimal delivery proof | +| `MessageDeliveryWeight` | `receive_delivery_proof_for_two_messages_by_single_relayer - receive_delivery_proof_for_single_message` | Weight of confirming every additional message | +| `MessagesCount` | | Provided by relayer | +| `RelayerRewardWeight` | `receive_delivery_proof_for_two_messages_by_two_relayers - receive_delivery_proof_for_two_messages_by_single_relayer` | Weight of rewarding every additional relayer | +| `RelayersCount` | | Provided by relayer | +| `ActualProofSize` | | Provided by relayer | +| `ExpectedProofSize` | `EXTRA_STORAGE_PROOF_SIZE` | Size of proof that we are expecting | +| `ProofByteDeliveryWeight` | `(receive_single_message_proof_16_kb - receive_single_message_proof_1_kb) / (15 * 1024)` | Weight of processing every additional proof byte over `ExpectedProofSize` limit. We're using the same formula, as for message delivery, because proof mechanism is assumed to be the same in both cases | + +#### Why we're always able to craft `receive_messages_delivery_proof` transaction? + +There can be at most `::MaxUnconfirmedMessagesAtInboundLane` +messages and at most +`::MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded +relayers in the single delivery confirmation transaction. + +We're checking that this transaction may be crafted in the +`pallet_bridge_messages::ensure_able_to_receive_confirmation` function, which must be called from every +runtime' tests. 
diff --git a/polkadot/modules/messages/src/benchmarking.rs b/polkadot/modules/messages/src/benchmarking.rs new file mode 100644 index 00000000000..d1ecf775000 --- /dev/null +++ b/polkadot/modules/messages/src/benchmarking.rs @@ -0,0 +1,830 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Messages pallet benchmarking. + +use crate::weights_ext::EXPECTED_DEFAULT_MESSAGE_LENGTH; +use crate::{inbound_lane::InboundLaneStorage, inbound_lane_storage, outbound_lane, Call, Instance}; + +use bp_messages::{ + source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, InboundLaneData, LaneId, MessageData, + MessageNonce, OutboundLaneData, UnrewardedRelayersState, +}; +use frame_benchmarking::{account, benchmarks_instance}; +use frame_support::{traits::Get, weights::Weight}; +use frame_system::RawOrigin; +use sp_std::{collections::btree_map::BTreeMap, convert::TryInto, ops::RangeInclusive, prelude::*}; + +/// Fee paid by submitter for single message delivery. +pub const MESSAGE_FEE: u64 = 10_000_000_000; + +const SEED: u32 = 0; + +/// Pallet we're benchmarking here. +pub struct Pallet, I: crate::Instance>(crate::Pallet); + +/// Proof size requirements. +pub enum ProofSize { + /// The proof is expected to be minimal. 
If value size may be changed, then it is expected to + /// have given size. + Minimal(u32), + /// The proof is expected to have at least given size and grow by increasing number of trie nodes + /// included in the proof. + HasExtraNodes(u32), + /// The proof is expected to have at least given size and grow by increasing value that is stored + /// in the trie. + HasLargeLeaf(u32), +} + +/// Benchmark-specific message parameters. +pub struct MessageParams { + /// Size of the message payload. + pub size: u32, + /// Message sender account. + pub sender_account: ThisAccountId, +} + +/// Benchmark-specific message proof parameters. +pub struct MessageProofParams { + /// Id of the lane. + pub lane: LaneId, + /// Range of messages to include in the proof. + pub message_nonces: RangeInclusive, + /// If `Some`, the proof needs to include this outbound lane data. + pub outbound_lane_data: Option, + /// Proof size requirements. + pub size: ProofSize, +} + +/// Benchmark-specific message delivery proof parameters. +pub struct MessageDeliveryProofParams { + /// Id of the lane. + pub lane: LaneId, + /// The proof needs to include this inbound lane data. + pub inbound_lane_data: InboundLaneData, + /// Proof size requirements. + pub size: ProofSize, +} + +/// Trait that must be implemented by runtime. +pub trait Config: crate::Config { + /// Lane id to use in benchmarks. + fn bench_lane_id() -> LaneId { + Default::default() + } + /// Get maximal size of the message payload. + fn maximal_message_size() -> u32; + /// Return id of relayer account at the bridged chain. + fn bridged_relayer_id() -> Self::InboundRelayer; + /// Return balance of given account. + fn account_balance(account: &Self::AccountId) -> Self::OutboundMessageFee; + /// Create given account and give it enough balance for test purposes. + fn endow_account(account: &Self::AccountId); + /// Prepare message to send over lane. 
+ fn prepare_outbound_message( + params: MessageParams, + ) -> (Self::OutboundPayload, Self::OutboundMessageFee); + /// Prepare messages proof to receive by the module. + fn prepare_message_proof( + params: MessageProofParams, + ) -> ( + >::MessagesProof, + Weight, + ); + /// Prepare messages delivery proof to receive by the module. + fn prepare_message_delivery_proof( + params: MessageDeliveryProofParams, + ) -> >::MessagesDeliveryProof; +} + +benchmarks_instance! { + // + // Benchmarks that are used directly by the runtime. + // + + // Benchmark `send_message` extrinsic with the worst possible conditions: + // * outbound lane already has state, so it needs to be read and decoded; + // * relayers fund account does not exists (in practice it needs to exist in production environment); + // * maximal number of messages is being pruned during the call; + // * message size is minimal for the target chain. + // + // Result of this benchmark is used as a base weight for `send_message` call. Then the 'message weight' + // (estimated using `send_half_maximal_message_worst_case` and `send_maximal_message_worst_case`) is + // added. 
+ send_minimal_message_worst_case { + let lane_id = T::bench_lane_id(); + let sender = account("sender", 0, SEED); + T::endow_account(&sender); + + // 'send' messages that are to be pruned when our message is sent + for _nonce in 1..=T::MaxMessagesToPruneAtOnce::get() { + send_regular_message::(); + } + confirm_message_delivery::(T::MaxMessagesToPruneAtOnce::get()); + + let (payload, fee) = T::prepare_outbound_message(MessageParams { + size: 0, + sender_account: sender.clone(), + }); + }: send_message(RawOrigin::Signed(sender), lane_id, payload, fee) + verify { + assert_eq!( + crate::Pallet::::outbound_latest_generated_nonce(T::bench_lane_id()), + T::MaxMessagesToPruneAtOnce::get() + 1, + ); + } + + // Benchmark `send_message` extrinsic with the worst possible conditions: + // * outbound lane already has state, so it needs to be read and decoded; + // * relayers fund account does not exists (in practice it needs to exist in production environment); + // * maximal number of messages is being pruned during the call; + // * message size is 1KB. + // + // With single KB of message size, the weight of the call is increased (roughly) by + // `(send_16_kb_message_worst_case - send_1_kb_message_worst_case) / 15`. 
+ send_1_kb_message_worst_case { + let lane_id = T::bench_lane_id(); + let sender = account("sender", 0, SEED); + T::endow_account(&sender); + + // 'send' messages that are to be pruned when our message is sent + for _nonce in 1..=T::MaxMessagesToPruneAtOnce::get() { + send_regular_message::(); + } + confirm_message_delivery::(T::MaxMessagesToPruneAtOnce::get()); + + let size = 1024; + assert!( + T::maximal_message_size() > size, + "This benchmark can only be used with runtime that accepts 1KB messages", + ); + + let (payload, fee) = T::prepare_outbound_message(MessageParams { + size, + sender_account: sender.clone(), + }); + }: send_message(RawOrigin::Signed(sender), lane_id, payload, fee) + verify { + assert_eq!( + crate::Pallet::::outbound_latest_generated_nonce(T::bench_lane_id()), + T::MaxMessagesToPruneAtOnce::get() + 1, + ); + } + + // Benchmark `send_message` extrinsic with the worst possible conditions: + // * outbound lane already has state, so it needs to be read and decoded; + // * relayers fund account does not exists (in practice it needs to exist in production environment); + // * maximal number of messages is being pruned during the call; + // * message size is 16KB. + // + // With single KB of message size, the weight of the call is increased (roughly) by + // `(send_16_kb_message_worst_case - send_1_kb_message_worst_case) / 15`. 
+ send_16_kb_message_worst_case { + let lane_id = T::bench_lane_id(); + let sender = account("sender", 0, SEED); + T::endow_account(&sender); + + // 'send' messages that are to be pruned when our message is sent + for _nonce in 1..=T::MaxMessagesToPruneAtOnce::get() { + send_regular_message::(); + } + confirm_message_delivery::(T::MaxMessagesToPruneAtOnce::get()); + + let size = 16 * 1024; + assert!( + T::maximal_message_size() > size, + "This benchmark can only be used with runtime that accepts 16KB messages", + ); + + let (payload, fee) = T::prepare_outbound_message(MessageParams { + size, + sender_account: sender.clone(), + }); + }: send_message(RawOrigin::Signed(sender), lane_id, payload, fee) + verify { + assert_eq!( + crate::Pallet::::outbound_latest_generated_nonce(T::bench_lane_id()), + T::MaxMessagesToPruneAtOnce::get() + 1, + ); + } + + // Benchmark `increase_message_fee` with following conditions: + // * message has maximal message; + // * submitter account is killed because its balance is less than ED after payment. + increase_message_fee { + let sender = account("sender", 42, SEED); + T::endow_account(&sender); + + let additional_fee = T::account_balance(&sender); + let lane_id = T::bench_lane_id(); + let nonce = 1; + + send_regular_message_with_payload::(vec![42u8; T::maximal_message_size() as _]); + }: increase_message_fee(RawOrigin::Signed(sender.clone()), lane_id, nonce, additional_fee) + verify { + assert_eq!(T::account_balance(&sender), 0.into()); + } + + // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: + // * proof does not include outbound lane state proof; + // * inbound lane already has state, so it needs to be read and decoded; + // * message is successfully dispatched; + // * message requires all heavy checks done by dispatcher. + // + // This is base benchmark for all other message delivery benchmarks. 
+ receive_single_message_proof { + let relayer_id_on_source = T::bridged_relayer_id(); + let relayer_id_on_target = account("relayer", 0, SEED); + + // mark messages 1..=20 as delivered + receive_messages::(20); + + let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { + lane: T::bench_lane_id(), + message_nonces: 21..=21, + outbound_lane_data: None, + size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), + }); + }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) + verify { + assert_eq!( + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), + 21, + ); + } + + // Benchmark `receive_messages_proof` extrinsic with two minimal-weight messages and following conditions: + // * proof does not include outbound lane state proof; + // * inbound lane already has state, so it needs to be read and decoded; + // * message is successfully dispatched; + // * message requires all heavy checks done by dispatcher. + // + // The weight of single message delivery could be approximated as + // `weight(receive_two_messages_proof) - weight(receive_single_message_proof)`. + // This won't be super-accurate if message has non-zero dispatch weight, but estimation should + // be close enough to real weight. 
+ receive_two_messages_proof { + let relayer_id_on_source = T::bridged_relayer_id(); + let relayer_id_on_target = account("relayer", 0, SEED); + + // mark messages 1..=20 as delivered + receive_messages::(20); + + let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { + lane: T::bench_lane_id(), + message_nonces: 21..=22, + outbound_lane_data: None, + size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), + }); + }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 2, dispatch_weight) + verify { + assert_eq!( + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), + 22, + ); + } + + // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: + // * proof includes outbound lane state proof; + // * inbound lane already has state, so it needs to be read and decoded; + // * message is successfully dispatched; + // * message requires all heavy checks done by dispatcher. + // + // The weight of outbound lane state delivery would be + // `weight(receive_single_message_proof_with_outbound_lane_state) - weight(receive_single_message_proof)`. + // This won't be super-accurate if message has non-zero dispatch weight, but estimation should + // be close enough to real weight. 
+ receive_single_message_proof_with_outbound_lane_state { + let relayer_id_on_source = T::bridged_relayer_id(); + let relayer_id_on_target = account("relayer", 0, SEED); + + // mark messages 1..=20 as delivered + receive_messages::(20); + + let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { + lane: T::bench_lane_id(), + message_nonces: 21..=21, + outbound_lane_data: Some(OutboundLaneData { + oldest_unpruned_nonce: 21, + latest_received_nonce: 20, + latest_generated_nonce: 21, + }), + size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), + }); + }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) + verify { + assert_eq!( + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), + 21, + ); + assert_eq!( + crate::Pallet::::inbound_latest_confirmed_nonce(T::bench_lane_id()), + 20, + ); + } + + // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: + // * the proof has many redundand trie nodes with total size of approximately 1KB; + // * proof does not include outbound lane state proof; + // * inbound lane already has state, so it needs to be read and decoded; + // * message is successfully dispatched; + // * message requires all heavy checks done by dispatcher. + // + // With single KB of messages proof, the weight of the call is increased (roughly) by + // `(receive_single_message_proof_16KB - receive_single_message_proof_1_kb) / 15`. 
+ receive_single_message_proof_1_kb { + let relayer_id_on_source = T::bridged_relayer_id(); + let relayer_id_on_target = account("relayer", 0, SEED); + + // mark messages 1..=20 as delivered + receive_messages::(20); + + let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { + lane: T::bench_lane_id(), + message_nonces: 21..=21, + outbound_lane_data: None, + size: ProofSize::HasExtraNodes(1024), + }); + }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) + verify { + assert_eq!( + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), + 21, + ); + } + + // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: + // * the proof has many redundand trie nodes with total size of approximately 16KB; + // * proof does not include outbound lane state proof; + // * inbound lane already has state, so it needs to be read and decoded; + // * message is successfully dispatched; + // * message requires all heavy checks done by dispatcher. + // + // Size of proof grows because it contains extra trie nodes in it. + // + // With single KB of messages proof, the weight of the call is increased (roughly) by + // `(receive_single_message_proof_16KB - receive_single_message_proof) / 15`. 
+ receive_single_message_proof_16_kb { + let relayer_id_on_source = T::bridged_relayer_id(); + let relayer_id_on_target = account("relayer", 0, SEED); + + // mark messages 1..=20 as delivered + receive_messages::(20); + + let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { + lane: T::bench_lane_id(), + message_nonces: 21..=21, + outbound_lane_data: None, + size: ProofSize::HasExtraNodes(16 * 1024), + }); + }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) + verify { + assert_eq!( + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), + 21, + ); + } + + // Benchmark `receive_messages_delivery_proof` extrinsic with following conditions: + // * single relayer is rewarded for relaying single message; + // * relayer account does not exist (in practice it needs to exist in production environment). + // + // This is base benchmark for all other confirmations delivery benchmarks. + receive_delivery_proof_for_single_message { + let relayers_fund_id = crate::Pallet::::relayer_fund_account_id(); + let relayer_id: T::AccountId = account("relayer", 0, SEED); + let relayer_balance = T::account_balance(&relayer_id); + T::endow_account(&relayers_fund_id); + + // send message that we're going to confirm + send_regular_message::(); + + let relayers_state = UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 1, + total_messages: 1, + }; + let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { + lane: T::bench_lane_id(), + inbound_lane_data: InboundLaneData { + relayers: vec![(1, 1, relayer_id.clone())].into_iter().collect(), + last_confirmed_nonce: 0, + }, + size: ProofSize::Minimal(0), + }); + }: receive_messages_delivery_proof(RawOrigin::Signed(relayer_id.clone()), proof, relayers_state) + verify { + assert_eq!( + T::account_balance(&relayer_id), + relayer_balance + MESSAGE_FEE.into(), + ); + } + + // Benchmark 
`receive_messages_delivery_proof` extrinsic with following conditions: + // * single relayer is rewarded for relaying two messages; + // * relayer account does not exist (in practice it needs to exist in production environment). + // + // Additional weight for paying single-message reward to the same relayer could be computed + // as `weight(receive_delivery_proof_for_two_messages_by_single_relayer) + // - weight(receive_delivery_proof_for_single_message)`. + receive_delivery_proof_for_two_messages_by_single_relayer { + let relayers_fund_id = crate::Pallet::::relayer_fund_account_id(); + let relayer_id: T::AccountId = account("relayer", 0, SEED); + let relayer_balance = T::account_balance(&relayer_id); + T::endow_account(&relayers_fund_id); + + // send message that we're going to confirm + send_regular_message::(); + send_regular_message::(); + + let relayers_state = UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 2, + total_messages: 2, + }; + let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { + lane: T::bench_lane_id(), + inbound_lane_data: InboundLaneData { + relayers: vec![(1, 2, relayer_id.clone())].into_iter().collect(), + last_confirmed_nonce: 0, + }, + size: ProofSize::Minimal(0), + }); + }: receive_messages_delivery_proof(RawOrigin::Signed(relayer_id.clone()), proof, relayers_state) + verify { + ensure_relayer_rewarded::(&relayer_id, &relayer_balance); + } + + // Benchmark `receive_messages_delivery_proof` extrinsic with following conditions: + // * two relayers are rewarded for relaying single message each; + // * relayer account does not exist (in practice it needs to exist in production environment). + // + // Additional weight for paying reward to the next relayer could be computed + // as `weight(receive_delivery_proof_for_two_messages_by_two_relayers) + // - weight(receive_delivery_proof_for_two_messages_by_single_relayer)`. 
+ receive_delivery_proof_for_two_messages_by_two_relayers { + let relayers_fund_id = crate::Pallet::::relayer_fund_account_id(); + let relayer1_id: T::AccountId = account("relayer1", 1, SEED); + let relayer1_balance = T::account_balance(&relayer1_id); + let relayer2_id: T::AccountId = account("relayer2", 2, SEED); + let relayer2_balance = T::account_balance(&relayer2_id); + T::endow_account(&relayers_fund_id); + + // send message that we're going to confirm + send_regular_message::(); + send_regular_message::(); + + let relayers_state = UnrewardedRelayersState { + unrewarded_relayer_entries: 2, + messages_in_oldest_entry: 1, + total_messages: 2, + }; + let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { + lane: T::bench_lane_id(), + inbound_lane_data: InboundLaneData { + relayers: vec![ + (1, 1, relayer1_id.clone()), + (2, 2, relayer2_id.clone()), + ].into_iter().collect(), + last_confirmed_nonce: 0, + }, + size: ProofSize::Minimal(0), + }); + }: receive_messages_delivery_proof(RawOrigin::Signed(relayer1_id.clone()), proof, relayers_state) + verify { + ensure_relayer_rewarded::(&relayer1_id, &relayer1_balance); + ensure_relayer_rewarded::(&relayer2_id, &relayer2_balance); + } + + // + // Benchmarks for manual checks. + // + + // Benchmark `send_message` extrinsic with following conditions: + // * outbound lane already has state, so it needs to be read and decoded; + // * relayers fund account does not exists (in practice it needs to exist in production environment); + // * maximal number of messages is being pruned during the call; + // * message size varies from minimal to maximal for the target chain. + // + // Results of this benchmark may be used to check how message size affects `send_message` performance. 
+ send_messages_of_various_lengths { + let i in 0..T::maximal_message_size().try_into().unwrap_or_default(); + + let lane_id = T::bench_lane_id(); + let sender = account("sender", 0, SEED); + T::endow_account(&sender); + + // 'send' messages that are to be pruned when our message is sent + for _nonce in 1..=T::MaxMessagesToPruneAtOnce::get() { + send_regular_message::(); + } + confirm_message_delivery::(T::MaxMessagesToPruneAtOnce::get()); + + let (payload, fee) = T::prepare_outbound_message(MessageParams { + size: i as _, + sender_account: sender.clone(), + }); + }: send_message(RawOrigin::Signed(sender), lane_id, payload, fee) + verify { + assert_eq!( + crate::Pallet::::outbound_latest_generated_nonce(T::bench_lane_id()), + T::MaxMessagesToPruneAtOnce::get() + 1, + ); + } + + // Benchmark `receive_messages_proof` extrinsic with multiple minimal-weight messages and following conditions: + // * proof does not include outbound lane state proof; + // * inbound lane already has state, so it needs to be read and decoded; + // * message is successfully dispatched; + // * message requires all heavy checks done by dispatcher. + // + // This benchmarks gives us an approximation of single message delivery weight. It is similar to the + // `weight(receive_two_messages_proof) - weight(receive_single_message_proof)`. So it may be used + // to verify that the other approximation is correct. 
+ receive_multiple_messages_proof { + let i in 1..64; + + let relayer_id_on_source = T::bridged_relayer_id(); + let relayer_id_on_target = account("relayer", 0, SEED); + let messages_count = i as _; + + // mark messages 1..=20 as delivered + receive_messages::(20); + + let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { + lane: T::bench_lane_id(), + message_nonces: 21..=(20 + i as MessageNonce), + outbound_lane_data: None, + size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), + }); + }: receive_messages_proof( + RawOrigin::Signed(relayer_id_on_target), + relayer_id_on_source, + proof, + messages_count, + dispatch_weight + ) + verify { + assert_eq!( + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), + 20 + i as MessageNonce, + ); + } + + // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: + // * proof does not include outbound lane state proof; + // * inbound lane already has state, so it needs to be read and decoded; + // * message is successfully dispatched; + // * message requires all heavy checks done by dispatcher. + // + // Results of this benchmark may be used to check how proof size affects `receive_message_proof` performance. 
+ receive_message_proofs_with_extra_nodes { + let i in 0..T::maximal_message_size(); + + let relayer_id_on_source = T::bridged_relayer_id(); + let relayer_id_on_target = account("relayer", 0, SEED); + let messages_count = 1u32; + + // mark messages 1..=20 as delivered + receive_messages::(20); + + let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { + lane: T::bench_lane_id(), + message_nonces: 21..=21, + outbound_lane_data: None, + size: ProofSize::HasExtraNodes(i as _), + }); + }: receive_messages_proof( + RawOrigin::Signed(relayer_id_on_target), + relayer_id_on_source, + proof, + messages_count, + dispatch_weight + ) + verify { + assert_eq!( + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), + 21, + ); + } + + // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: + // * proof does not include outbound lane state proof; + // * inbound lane already has state, so it needs to be read and decoded; + // * message is successfully dispatched; + // * message requires all heavy checks done by dispatcher. + // + // Results of this benchmark may be used to check how message size affects `receive_message_proof` performance. 
+ receive_message_proofs_with_large_leaf { + let i in 0..T::maximal_message_size(); + + let relayer_id_on_source = T::bridged_relayer_id(); + let relayer_id_on_target = account("relayer", 0, SEED); + let messages_count = 1u32; + + // mark messages 1..=20 as delivered + receive_messages::(20); + + let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { + lane: T::bench_lane_id(), + message_nonces: 21..=21, + outbound_lane_data: None, + size: ProofSize::HasLargeLeaf(i as _), + }); + }: receive_messages_proof( + RawOrigin::Signed(relayer_id_on_target), + relayer_id_on_source, + proof, + messages_count, + dispatch_weight + ) + verify { + assert_eq!( + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), + 21, + ); + } + + // Benchmark `receive_messages_proof` extrinsic with multiple minimal-weight messages and following conditions: + // * proof includes outbound lane state proof; + // * inbound lane already has state, so it needs to be read and decoded; + // * message is successfully dispatched; + // * message requires all heavy checks done by dispatcher. + // + // This benchmarks gives us an approximation of outbound lane state delivery weight. It is similar to the + // `weight(receive_single_message_proof_with_outbound_lane_state) - weight(receive_single_message_proof)`. + // So it may be used to verify that the other approximation is correct. 
+ receive_multiple_messages_proof_with_outbound_lane_state { + let i in 1..128; + + let relayer_id_on_source = T::bridged_relayer_id(); + let relayer_id_on_target = account("relayer", 0, SEED); + let messages_count = i as _; + + // mark messages 1..=20 as delivered + receive_messages::(20); + + let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { + lane: T::bench_lane_id(), + message_nonces: 21..=20 + i as MessageNonce, + outbound_lane_data: Some(OutboundLaneData { + oldest_unpruned_nonce: 21, + latest_received_nonce: 20, + latest_generated_nonce: 21, + }), + size: ProofSize::Minimal(0), + }); + }: receive_messages_proof( + RawOrigin::Signed(relayer_id_on_target), + relayer_id_on_source, + proof, + messages_count, + dispatch_weight + ) + verify { + assert_eq!( + crate::Pallet::::inbound_latest_received_nonce(T::bench_lane_id()), + 20 + i as MessageNonce, + ); + assert_eq!( + crate::Pallet::::inbound_latest_confirmed_nonce(T::bench_lane_id()), + 20, + ); + } + + // Benchmark `receive_messages_delivery_proof` extrinsic where single relayer delivers multiple messages. 
+ receive_delivery_proof_for_multiple_messages_by_single_relayer { + // there actually should be used value of `MaxUnrewardedRelayerEntriesAtInboundLane` from the bridged + // chain, but we're more interested in additional weight/message than in max weight + let i in 1..T::MaxUnrewardedRelayerEntriesAtInboundLane::get() + .try_into() + .expect("Value of MaxUnrewardedRelayerEntriesAtInboundLane is too large"); + + let relayers_fund_id = crate::Pallet::::relayer_fund_account_id(); + let relayer_id: T::AccountId = account("relayer", 0, SEED); + let relayer_balance = T::account_balance(&relayer_id); + T::endow_account(&relayers_fund_id); + + // send messages that we're going to confirm + for _ in 1..=i { + send_regular_message::(); + } + + let relayers_state = UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + messages_in_oldest_entry: 1, + total_messages: i as MessageNonce, + }; + let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { + lane: T::bench_lane_id(), + inbound_lane_data: InboundLaneData { + relayers: vec![(1, i as MessageNonce, relayer_id.clone())].into_iter().collect(), + last_confirmed_nonce: 0, + }, + size: ProofSize::Minimal(0), + }); + }: receive_messages_delivery_proof(RawOrigin::Signed(relayer_id.clone()), proof, relayers_state) + verify { + ensure_relayer_rewarded::(&relayer_id, &relayer_balance); + } + + // Benchmark `receive_messages_delivery_proof` extrinsic where every relayer delivers single messages. 
+ receive_delivery_proof_for_multiple_messages_by_multiple_relayers { + // there actually should be used value of `MaxUnconfirmedMessagesAtInboundLane` from the bridged + // chain, but we're more interested in additional weight/message than in max weight + let i in 1..T::MaxUnconfirmedMessagesAtInboundLane::get() + .try_into() + .expect("Value of MaxUnconfirmedMessagesAtInboundLane is too large "); + + let relayers_fund_id = crate::Pallet::::relayer_fund_account_id(); + let confirmation_relayer_id = account("relayer", 0, SEED); + let relayers: BTreeMap = (1..=i) + .map(|j| { + let relayer_id = account("relayer", j + 1, SEED); + let relayer_balance = T::account_balance(&relayer_id); + (relayer_id, relayer_balance) + }) + .collect(); + T::endow_account(&relayers_fund_id); + + // send messages that we're going to confirm + for _ in 1..=i { + send_regular_message::(); + } + + let relayers_state = UnrewardedRelayersState { + unrewarded_relayer_entries: i as MessageNonce, + messages_in_oldest_entry: 1, + total_messages: i as MessageNonce, + }; + let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { + lane: T::bench_lane_id(), + inbound_lane_data: InboundLaneData { + relayers: relayers + .keys() + .enumerate() + .map(|(j, relayer_id)| (j as MessageNonce + 1, j as MessageNonce + 1, relayer_id.clone())) + .collect(), + last_confirmed_nonce: 0, + }, + size: ProofSize::Minimal(0), + }); + }: receive_messages_delivery_proof(RawOrigin::Signed(confirmation_relayer_id), proof, relayers_state) + verify { + for (relayer_id, prev_balance) in relayers { + ensure_relayer_rewarded::(&relayer_id, &prev_balance); + } + } +} + +fn send_regular_message, I: Instance>() { + let mut outbound_lane = outbound_lane::(T::bench_lane_id()); + outbound_lane.send_message(MessageData { + payload: vec![], + fee: MESSAGE_FEE.into(), + }); +} + +fn send_regular_message_with_payload, I: Instance>(payload: Vec) { + let mut outbound_lane = outbound_lane::(T::bench_lane_id()); + 
outbound_lane.send_message(MessageData { + payload, + fee: MESSAGE_FEE.into(), + }); +} + +fn confirm_message_delivery, I: Instance>(nonce: MessageNonce) { + let mut outbound_lane = outbound_lane::(T::bench_lane_id()); + assert!(outbound_lane.confirm_delivery(nonce).is_some()); +} + +fn receive_messages, I: Instance>(nonce: MessageNonce) { + let mut inbound_lane_storage = inbound_lane_storage::(T::bench_lane_id()); + inbound_lane_storage.set_data(InboundLaneData { + relayers: vec![(1, nonce, T::bridged_relayer_id())].into_iter().collect(), + last_confirmed_nonce: 0, + }); +} + +fn ensure_relayer_rewarded, I: Instance>(relayer_id: &T::AccountId, old_balance: &T::OutboundMessageFee) { + let new_balance = T::account_balance(relayer_id); + assert!( + new_balance > *old_balance, + "Relayer haven't received reward for relaying message: old balance = {:?}, new balance = {:?}", + old_balance, + new_balance, + ); +} diff --git a/polkadot/modules/messages/src/inbound_lane.rs b/polkadot/modules/messages/src/inbound_lane.rs new file mode 100644 index 00000000000..b5576bc30a1 --- /dev/null +++ b/polkadot/modules/messages/src/inbound_lane.rs @@ -0,0 +1,397 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Everything about incoming messages receival. 
+ +use bp_messages::{ + target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, + InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, +}; +use sp_std::prelude::PartialEq; + +/// Inbound lane storage. +pub trait InboundLaneStorage { + /// Delivery and dispatch fee type on source chain. + type MessageFee; + /// Id of relayer on source chain. + type Relayer: PartialEq; + + /// Lane id. + fn id(&self) -> LaneId; + /// Return maximal number of unrewarded relayer entries in inbound lane. + fn max_unrewarded_relayer_entries(&self) -> MessageNonce; + /// Return maximal number of unconfirmed messages in inbound lane. + fn max_unconfirmed_messages(&self) -> MessageNonce; + /// Get lane data from the storage. + fn data(&self) -> InboundLaneData; + /// Update lane data in the storage. + fn set_data(&mut self, data: InboundLaneData); +} + +/// Inbound messages lane. +pub struct InboundLane { + storage: S, +} + +impl InboundLane { + /// Create new inbound lane backed by given storage. + pub fn new(storage: S) -> Self { + InboundLane { storage } + } + + /// Receive state of the corresponding outbound lane. 
+ pub fn receive_state_update(&mut self, outbound_lane_data: OutboundLaneData) -> Option { + let mut data = self.storage.data(); + let last_delivered_nonce = data.last_delivered_nonce(); + + if outbound_lane_data.latest_received_nonce > last_delivered_nonce { + // this is something that should never happen if proofs are correct + return None; + } + if outbound_lane_data.latest_received_nonce <= data.last_confirmed_nonce { + return None; + } + + let new_confirmed_nonce = outbound_lane_data.latest_received_nonce; + data.last_confirmed_nonce = new_confirmed_nonce; + // Firstly, remove all of the records where higher nonce <= new confirmed nonce + while data + .relayers + .front() + .map(|(_, nonce_high, _)| *nonce_high <= new_confirmed_nonce) + .unwrap_or(false) + { + data.relayers.pop_front(); + } + // Secondly, update the next record with lower nonce equal to new confirmed nonce if needed. + // Note: There will be max. 1 record to update as we don't allow messages from relayers to overlap. + match data.relayers.front_mut() { + Some((nonce_low, _, _)) if *nonce_low < new_confirmed_nonce => { + *nonce_low = new_confirmed_nonce + 1; + } + _ => {} + } + + self.storage.set_data(data); + Some(outbound_lane_data.latest_received_nonce) + } + + /// Receive new message. 
+ pub fn receive_message>( + &mut self, + relayer: S::Relayer, + nonce: MessageNonce, + message_data: DispatchMessageData, + ) -> bool { + let mut data = self.storage.data(); + let is_correct_message = nonce == data.last_delivered_nonce() + 1; + if !is_correct_message { + return false; + } + + // if there are more unrewarded relayer entries than we may accept, reject this message + if data.relayers.len() as MessageNonce >= self.storage.max_unrewarded_relayer_entries() { + return false; + } + + // if there are more unconfirmed messages than we may accept, reject this message + let unconfirmed_messages_count = nonce.saturating_sub(data.last_confirmed_nonce); + if unconfirmed_messages_count > self.storage.max_unconfirmed_messages() { + return false; + } + + let push_new = match data.relayers.back_mut() { + Some((_, nonce_high, last_relayer)) if last_relayer == &relayer => { + *nonce_high = nonce; + false + } + _ => true, + }; + if push_new { + data.relayers.push_back((nonce, nonce, relayer)); + } + + self.storage.set_data(data); + + P::dispatch(DispatchMessage { + key: MessageKey { + lane_id: self.storage.id(), + nonce, + }, + data: message_data, + }); + + true + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + inbound_lane, + mock::{ + message_data, run_test, TestMessageDispatch, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, + TEST_RELAYER_B, TEST_RELAYER_C, + }, + DefaultInstance, RuntimeInboundLaneStorage, + }; + + fn receive_regular_message( + lane: &mut InboundLane>, + nonce: MessageNonce, + ) { + assert!(lane.receive_message::( + TEST_RELAYER_A, + nonce, + message_data(REGULAR_PAYLOAD).into() + )); + } + + #[test] + fn receive_status_update_ignores_status_from_the_future() { + run_test(|| { + let mut lane = inbound_lane::(TEST_LANE_ID); + receive_regular_message(&mut lane, 1); + assert_eq!( + lane.receive_state_update(OutboundLaneData { + latest_received_nonce: 10, + ..Default::default() + }), + None, + ); + + 
assert_eq!(lane.storage.data().last_confirmed_nonce, 0); + }); + } + + #[test] + fn receive_status_update_ignores_obsolete_status() { + run_test(|| { + let mut lane = inbound_lane::(TEST_LANE_ID); + receive_regular_message(&mut lane, 1); + receive_regular_message(&mut lane, 2); + receive_regular_message(&mut lane, 3); + assert_eq!( + lane.receive_state_update(OutboundLaneData { + latest_received_nonce: 3, + ..Default::default() + }), + Some(3), + ); + assert_eq!(lane.storage.data().last_confirmed_nonce, 3); + + assert_eq!( + lane.receive_state_update(OutboundLaneData { + latest_received_nonce: 3, + ..Default::default() + }), + None, + ); + assert_eq!(lane.storage.data().last_confirmed_nonce, 3); + }); + } + + #[test] + fn receive_status_update_works() { + run_test(|| { + let mut lane = inbound_lane::(TEST_LANE_ID); + receive_regular_message(&mut lane, 1); + receive_regular_message(&mut lane, 2); + receive_regular_message(&mut lane, 3); + assert_eq!(lane.storage.data().last_confirmed_nonce, 0); + assert_eq!(lane.storage.data().relayers, vec![(1, 3, TEST_RELAYER_A)]); + + assert_eq!( + lane.receive_state_update(OutboundLaneData { + latest_received_nonce: 2, + ..Default::default() + }), + Some(2), + ); + assert_eq!(lane.storage.data().last_confirmed_nonce, 2); + assert_eq!(lane.storage.data().relayers, vec![(3, 3, TEST_RELAYER_A)]); + + assert_eq!( + lane.receive_state_update(OutboundLaneData { + latest_received_nonce: 3, + ..Default::default() + }), + Some(3), + ); + assert_eq!(lane.storage.data().last_confirmed_nonce, 3); + assert_eq!(lane.storage.data().relayers, vec![]); + }); + } + + #[test] + fn receive_status_update_works_with_batches_from_relayers() { + run_test(|| { + let mut lane = inbound_lane::(TEST_LANE_ID); + let mut seed_storage_data = lane.storage.data(); + // Prepare data + seed_storage_data.last_confirmed_nonce = 0; + seed_storage_data.relayers.push_back((1, 1, TEST_RELAYER_A)); + // Simulate messages batch (2, 3, 4) from relayer #2 + 
seed_storage_data.relayers.push_back((2, 4, TEST_RELAYER_B)); + seed_storage_data.relayers.push_back((5, 5, TEST_RELAYER_C)); + lane.storage.set_data(seed_storage_data); + // Check + assert_eq!( + lane.receive_state_update(OutboundLaneData { + latest_received_nonce: 3, + ..Default::default() + }), + Some(3), + ); + assert_eq!(lane.storage.data().last_confirmed_nonce, 3); + assert_eq!( + lane.storage.data().relayers, + vec![(4, 4, TEST_RELAYER_B), (5, 5, TEST_RELAYER_C)] + ); + }); + } + + #[test] + fn fails_to_receive_message_with_incorrect_nonce() { + run_test(|| { + let mut lane = inbound_lane::(TEST_LANE_ID); + assert!(!lane.receive_message::( + TEST_RELAYER_A, + 10, + message_data(REGULAR_PAYLOAD).into() + )); + assert_eq!(lane.storage.data().last_delivered_nonce(), 0); + }); + } + + #[test] + fn fails_to_receive_messages_above_unrewarded_relayer_entries_limit_per_lane() { + run_test(|| { + let mut lane = inbound_lane::(TEST_LANE_ID); + let max_nonce = ::MaxUnrewardedRelayerEntriesAtInboundLane::get(); + for current_nonce in 1..max_nonce + 1 { + assert!(lane.receive_message::( + TEST_RELAYER_A + current_nonce, + current_nonce, + message_data(REGULAR_PAYLOAD).into() + )); + } + // Fails to dispatch new message from different than latest relayer. + assert_eq!( + false, + lane.receive_message::( + TEST_RELAYER_A + max_nonce + 1, + max_nonce + 1, + message_data(REGULAR_PAYLOAD).into() + ) + ); + // Fails to dispatch new messages from latest relayer. Prevents griefing attacks. 
+ assert_eq!( + false, + lane.receive_message::( + TEST_RELAYER_A + max_nonce, + max_nonce + 1, + message_data(REGULAR_PAYLOAD).into() + ) + ); + }); + } + + #[test] + fn fails_to_receive_messages_above_unconfirmed_messages_limit_per_lane() { + run_test(|| { + let mut lane = inbound_lane::(TEST_LANE_ID); + let max_nonce = ::MaxUnconfirmedMessagesAtInboundLane::get(); + for current_nonce in 1..=max_nonce { + assert!(lane.receive_message::( + TEST_RELAYER_A, + current_nonce, + message_data(REGULAR_PAYLOAD).into() + )); + } + // Fails to dispatch new message from different than latest relayer. + assert_eq!( + false, + lane.receive_message::( + TEST_RELAYER_B, + max_nonce + 1, + message_data(REGULAR_PAYLOAD).into() + ) + ); + // Fails to dispatch new messages from latest relayer. + assert_eq!( + false, + lane.receive_message::( + TEST_RELAYER_A, + max_nonce + 1, + message_data(REGULAR_PAYLOAD).into() + ) + ); + }); + } + + #[test] + fn correctly_receives_following_messages_from_two_relayers_alternately() { + run_test(|| { + let mut lane = inbound_lane::(TEST_LANE_ID); + assert!(lane.receive_message::( + TEST_RELAYER_A, + 1, + message_data(REGULAR_PAYLOAD).into() + )); + assert!(lane.receive_message::( + TEST_RELAYER_B, + 2, + message_data(REGULAR_PAYLOAD).into() + )); + assert!(lane.receive_message::( + TEST_RELAYER_A, + 3, + message_data(REGULAR_PAYLOAD).into() + )); + assert_eq!( + lane.storage.data().relayers, + vec![(1, 1, TEST_RELAYER_A), (2, 2, TEST_RELAYER_B), (3, 3, TEST_RELAYER_A)] + ); + }); + } + + #[test] + fn rejects_same_message_from_two_different_relayers() { + run_test(|| { + let mut lane = inbound_lane::(TEST_LANE_ID); + assert!(lane.receive_message::( + TEST_RELAYER_A, + 1, + message_data(REGULAR_PAYLOAD).into() + )); + assert_eq!( + false, + lane.receive_message::(TEST_RELAYER_B, 1, message_data(REGULAR_PAYLOAD).into()) + ); + }); + } + + #[test] + fn correct_message_is_processed_instantly() { + run_test(|| { + let mut lane = 
inbound_lane::(TEST_LANE_ID); + receive_regular_message(&mut lane, 1); + assert_eq!(lane.storage.data().last_delivered_nonce(), 1); + }); + } +} diff --git a/polkadot/modules/messages/src/instant_payments.rs b/polkadot/modules/messages/src/instant_payments.rs new file mode 100644 index 00000000000..524a3765d6a --- /dev/null +++ b/polkadot/modules/messages/src/instant_payments.rs @@ -0,0 +1,251 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Implementation of `MessageDeliveryAndDispatchPayment` trait on top of `Currency` trait. +//! +//! The payment is first transferred to a special `relayers-fund` account and only transferred +//! to the actual relayer in case confirmation is received. + +use bp_messages::{ + source_chain::{MessageDeliveryAndDispatchPayment, RelayersRewards, Sender}, + MessageNonce, +}; +use codec::Encode; +use frame_support::traits::{Currency as CurrencyT, ExistenceRequirement, Get}; +use num_traits::Zero; +use sp_runtime::traits::Saturating; +use sp_std::fmt::Debug; + +/// Instant message payments made in given currency. +/// +/// The balance is initally reserved in a special `relayers-fund` account, and transferred +/// to the relayer when message delivery is confirmed. 
+/// +/// Additionaly, confirmation transaction submitter (`confirmation_relayer`) is reimbursed +/// with the confirmation rewards (part of message fee, reserved to pay for delivery confirmation). +/// +/// NOTE The `relayers-fund` account must always exist i.e. be over Existential Deposit (ED; the +/// pallet enforces that) to make sure that even if the message cost is below ED it is still payed +/// to the relayer account. +/// NOTE It's within relayer's interest to keep their balance above ED as well, to make sure they +/// can receive the payment. +pub struct InstantCurrencyPayments { + _phantom: sp_std::marker::PhantomData<(T, Currency, GetConfirmationFee, RootAccount)>, +} + +impl MessageDeliveryAndDispatchPayment + for InstantCurrencyPayments +where + T: frame_system::Config, + Currency: CurrencyT, + Currency::Balance: From, + GetConfirmationFee: Get, + RootAccount: Get>, +{ + type Error = &'static str; + + fn initialize(relayer_fund_account: &T::AccountId) -> usize { + assert!( + frame_system::Pallet::::account_exists(relayer_fund_account), + "The relayer fund account ({:?}) must exist for the message lanes pallet to work correctly.", + relayer_fund_account, + ); + 1 + } + + fn pay_delivery_and_dispatch_fee( + submitter: &Sender, + fee: &Currency::Balance, + relayer_fund_account: &T::AccountId, + ) -> Result<(), Self::Error> { + let root_account = RootAccount::get(); + let account = match submitter { + Sender::Signed(submitter) => submitter, + Sender::Root | Sender::None => root_account + .as_ref() + .ok_or("Sending messages using Root or None origin is disallowed.")?, + }; + + Currency::transfer( + account, + relayer_fund_account, + *fee, + // it's fine for the submitter to go below Existential Deposit and die. 
+ ExistenceRequirement::AllowDeath, + ) + .map_err(Into::into) + } + + fn pay_relayers_rewards( + confirmation_relayer: &T::AccountId, + relayers_rewards: RelayersRewards, + relayer_fund_account: &T::AccountId, + ) { + pay_relayers_rewards::( + confirmation_relayer, + relayers_rewards, + relayer_fund_account, + GetConfirmationFee::get(), + ); + } +} + +/// Pay rewards to given relayers, optionally rewarding confirmation relayer. +fn pay_relayers_rewards( + confirmation_relayer: &AccountId, + relayers_rewards: RelayersRewards, + relayer_fund_account: &AccountId, + confirmation_fee: Currency::Balance, +) where + AccountId: Debug + Default + Encode + PartialEq, + Currency: CurrencyT, + Currency::Balance: From, +{ + // reward every relayer except `confirmation_relayer` + let mut confirmation_relayer_reward = Currency::Balance::zero(); + for (relayer, reward) in relayers_rewards { + let mut relayer_reward = reward.reward; + + if relayer != *confirmation_relayer { + // If delivery confirmation is submitted by other relayer, let's deduct confirmation fee + // from relayer reward. + // + // If confirmation fee has been increased (or if it was the only component of message fee), + // then messages relayer may receive zero reward. + let mut confirmation_reward = confirmation_fee.saturating_mul(reward.messages.into()); + if confirmation_reward > relayer_reward { + confirmation_reward = relayer_reward; + } + relayer_reward = relayer_reward.saturating_sub(confirmation_reward); + confirmation_relayer_reward = confirmation_relayer_reward.saturating_add(confirmation_reward); + } else { + // If delivery confirmation is submitted by this relayer, let's add confirmation fee + // from other relayers to this relayer reward. 
+ confirmation_relayer_reward = confirmation_relayer_reward.saturating_add(reward.reward); + continue; + } + + pay_relayer_reward::(relayer_fund_account, &relayer, relayer_reward); + } + + // finally - pay reward to confirmation relayer + pay_relayer_reward::(relayer_fund_account, confirmation_relayer, confirmation_relayer_reward); +} + +/// Transfer funds from relayers fund account to given relayer. +fn pay_relayer_reward( + relayer_fund_account: &AccountId, + relayer_account: &AccountId, + reward: Currency::Balance, +) where + AccountId: Debug, + Currency: CurrencyT, +{ + if reward.is_zero() { + return; + } + + let pay_result = Currency::transfer( + relayer_fund_account, + relayer_account, + reward, + // the relayer fund account must stay above ED (needs to be pre-funded) + ExistenceRequirement::KeepAlive, + ); + + match pay_result { + Ok(_) => log::trace!( + target: "runtime::bridge-messages", + "Rewarded relayer {:?} with {:?}", + relayer_account, + reward, + ), + Err(error) => log::trace!( + target: "runtime::bridge-messages", + "Failed to pay relayer {:?} reward {:?}: {:?}", + relayer_account, + reward, + error, + ), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{run_test, AccountId as TestAccountId, Balance as TestBalance, TestRuntime}; + use bp_messages::source_chain::RelayerRewards; + + type Balances = pallet_balances::Pallet; + + const RELAYER_1: TestAccountId = 1; + const RELAYER_2: TestAccountId = 2; + const RELAYER_3: TestAccountId = 3; + const RELAYERS_FUND_ACCOUNT: TestAccountId = crate::mock::ENDOWED_ACCOUNT; + + fn relayers_rewards() -> RelayersRewards { + vec![ + ( + RELAYER_1, + RelayerRewards { + reward: 100, + messages: 2, + }, + ), + ( + RELAYER_2, + RelayerRewards { + reward: 100, + messages: 3, + }, + ), + ] + .into_iter() + .collect() + } + + #[test] + fn confirmation_relayer_is_rewarded_if_it_has_also_delivered_messages() { + run_test(|| { + pay_relayers_rewards::(&RELAYER_2, relayers_rewards(), 
&RELAYERS_FUND_ACCOUNT, 10); + + assert_eq!(Balances::free_balance(&RELAYER_1), 80); + assert_eq!(Balances::free_balance(&RELAYER_2), 120); + }); + } + + #[test] + fn confirmation_relayer_is_rewarded_if_it_has_not_delivered_any_delivered_messages() { + run_test(|| { + pay_relayers_rewards::(&RELAYER_3, relayers_rewards(), &RELAYERS_FUND_ACCOUNT, 10); + + assert_eq!(Balances::free_balance(&RELAYER_1), 80); + assert_eq!(Balances::free_balance(&RELAYER_2), 70); + assert_eq!(Balances::free_balance(&RELAYER_3), 50); + }); + } + + #[test] + fn only_confirmation_relayer_is_rewarded_if_confirmation_fee_has_significantly_increased() { + run_test(|| { + pay_relayers_rewards::(&RELAYER_3, relayers_rewards(), &RELAYERS_FUND_ACCOUNT, 1000); + + assert_eq!(Balances::free_balance(&RELAYER_1), 0); + assert_eq!(Balances::free_balance(&RELAYER_2), 0); + assert_eq!(Balances::free_balance(&RELAYER_3), 200); + }); + } +} diff --git a/polkadot/modules/messages/src/lib.rs b/polkadot/modules/messages/src/lib.rs new file mode 100644 index 00000000000..9e2563498fe --- /dev/null +++ b/polkadot/modules/messages/src/lib.rs @@ -0,0 +1,1589 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Runtime module that allows sending and receiving messages using lane concept: +//! +//! 
1) the message is sent using `send_message()` call; +//! 2) every outbound message is assigned nonce; +//! 3) the messages are stored in the storage; +//! 4) external component (relay) delivers messages to bridged chain; +//! 5) messages are processed in order (ordered by assigned nonce); +//! 6) relay may send proof-of-delivery back to this chain. +//! +//! Once message is sent, its progress can be tracked by looking at module events. +//! The assigned nonce is reported using `MessageAccepted` event. When message is +//! delivered to the the bridged chain, it is reported using `MessagesDelivered` event. +//! +//! **IMPORTANT NOTE**: after generating weights (custom `WeighInfo` implementation) for +//! your runtime (where this module is plugged to), please add test for these weights. +//! The test should call the `ensure_weights_are_correct` function from this module. +//! If this test fails with your weights, then either weights are computed incorrectly, +//! or some benchmarks assumptions are broken for your runtime. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +pub use crate::weights_ext::{ + ensure_able_to_receive_confirmation, ensure_able_to_receive_message, ensure_weights_are_correct, WeightInfoExt, + EXPECTED_DEFAULT_MESSAGE_LENGTH, +}; + +use crate::inbound_lane::{InboundLane, InboundLaneStorage}; +use crate::outbound_lane::{OutboundLane, OutboundLaneStorage}; +use crate::weights::WeightInfo; + +use bp_messages::{ + source_chain::{LaneMessageVerifier, MessageDeliveryAndDispatchPayment, RelayersRewards, TargetHeaderChain}, + target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain}, + total_unrewarded_messages, InboundLaneData, LaneId, MessageData, MessageKey, MessageNonce, MessagePayload, + OutboundLaneData, Parameter as MessagesParameter, UnrewardedRelayersState, +}; +use bp_runtime::Size; +use codec::{Decode, Encode}; +use frame_support::{ + decl_error, decl_event, decl_module, decl_storage, ensure, + traits::Get, + weights::{DispatchClass, Weight}, + Parameter, StorageMap, +}; +use frame_system::{ensure_signed, RawOrigin}; +use num_traits::{SaturatingAdd, Zero}; +use sp_runtime::{traits::BadOrigin, DispatchResult}; +use sp_std::{cell::RefCell, cmp::PartialOrd, marker::PhantomData, prelude::*}; + +mod inbound_lane; +mod outbound_lane; +mod weights_ext; + +pub mod instant_payments; +pub mod weights; + +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarking; + +#[cfg(test)] +mod mock; + +/// The module configuration trait +pub trait Config: frame_system::Config { + // General types + + /// They overarching event type. + type Event: From> + Into<::Event>; + /// Benchmarks results from runtime we're plugged into. + type WeightInfo: WeightInfoExt; + /// Pallet parameter that is opaque to the pallet itself, but may be used by the runtime + /// for integrating the pallet. + /// + /// All pallet parameters may only be updated either by the root, or by the pallet owner. 
+ type Parameter: MessagesParameter; + + /// Maximal number of messages that may be pruned during maintenance. Maintenance occurs + /// whenever new message is sent. The reason is that if you want to use lane, you should + /// be ready to pay for its maintenance. + type MaxMessagesToPruneAtOnce: Get; + /// Maximal number of unrewarded relayer entries at inbound lane. Unrewarded means that the + /// relayer has delivered messages, but either confirmations haven't been delivered back to the + /// source chain, or we haven't received reward confirmations yet. + /// + /// This constant limits maximal number of entries in the `InboundLaneData::relayers`. Keep + /// in mind that the same relayer account may take several (non-consecutive) entries in this + /// set. + type MaxUnrewardedRelayerEntriesAtInboundLane: Get; + /// Maximal number of unconfirmed messages at inbound lane. Unconfirmed means that the + /// message has been delivered, but either confirmations haven't been delivered back to the + /// source chain, or we haven't received reward confirmations for these messages yet. + /// + /// This constant limits difference between last message from last entry of the + /// `InboundLaneData::relayers` and first message at the first entry. + /// + /// There is no point of making this parameter lesser than MaxUnrewardedRelayerEntriesAtInboundLane, + /// because then maximal number of relayer entries will be limited by maximal number of messages. + /// + /// This value also represents maximal number of messages in single delivery transaction. Transaction + /// that is declaring more messages than this value, will be rejected. Even if these messages are + /// from different lanes. + type MaxUnconfirmedMessagesAtInboundLane: Get; + + /// Payload type of outbound messages. This payload is dispatched on the bridged chain. + type OutboundPayload: Parameter + Size; + /// Message fee type of outbound messages. This fee is paid on this chain. 
+ type OutboundMessageFee: Default + From + PartialOrd + Parameter + SaturatingAdd + Zero; + + /// Payload type of inbound messages. This payload is dispatched on this chain. + type InboundPayload: Decode; + /// Message fee type of inbound messages. This fee is paid on the bridged chain. + type InboundMessageFee: Decode; + /// Identifier of relayer that deliver messages to this chain. Relayer reward is paid on the bridged chain. + type InboundRelayer: Parameter; + + /// A type which can be turned into an AccountId from a 256-bit hash. + /// + /// Used when deriving the shared relayer fund account. + type AccountIdConverter: sp_runtime::traits::Convert; + + // Types that are used by outbound_lane (on source chain). + + /// Target header chain. + type TargetHeaderChain: TargetHeaderChain; + /// Message payload verifier. + type LaneMessageVerifier: LaneMessageVerifier; + /// Message delivery payment. + type MessageDeliveryAndDispatchPayment: MessageDeliveryAndDispatchPayment; + + // Types that are used by inbound_lane (on target chain). + + /// Source header chain, as it is represented on target chain. + type SourceHeaderChain: SourceHeaderChain; + /// Message dispatch. + type MessageDispatch: MessageDispatch; +} + +/// Shortcut to messages proof type for Config. +type MessagesProofOf = + <>::SourceHeaderChain as SourceHeaderChain<>::InboundMessageFee>>::MessagesProof; +/// Shortcut to messages delivery proof type for Config. +type MessagesDeliveryProofOf = <>::TargetHeaderChain as TargetHeaderChain< + >::OutboundPayload, + ::AccountId, +>>::MessagesDeliveryProof; + +decl_error! { + pub enum Error for Pallet, I: Instance> { + /// All pallet operations are halted. + Halted, + /// Message has been treated as invalid by chain verifier. + MessageRejectedByChainVerifier, + /// Message has been treated as invalid by lane verifier. + MessageRejectedByLaneVerifier, + /// Submitter has failed to pay fee for delivering and dispatching messages. 
+ FailedToWithdrawMessageFee, + /// The transaction brings too many messages. + TooManyMessagesInTheProof, + /// Invalid messages has been submitted. + InvalidMessagesProof, + /// Invalid messages dispatch weight has been declared by the relayer. + InvalidMessagesDispatchWeight, + /// Invalid messages delivery proof has been submitted. + InvalidMessagesDeliveryProof, + /// The relayer has declared invalid unrewarded relayers state in the `receive_messages_delivery_proof` call. + InvalidUnrewardedRelayersState, + /// The message someone is trying to work with (i.e. increase fee) is already-delivered. + MessageIsAlreadyDelivered, + /// The message someone is trying to work with (i.e. increase fee) is not yet sent. + MessageIsNotYetSent + } +} + +decl_storage! { + trait Store for Pallet, I: Instance = DefaultInstance> as BridgeMessages { + /// Optional pallet owner. + /// + /// Pallet owner has a right to halt all pallet operations and then resume it. If it is + /// `None`, then there are no direct ways to halt/resume pallet operations, but other + /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt + /// flag directly or call the `halt_operations`). + pub PalletOwner get(fn module_owner): Option; + /// If true, all pallet transactions are failed immediately. + pub IsHalted get(fn is_halted) config(): bool; + /// Map of lane id => inbound lane data. + pub InboundLanes: map hasher(blake2_128_concat) LaneId => InboundLaneData; + /// Map of lane id => outbound lane data. + pub OutboundLanes: map hasher(blake2_128_concat) LaneId => OutboundLaneData; + /// All queued outbound messages. 
+ pub OutboundMessages: map hasher(blake2_128_concat) MessageKey => Option>; + } + add_extra_genesis { + config(phantom): sp_std::marker::PhantomData; + config(owner): Option; + build(|config| { + if let Some(ref owner) = config.owner { + >::put(owner); + } + }) + } +} + +decl_event!( + pub enum Event + where + AccountId = ::AccountId, + Parameter = >::Parameter, + { + /// Pallet parameter has been updated. + ParameterUpdated(Parameter), + /// Message has been accepted and is waiting to be delivered. + MessageAccepted(LaneId, MessageNonce), + /// Messages in the inclusive range have been delivered and processed by the bridged chain. + MessagesDelivered(LaneId, MessageNonce, MessageNonce), + /// Phantom member, never used. + Dummy(PhantomData<(AccountId, I)>), + } +); + +decl_module! { + pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { + /// Deposit one of this module's events by using the default implementation. + fn deposit_event() = default; + + /// Ensure runtime invariants. + fn on_runtime_upgrade() -> Weight { + let reads = T::MessageDeliveryAndDispatchPayment::initialize( + &Self::relayer_fund_account_id() + ); + T::DbWeight::get().reads(reads as u64) + } + + /// Change `PalletOwner`. + /// + /// May only be called either by root, or by `PalletOwner`. + #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] + pub fn set_owner(origin, new_owner: Option) { + ensure_owner_or_root::(origin)?; + match new_owner { + Some(new_owner) => { + PalletOwner::::put(&new_owner); + log::info!(target: "runtime::bridge-messages", "Setting pallet Owner to: {:?}", new_owner); + }, + None => { + PalletOwner::::kill(); + log::info!(target: "runtime::bridge-messages", "Removed Owner of pallet."); + }, + } + } + + /// Halt or resume all pallet operations. + /// + /// May only be called either by root, or by `PalletOwner`. 
+ #[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)] + pub fn set_operational(origin, operational: bool) { + ensure_owner_or_root::(origin)?; + >::put(operational); + + if operational { + log::info!(target: "runtime::bridge-messages", "Resuming pallet operations."); + } else { + log::warn!(target: "runtime::bridge-messages", "Stopping pallet operations."); + } + } + + /// Update pallet parameter. + /// + /// May only be called either by root, or by `PalletOwner`. + /// + /// The weight is: single read for permissions check + 2 writes for parameter value and event. + #[weight = (T::DbWeight::get().reads_writes(1, 2), DispatchClass::Operational)] + pub fn update_pallet_parameter(origin, parameter: T::Parameter) { + ensure_owner_or_root::(origin)?; + parameter.save(); + Self::deposit_event(RawEvent::ParameterUpdated(parameter)); + } + + /// Send message over lane. + #[weight = T::WeightInfo::send_message_weight(payload)] + pub fn send_message( + origin, + lane_id: LaneId, + payload: T::OutboundPayload, + delivery_and_dispatch_fee: T::OutboundMessageFee, + ) -> DispatchResult { + ensure_operational::()?; + let submitter = origin.into().map_err(|_| BadOrigin)?; + + // let's first check if message can be delivered to target chain + T::TargetHeaderChain::verify_message(&payload) + .map_err(|err| { + log::trace!( + target: "runtime::bridge-messages", + "Message to lane {:?} is rejected by target chain: {:?}", + lane_id, + err, + ); + + Error::::MessageRejectedByChainVerifier + })?; + + // now let's enforce any additional lane rules + let mut lane = outbound_lane::(lane_id); + T::LaneMessageVerifier::verify_message( + &submitter, + &delivery_and_dispatch_fee, + &lane_id, + &lane.data(), + &payload, + ).map_err(|err| { + log::trace!( + target: "runtime::bridge-messages", + "Message to lane {:?} is rejected by lane verifier: {:?}", + lane_id, + err, + ); + + Error::::MessageRejectedByLaneVerifier + })?; + + // let's withdraw delivery and dispatch 
fee from submitter + T::MessageDeliveryAndDispatchPayment::pay_delivery_and_dispatch_fee( + &submitter, + &delivery_and_dispatch_fee, + &Self::relayer_fund_account_id(), + ).map_err(|err| { + log::trace!( + target: "runtime::bridge-messages", + "Message to lane {:?} is rejected because submitter {:?} is unable to pay fee {:?}: {:?}", + lane_id, + submitter, + delivery_and_dispatch_fee, + err, + ); + + Error::::FailedToWithdrawMessageFee + })?; + + // finally, save message in outbound storage and emit event + let encoded_payload = payload.encode(); + let encoded_payload_len = encoded_payload.len(); + let nonce = lane.send_message(MessageData { + payload: encoded_payload, + fee: delivery_and_dispatch_fee, + }); + lane.prune_messages(T::MaxMessagesToPruneAtOnce::get()); + + log::trace!( + target: "runtime::bridge-messages", + "Accepted message {} to lane {:?}. Message size: {:?}", + nonce, + lane_id, + encoded_payload_len, + ); + + Self::deposit_event(RawEvent::MessageAccepted(lane_id, nonce)); + + Ok(()) + } + + /// Pay additional fee for the message. 
+ #[weight = T::WeightInfo::increase_message_fee()] + pub fn increase_message_fee( + origin, + lane_id: LaneId, + nonce: MessageNonce, + additional_fee: T::OutboundMessageFee, + ) -> DispatchResult { + // if someone tries to pay for already-delivered message, we're rejecting this intention + // (otherwise this additional fee will be locked forever in relayers fund) + // + // if someone tries to pay for not-yet-sent message, we're rejeting this intention, or + // we're risking to have mess in the storage + let lane = outbound_lane::(lane_id); + ensure!(nonce > lane.data().latest_received_nonce, Error::::MessageIsAlreadyDelivered); + ensure!(nonce <= lane.data().latest_generated_nonce, Error::::MessageIsNotYetSent); + + // withdraw additional fee from submitter + let submitter = origin.into().map_err(|_| BadOrigin)?; + T::MessageDeliveryAndDispatchPayment::pay_delivery_and_dispatch_fee( + &submitter, + &additional_fee, + &Self::relayer_fund_account_id(), + ).map_err(|err| { + log::trace!( + target: "runtime::bridge-messages", + "Submitter {:?} can't pay additional fee {:?} for the message {:?}/{:?}: {:?}", + submitter, + additional_fee, + lane_id, + nonce, + err, + ); + + Error::::FailedToWithdrawMessageFee + })?; + + // and finally update fee in the storage + let message_key = MessageKey { lane_id, nonce }; + OutboundMessages::::mutate(message_key, |message_data| { + // saturating_add is fine here - overflow here means that someone controls all + // chain funds, which shouldn't ever happen + `pay_delivery_and_dispatch_fee` + // above will fail before we reach here + let message_data = message_data + .as_mut() + .expect("the message is sent and not yet delivered; so it is in the storage; qed"); + message_data.fee = message_data.fee.saturating_add(&additional_fee); + }); + + Ok(()) + } + + /// Receive messages proof from bridged chain. + /// + /// The weight of the call assumes that the transaction always brings outbound lane + /// state update. 
Because of that, the submitter (relayer) has no benefit of not including + /// this data in the transaction, so reward confirmations lags should be minimal. + #[weight = T::WeightInfo::receive_messages_proof_weight(proof, *messages_count, *dispatch_weight)] + pub fn receive_messages_proof( + origin, + relayer_id: T::InboundRelayer, + proof: MessagesProofOf, + messages_count: u32, + dispatch_weight: Weight, + ) -> DispatchResult { + ensure_operational::()?; + let _ = ensure_signed(origin)?; + + // reject transactions that are declaring too many messages + ensure!( + MessageNonce::from(messages_count) <= T::MaxUnconfirmedMessagesAtInboundLane::get(), + Error::::TooManyMessagesInTheProof + ); + + // verify messages proof && convert proof into messages + let messages = verify_and_decode_messages_proof::< + T::SourceHeaderChain, + T::InboundMessageFee, + T::InboundPayload, + >(proof, messages_count) + .map_err(|err| { + log::trace!( + target: "runtime::bridge-messages", + "Rejecting invalid messages proof: {:?}", + err, + ); + + Error::::InvalidMessagesProof + })?; + + // verify that relayer is paying actual dispatch weight + let actual_dispatch_weight: Weight = messages + .values() + .map(|lane_messages| lane_messages + .messages + .iter() + .map(T::MessageDispatch::dispatch_weight) + .fold(0, |sum, weight| sum.saturating_add(&weight)) + ) + .fold(0, |sum, weight| sum.saturating_add(weight)); + if dispatch_weight < actual_dispatch_weight { + log::trace!( + target: "runtime::bridge-messages", + "Rejecting messages proof because of dispatch weight mismatch: declared={}, expected={}", + dispatch_weight, + actual_dispatch_weight, + ); + + return Err(Error::::InvalidMessagesDispatchWeight.into()); + } + + // dispatch messages and (optionally) update lane(s) state(s) + let mut total_messages = 0; + let mut valid_messages = 0; + for (lane_id, lane_data) in messages { + let mut lane = inbound_lane::(lane_id); + + if let Some(lane_state) = lane_data.lane_state { + let 
updated_latest_confirmed_nonce = lane.receive_state_update(lane_state); + if let Some(updated_latest_confirmed_nonce) = updated_latest_confirmed_nonce { + log::trace!( + target: "runtime::bridge-messages", + "Received lane {:?} state update: latest_confirmed_nonce={}", + lane_id, + updated_latest_confirmed_nonce, + ); + } + } + + for message in lane_data.messages { + debug_assert_eq!(message.key.lane_id, lane_id); + + total_messages += 1; + if lane.receive_message::(relayer_id.clone(), message.key.nonce, message.data) { + valid_messages += 1; + } + } + } + + log::trace!( + target: "runtime::bridge-messages", + "Received messages: total={}, valid={}", + total_messages, + valid_messages, + ); + + Ok(()) + } + + /// Receive messages delivery proof from bridged chain. + #[weight = T::WeightInfo::receive_messages_delivery_proof_weight(proof, relayers_state)] + pub fn receive_messages_delivery_proof( + origin, + proof: MessagesDeliveryProofOf, + relayers_state: UnrewardedRelayersState, + ) -> DispatchResult { + ensure_operational::()?; + + let confirmation_relayer = ensure_signed(origin)?; + let (lane_id, lane_data) = T::TargetHeaderChain::verify_messages_delivery_proof(proof).map_err(|err| { + log::trace!( + target: "runtime::bridge-messages", + "Rejecting invalid messages delivery proof: {:?}", + err, + ); + + Error::::InvalidMessagesDeliveryProof + })?; + + // verify that the relayer has declared correct `lane_data::relayers` state + // (we only care about total number of entries and messages, because this affects call weight) + ensure!( + total_unrewarded_messages(&lane_data.relayers) + .unwrap_or(MessageNonce::MAX) == relayers_state.total_messages + && lane_data.relayers.len() as MessageNonce == relayers_state.unrewarded_relayer_entries, + Error::::InvalidUnrewardedRelayersState + ); + + // mark messages as delivered + let mut lane = outbound_lane::(lane_id); + let mut relayers_rewards: RelayersRewards<_, T::OutboundMessageFee> = RelayersRewards::new(); + let 
last_delivered_nonce = lane_data.last_delivered_nonce(); + let received_range = lane.confirm_delivery(last_delivered_nonce); + if let Some(received_range) = received_range { + Self::deposit_event(RawEvent::MessagesDelivered(lane_id, received_range.0, received_range.1)); + + // remember to reward relayers that have delivered messages + // this loop is bounded by `T::MaxUnrewardedRelayerEntriesAtInboundLane` on the bridged chain + for (nonce_low, nonce_high, relayer) in lane_data.relayers { + let nonce_begin = sp_std::cmp::max(nonce_low, received_range.0); + let nonce_end = sp_std::cmp::min(nonce_high, received_range.1); + + // loop won't proceed if current entry is ahead of received range (begin > end). + // this loop is bound by `T::MaxUnconfirmedMessagesAtInboundLane` on the bridged chain + let mut relayer_reward = relayers_rewards.entry(relayer).or_default(); + for nonce in nonce_begin..nonce_end + 1 { + let message_data = OutboundMessages::::get(MessageKey { + lane_id, + nonce, + }).expect("message was just confirmed; we never prune unconfirmed messages; qed"); + relayer_reward.reward = relayer_reward.reward.saturating_add(&message_data.fee); + relayer_reward.messages += 1; + } + } + } + + // if some new messages have been confirmed, reward relayers + if !relayers_rewards.is_empty() { + let relayer_fund_account = Self::relayer_fund_account_id(); + >::MessageDeliveryAndDispatchPayment::pay_relayers_rewards( + &confirmation_relayer, + relayers_rewards, + &relayer_fund_account, + ); + } + + log::trace!( + target: "runtime::bridge-messages", + "Received messages delivery proof up to (and including) {} at lane {:?}", + last_delivered_nonce, + lane_id, + ); + + Ok(()) + } + } +} + +impl, I: Instance> Pallet { + /// Get payload of given outbound message. 
+ pub fn outbound_message_payload(lane: LaneId, nonce: MessageNonce) -> Option { + OutboundMessages::::get(MessageKey { lane_id: lane, nonce }).map(|message_data| message_data.payload) + } + + /// Get nonce of latest generated message at given outbound lane. + pub fn outbound_latest_generated_nonce(lane: LaneId) -> MessageNonce { + OutboundLanes::::get(&lane).latest_generated_nonce + } + + /// Get nonce of latest confirmed message at given outbound lane. + pub fn outbound_latest_received_nonce(lane: LaneId) -> MessageNonce { + OutboundLanes::::get(&lane).latest_received_nonce + } + + /// Get nonce of latest received message at given inbound lane. + pub fn inbound_latest_received_nonce(lane: LaneId) -> MessageNonce { + InboundLanes::::get(&lane).last_delivered_nonce() + } + + /// Get nonce of latest confirmed message at given inbound lane. + pub fn inbound_latest_confirmed_nonce(lane: LaneId) -> MessageNonce { + InboundLanes::::get(&lane).last_confirmed_nonce + } + + /// Get state of unrewarded relayers set. + pub fn inbound_unrewarded_relayers_state(lane: bp_messages::LaneId) -> bp_messages::UnrewardedRelayersState { + let relayers = InboundLanes::::get(&lane).relayers; + bp_messages::UnrewardedRelayersState { + unrewarded_relayer_entries: relayers.len() as _, + messages_in_oldest_entry: relayers.front().map(|(begin, end, _)| 1 + end - begin).unwrap_or(0), + total_messages: total_unrewarded_messages(&relayers).unwrap_or(MessageNonce::MAX), + } + } + + /// AccountId of the shared relayer fund account. + /// + /// This account is passed to `MessageDeliveryAndDispatchPayment` trait, and depending + /// on the implementation it can be used to store relayers rewards. + /// See [InstantCurrencyPayments] for a concrete implementation. 
+ pub fn relayer_fund_account_id() -> T::AccountId { + use sp_runtime::traits::Convert; + let encoded_id = bp_runtime::derive_relayer_fund_account_id(bp_runtime::NO_INSTANCE_ID); + T::AccountIdConverter::convert(encoded_id) + } +} + +/// Getting storage keys for messages and lanes states. These keys are normally used when building +/// messages and lanes states proofs. +/// +/// Keep in mind that all functions in this module are **NOT** using passed `T` argument, so any +/// runtime can be passed. E.g. if you're verifying proof from Runtime1 in Runtime2, you only have +/// access to Runtime2 and you may pass it to the functions, where required. This is because our +/// maps are not using any Runtime-specific data in the keys. +/// +/// On the other side, passing correct instance is required. So if proof has been crafted by the +/// Instance1, you should verify it using Instance1. This is inconvenient if you're using different +/// instances on different sides of the bridge. I.e. in Runtime1 it is Instance2, but on Runtime2 +/// it is Instance42. But there's no other way, but to craft this key manually (which is what I'm +/// trying to avoid here) - by using strings like "Instance2", "OutboundMessages", etc. +pub mod storage_keys { + use super::*; + use frame_support::storage::generator::StorageMap; + use sp_core::storage::StorageKey; + + /// Storage key of the outbound message in the runtime storage. + pub fn message_key, I: Instance>(lane: &LaneId, nonce: MessageNonce) -> StorageKey { + let message_key = MessageKey { lane_id: *lane, nonce }; + let raw_storage_key = OutboundMessages::::storage_map_final_key(message_key); + StorageKey(raw_storage_key) + } + + /// Storage key of the outbound message lane state in the runtime storage. + pub fn outbound_lane_data_key(lane: &LaneId) -> StorageKey { + StorageKey(OutboundLanes::::storage_map_final_key(*lane)) + } + + /// Storage key of the inbound message lane state in the runtime storage. 
+ pub fn inbound_lane_data_key, I: Instance>(lane: &LaneId) -> StorageKey { + StorageKey(InboundLanes::::storage_map_final_key(*lane)) + } +} + +/// Ensure that the origin is either root, or `PalletOwner`. +fn ensure_owner_or_root, I: Instance>(origin: T::Origin) -> Result<(), BadOrigin> { + match origin.into() { + Ok(RawOrigin::Root) => Ok(()), + Ok(RawOrigin::Signed(ref signer)) if Some(signer) == Pallet::::module_owner().as_ref() => Ok(()), + _ => Err(BadOrigin), + } +} + +/// Ensure that the pallet is in operational mode (not halted). +fn ensure_operational, I: Instance>() -> Result<(), Error> { + if IsHalted::::get() { + Err(Error::::Halted) + } else { + Ok(()) + } +} + +/// Creates new inbound lane object, backed by runtime storage. +fn inbound_lane, I: Instance>(lane_id: LaneId) -> InboundLane> { + InboundLane::new(inbound_lane_storage::(lane_id)) +} + +/// Creates new runtime inbound lane storage. +fn inbound_lane_storage, I: Instance>(lane_id: LaneId) -> RuntimeInboundLaneStorage { + RuntimeInboundLaneStorage { + lane_id, + cached_data: RefCell::new(None), + _phantom: Default::default(), + } +} + +/// Creates new outbound lane object, backed by runtime storage. +fn outbound_lane, I: Instance>(lane_id: LaneId) -> OutboundLane> { + OutboundLane::new(RuntimeOutboundLaneStorage { + lane_id, + _phantom: Default::default(), + }) +} + +/// Runtime inbound lane storage. 
+struct RuntimeInboundLaneStorage, I = DefaultInstance> { + lane_id: LaneId, + cached_data: RefCell>>, + _phantom: PhantomData, +} + +impl, I: Instance> InboundLaneStorage for RuntimeInboundLaneStorage { + type MessageFee = T::InboundMessageFee; + type Relayer = T::InboundRelayer; + + fn id(&self) -> LaneId { + self.lane_id + } + + fn max_unrewarded_relayer_entries(&self) -> MessageNonce { + T::MaxUnrewardedRelayerEntriesAtInboundLane::get() + } + + fn max_unconfirmed_messages(&self) -> MessageNonce { + T::MaxUnconfirmedMessagesAtInboundLane::get() + } + + fn data(&self) -> InboundLaneData { + match self.cached_data.clone().into_inner() { + Some(data) => data, + None => { + let data = InboundLanes::::get(&self.lane_id); + *self.cached_data.try_borrow_mut().expect( + "we're in the single-threaded environment;\ + we have no recursive borrows; qed", + ) = Some(data.clone()); + data + } + } + } + + fn set_data(&mut self, data: InboundLaneData) { + *self.cached_data.try_borrow_mut().expect( + "we're in the single-threaded environment;\ + we have no recursive borrows; qed", + ) = Some(data.clone()); + InboundLanes::::insert(&self.lane_id, data) + } +} + +/// Runtime outbound lane storage. 
+struct RuntimeOutboundLaneStorage { + lane_id: LaneId, + _phantom: PhantomData<(T, I)>, +} + +impl, I: Instance> OutboundLaneStorage for RuntimeOutboundLaneStorage { + type MessageFee = T::OutboundMessageFee; + + fn id(&self) -> LaneId { + self.lane_id + } + + fn data(&self) -> OutboundLaneData { + OutboundLanes::::get(&self.lane_id) + } + + fn set_data(&mut self, data: OutboundLaneData) { + OutboundLanes::::insert(&self.lane_id, data) + } + + #[cfg(test)] + fn message(&self, nonce: &MessageNonce) -> Option> { + OutboundMessages::::get(MessageKey { + lane_id: self.lane_id, + nonce: *nonce, + }) + } + + fn save_message(&mut self, nonce: MessageNonce, mesage_data: MessageData) { + OutboundMessages::::insert( + MessageKey { + lane_id: self.lane_id, + nonce, + }, + mesage_data, + ); + } + + fn remove_message(&mut self, nonce: &MessageNonce) { + OutboundMessages::::remove(MessageKey { + lane_id: self.lane_id, + nonce: *nonce, + }); + } +} + +/// Verify messages proof and return proved messages with decoded payload. +fn verify_and_decode_messages_proof, Fee, DispatchPayload: Decode>( + proof: Chain::MessagesProof, + messages_count: u32, +) -> Result>, Chain::Error> { + // `receive_messages_proof` weight formula and `MaxUnconfirmedMessagesAtInboundLane` check + // guarantees that the `message_count` is sane and Vec may be allocated. 
+ // (tx with too many messages will either be rejected from the pool, or will fail earlier) + Chain::verify_messages_proof(proof, messages_count).map(|messages_by_lane| { + messages_by_lane + .into_iter() + .map(|(lane, lane_data)| { + ( + lane, + ProvedLaneMessages { + lane_state: lane_data.lane_state, + messages: lane_data.messages.into_iter().map(Into::into).collect(), + }, + ) + }) + .collect() + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{ + message, run_test, Event as TestEvent, Origin, TestMessageDeliveryAndDispatchPayment, + TestMessagesDeliveryProof, TestMessagesParameter, TestMessagesProof, TestPayload, TestRuntime, + TokenConversionRate, PAYLOAD_REJECTED_BY_TARGET_CHAIN, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, + TEST_RELAYER_B, + }; + use bp_messages::UnrewardedRelayersState; + use frame_support::{assert_noop, assert_ok}; + use frame_system::{EventRecord, Pallet as System, Phase}; + use hex_literal::hex; + use sp_runtime::DispatchError; + + fn get_ready_for_events() { + System::::set_block_number(1); + System::::reset_events(); + } + + fn send_regular_message() { + get_ready_for_events(); + + assert_ok!(Pallet::::send_message( + Origin::signed(1), + TEST_LANE_ID, + REGULAR_PAYLOAD, + REGULAR_PAYLOAD.1, + )); + + // check event with assigned nonce + assert_eq!( + System::::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: TestEvent::pallet_bridge_messages(RawEvent::MessageAccepted(TEST_LANE_ID, 1)), + topics: vec![], + }], + ); + + // check that fee has been withdrawn from submitter + assert!(TestMessageDeliveryAndDispatchPayment::is_fee_paid(1, REGULAR_PAYLOAD.1)); + } + + fn receive_messages_delivery_proof() { + System::::set_block_number(1); + System::::reset_events(); + + assert_ok!(Pallet::::receive_messages_delivery_proof( + Origin::signed(1), + TestMessagesDeliveryProof(Ok(( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 1, + ..Default::default() + }, + ))), + Default::default(), 
+ )); + + assert_eq!( + System::::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: TestEvent::pallet_bridge_messages(RawEvent::MessagesDelivered(TEST_LANE_ID, 1, 1)), + topics: vec![], + }], + ); + } + + #[test] + fn pallet_owner_may_change_owner() { + run_test(|| { + PalletOwner::::put(2); + + assert_ok!(Pallet::::set_owner(Origin::root(), Some(1))); + assert_noop!( + Pallet::::set_operational(Origin::signed(2), false), + DispatchError::BadOrigin, + ); + assert_ok!(Pallet::::set_operational(Origin::root(), false)); + + assert_ok!(Pallet::::set_owner(Origin::signed(1), None)); + assert_noop!( + Pallet::::set_operational(Origin::signed(1), true), + DispatchError::BadOrigin, + ); + assert_noop!( + Pallet::::set_operational(Origin::signed(2), true), + DispatchError::BadOrigin, + ); + assert_ok!(Pallet::::set_operational(Origin::root(), true)); + }); + } + + #[test] + fn pallet_may_be_halted_by_root() { + run_test(|| { + assert_ok!(Pallet::::set_operational(Origin::root(), false)); + assert_ok!(Pallet::::set_operational(Origin::root(), true)); + }); + } + + #[test] + fn pallet_may_be_halted_by_owner() { + run_test(|| { + PalletOwner::::put(2); + + assert_ok!(Pallet::::set_operational(Origin::signed(2), false)); + assert_ok!(Pallet::::set_operational(Origin::signed(2), true)); + + assert_noop!( + Pallet::::set_operational(Origin::signed(1), false), + DispatchError::BadOrigin, + ); + assert_noop!( + Pallet::::set_operational(Origin::signed(1), true), + DispatchError::BadOrigin, + ); + + assert_ok!(Pallet::::set_operational(Origin::signed(2), false)); + assert_noop!( + Pallet::::set_operational(Origin::signed(1), true), + DispatchError::BadOrigin, + ); + }); + } + + #[test] + fn pallet_parameter_may_be_updated_by_root() { + run_test(|| { + get_ready_for_events(); + + let parameter = TestMessagesParameter::TokenConversionRate(10.into()); + assert_ok!(Pallet::::update_pallet_parameter( + Origin::root(), + parameter.clone(), + )); + + 
assert_eq!(TokenConversionRate::get(), 10.into()); + assert_eq!( + System::::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: TestEvent::pallet_bridge_messages(RawEvent::ParameterUpdated(parameter)), + topics: vec![], + }], + ); + }); + } + + #[test] + fn pallet_parameter_may_be_updated_by_owner() { + run_test(|| { + PalletOwner::::put(2); + get_ready_for_events(); + + let parameter = TestMessagesParameter::TokenConversionRate(10.into()); + assert_ok!(Pallet::::update_pallet_parameter( + Origin::signed(2), + parameter.clone(), + )); + + assert_eq!(TokenConversionRate::get(), 10.into()); + assert_eq!( + System::::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: TestEvent::pallet_bridge_messages(RawEvent::ParameterUpdated(parameter)), + topics: vec![], + }], + ); + }); + } + + #[test] + fn pallet_parameter_cant_be_updated_by_arbitrary_submitter() { + run_test(|| { + assert_noop!( + Pallet::::update_pallet_parameter( + Origin::signed(2), + TestMessagesParameter::TokenConversionRate(10.into()), + ), + DispatchError::BadOrigin, + ); + + PalletOwner::::put(2); + + assert_noop!( + Pallet::::update_pallet_parameter( + Origin::signed(1), + TestMessagesParameter::TokenConversionRate(10.into()), + ), + DispatchError::BadOrigin, + ); + }); + } + + #[test] + fn fixed_u128_works_as_i_think() { + // this test is here just to be sure that conversion rate may be represented with FixedU128 + run_test(|| { + use sp_runtime::{FixedPointNumber, FixedU128}; + + // 1:1 conversion that we use by default for testnets + let rialto_token = 1u64; + let rialto_token_in_millau_tokens = TokenConversionRate::get().saturating_mul_int(rialto_token); + assert_eq!(rialto_token_in_millau_tokens, 1); + + // let's say conversion rate is 1:1.7 + let conversion_rate = FixedU128::saturating_from_rational(170, 100); + let rialto_tokens = 100u64; + let rialto_tokens_in_millau_tokens = conversion_rate.saturating_mul_int(rialto_tokens); + 
assert_eq!(rialto_tokens_in_millau_tokens, 170); + + // let's say conversion rate is 1:0.25 + let conversion_rate = FixedU128::saturating_from_rational(25, 100); + let rialto_tokens = 100u64; + let rialto_tokens_in_millau_tokens = conversion_rate.saturating_mul_int(rialto_tokens); + assert_eq!(rialto_tokens_in_millau_tokens, 25); + }); + } + + #[test] + fn pallet_rejects_transactions_if_halted() { + run_test(|| { + // send message first to be able to check that delivery_proof fails later + send_regular_message(); + + IsHalted::::put(true); + + assert_noop!( + Pallet::::send_message( + Origin::signed(1), + TEST_LANE_ID, + REGULAR_PAYLOAD, + REGULAR_PAYLOAD.1, + ), + Error::::Halted, + ); + + assert_noop!( + Pallet::::receive_messages_proof( + Origin::signed(1), + TEST_RELAYER_A, + Ok(vec![message(2, REGULAR_PAYLOAD)]).into(), + 1, + REGULAR_PAYLOAD.1, + ), + Error::::Halted, + ); + + assert_noop!( + Pallet::::receive_messages_delivery_proof( + Origin::signed(1), + TestMessagesDeliveryProof(Ok(( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 1, + ..Default::default() + }, + ))), + Default::default(), + ), + Error::::Halted, + ); + }); + } + + #[test] + fn send_message_works() { + run_test(|| { + send_regular_message(); + }); + } + + #[test] + fn chain_verifier_rejects_invalid_message_in_send_message() { + run_test(|| { + // messages with this payload are rejected by target chain verifier + assert_noop!( + Pallet::::send_message( + Origin::signed(1), + TEST_LANE_ID, + PAYLOAD_REJECTED_BY_TARGET_CHAIN, + PAYLOAD_REJECTED_BY_TARGET_CHAIN.1 + ), + Error::::MessageRejectedByChainVerifier, + ); + }); + } + + #[test] + fn lane_verifier_rejects_invalid_message_in_send_message() { + run_test(|| { + // messages with zero fee are rejected by lane verifier + assert_noop!( + Pallet::::send_message(Origin::signed(1), TEST_LANE_ID, REGULAR_PAYLOAD, 0), + Error::::MessageRejectedByLaneVerifier, + ); + }); + } + + #[test] + fn 
message_send_fails_if_submitter_cant_pay_message_fee() { + run_test(|| { + TestMessageDeliveryAndDispatchPayment::reject_payments(); + assert_noop!( + Pallet::::send_message( + Origin::signed(1), + TEST_LANE_ID, + REGULAR_PAYLOAD, + REGULAR_PAYLOAD.1 + ), + Error::::FailedToWithdrawMessageFee, + ); + }); + } + + #[test] + fn receive_messages_proof_works() { + run_test(|| { + assert_ok!(Pallet::::receive_messages_proof( + Origin::signed(1), + TEST_RELAYER_A, + Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), + 1, + REGULAR_PAYLOAD.1, + )); + + assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 1); + }); + } + + #[test] + fn receive_messages_proof_updates_confirmed_message_nonce() { + run_test(|| { + // say we have received 10 messages && last confirmed message is 8 + InboundLanes::::insert( + TEST_LANE_ID, + InboundLaneData { + last_confirmed_nonce: 8, + relayers: vec![(9, 9, TEST_RELAYER_A), (10, 10, TEST_RELAYER_B)] + .into_iter() + .collect(), + }, + ); + assert_eq!( + Pallet::::inbound_unrewarded_relayers_state(TEST_LANE_ID), + UnrewardedRelayersState { + unrewarded_relayer_entries: 2, + messages_in_oldest_entry: 1, + total_messages: 2, + }, + ); + + // message proof includes outbound lane state with latest confirmed message updated to 9 + let mut message_proof: TestMessagesProof = Ok(vec![message(11, REGULAR_PAYLOAD)]).into(); + message_proof.result.as_mut().unwrap()[0].1.lane_state = Some(OutboundLaneData { + latest_received_nonce: 9, + ..Default::default() + }); + + assert_ok!(Pallet::::receive_messages_proof( + Origin::signed(1), + TEST_RELAYER_A, + message_proof, + 1, + REGULAR_PAYLOAD.1, + )); + + assert_eq!( + InboundLanes::::get(TEST_LANE_ID), + InboundLaneData { + last_confirmed_nonce: 9, + relayers: vec![(10, 10, TEST_RELAYER_B), (11, 11, TEST_RELAYER_A)] + .into_iter() + .collect(), + }, + ); + assert_eq!( + Pallet::::inbound_unrewarded_relayers_state(TEST_LANE_ID), + UnrewardedRelayersState { + unrewarded_relayer_entries: 2, + 
messages_in_oldest_entry: 1, + total_messages: 2, + }, + ); + }); + } + + #[test] + fn receive_messages_proof_rejects_invalid_dispatch_weight() { + run_test(|| { + assert_noop!( + Pallet::::receive_messages_proof( + Origin::signed(1), + TEST_RELAYER_A, + Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), + 1, + REGULAR_PAYLOAD.1 - 1, + ), + Error::::InvalidMessagesDispatchWeight, + ); + }); + } + + #[test] + fn receive_messages_proof_rejects_invalid_proof() { + run_test(|| { + assert_noop!( + Pallet::::receive_messages_proof( + Origin::signed(1), + TEST_RELAYER_A, + Err(()).into(), + 1, + 0, + ), + Error::::InvalidMessagesProof, + ); + }); + } + + #[test] + fn receive_messages_proof_rejects_proof_with_too_many_messages() { + run_test(|| { + assert_noop!( + Pallet::::receive_messages_proof( + Origin::signed(1), + TEST_RELAYER_A, + Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), + u32::MAX, + 0, + ), + Error::::TooManyMessagesInTheProof, + ); + }); + } + + #[test] + fn receive_messages_delivery_proof_works() { + run_test(|| { + send_regular_message(); + receive_messages_delivery_proof(); + + assert_eq!( + OutboundLanes::::get(&TEST_LANE_ID).latest_received_nonce, + 1, + ); + }); + } + + #[test] + fn receive_messages_delivery_proof_rewards_relayers() { + run_test(|| { + assert_ok!(Pallet::::send_message( + Origin::signed(1), + TEST_LANE_ID, + REGULAR_PAYLOAD, + 1000, + )); + assert_ok!(Pallet::::send_message( + Origin::signed(1), + TEST_LANE_ID, + REGULAR_PAYLOAD, + 2000, + )); + + // this reports delivery of message 1 => reward is paid to TEST_RELAYER_A + assert_ok!(Pallet::::receive_messages_delivery_proof( + Origin::signed(1), + TestMessagesDeliveryProof(Ok(( + TEST_LANE_ID, + InboundLaneData { + relayers: vec![(1, 1, TEST_RELAYER_A)].into_iter().collect(), + ..Default::default() + } + ))), + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 1, + ..Default::default() + }, + )); + assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid( + 
TEST_RELAYER_A, + 1000 + )); + assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid( + TEST_RELAYER_B, + 2000 + )); + + // this reports delivery of both message 1 and message 2 => reward is paid only to TEST_RELAYER_B + assert_ok!(Pallet::::receive_messages_delivery_proof( + Origin::signed(1), + TestMessagesDeliveryProof(Ok(( + TEST_LANE_ID, + InboundLaneData { + relayers: vec![(1, 1, TEST_RELAYER_A), (2, 2, TEST_RELAYER_B)] + .into_iter() + .collect(), + ..Default::default() + } + ))), + UnrewardedRelayersState { + unrewarded_relayer_entries: 2, + total_messages: 2, + ..Default::default() + }, + )); + assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid( + TEST_RELAYER_A, + 1000 + )); + assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid( + TEST_RELAYER_B, + 2000 + )); + }); + } + + #[test] + fn receive_messages_delivery_proof_rejects_invalid_proof() { + run_test(|| { + assert_noop!( + Pallet::::receive_messages_delivery_proof( + Origin::signed(1), + TestMessagesDeliveryProof(Err(())), + Default::default(), + ), + Error::::InvalidMessagesDeliveryProof, + ); + }); + } + + #[test] + fn receive_messages_delivery_proof_rejects_proof_if_declared_relayers_state_is_invalid() { + run_test(|| { + // when number of relayers entires is invalid + assert_noop!( + Pallet::::receive_messages_delivery_proof( + Origin::signed(1), + TestMessagesDeliveryProof(Ok(( + TEST_LANE_ID, + InboundLaneData { + relayers: vec![(1, 1, TEST_RELAYER_A), (2, 2, TEST_RELAYER_B)] + .into_iter() + .collect(), + ..Default::default() + } + ))), + UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 2, + ..Default::default() + }, + ), + Error::::InvalidUnrewardedRelayersState, + ); + + // when number of messages is invalid + assert_noop!( + Pallet::::receive_messages_delivery_proof( + Origin::signed(1), + TestMessagesDeliveryProof(Ok(( + TEST_LANE_ID, + InboundLaneData { + relayers: vec![(1, 1, TEST_RELAYER_A), (2, 2, TEST_RELAYER_B)] + .into_iter() 
+ .collect(), + ..Default::default() + } + ))), + UnrewardedRelayersState { + unrewarded_relayer_entries: 2, + total_messages: 1, + ..Default::default() + }, + ), + Error::::InvalidUnrewardedRelayersState, + ); + }); + } + + #[test] + fn receive_messages_accepts_single_message_with_invalid_payload() { + run_test(|| { + let mut invalid_message = message(1, REGULAR_PAYLOAD); + invalid_message.data.payload = Vec::new(); + + assert_ok!(Pallet::::receive_messages_proof( + Origin::signed(1), + TEST_RELAYER_A, + Ok(vec![invalid_message]).into(), + 1, + 0, // weight may be zero in this case (all messages are improperly encoded) + ),); + + assert_eq!( + InboundLanes::::get(&TEST_LANE_ID).last_delivered_nonce(), + 1, + ); + }); + } + + #[test] + fn receive_messages_accepts_batch_with_message_with_invalid_payload() { + run_test(|| { + let mut invalid_message = message(2, REGULAR_PAYLOAD); + invalid_message.data.payload = Vec::new(); + + assert_ok!(Pallet::::receive_messages_proof( + Origin::signed(1), + TEST_RELAYER_A, + Ok(vec![ + message(1, REGULAR_PAYLOAD), + invalid_message, + message(3, REGULAR_PAYLOAD), + ]) + .into(), + 3, + REGULAR_PAYLOAD.1 + REGULAR_PAYLOAD.1, + ),); + + assert_eq!( + InboundLanes::::get(&TEST_LANE_ID).last_delivered_nonce(), + 3, + ); + }); + } + + #[test] + fn storage_message_key_computed_properly() { + // If this test fails, then something has been changed in module storage that is breaking all + // previously crafted messages proofs. + let storage_key = storage_keys::message_key::(&*b"test", 42).0; + assert_eq!( + storage_key, + hex!("dd16c784ebd3390a9bc0357c7511ed018a395e6242c6813b196ca31ed0547ea79446af0e09063bd4a7874aef8a997cec746573742a00000000000000").to_vec(), + "Unexpected storage key: {}", + hex::encode(&storage_key), + ); + } + + #[test] + fn outbound_lane_data_key_computed_properly() { + // If this test fails, then something has been changed in module storage that is breaking all + // previously crafted outbound lane state proofs. 
+ let storage_key = storage_keys::outbound_lane_data_key::(&*b"test").0; + assert_eq!( + storage_key, + hex!("dd16c784ebd3390a9bc0357c7511ed0196c246acb9b55077390e3ca723a0ca1f44a8995dd50b6657a037a7839304535b74657374").to_vec(), + "Unexpected storage key: {}", + hex::encode(&storage_key), + ); + } + + #[test] + fn inbound_lane_data_key_computed_properly() { + // If this test fails, then something has been changed in module storage that is breaking all + // previously crafted inbound lane state proofs. + let storage_key = storage_keys::inbound_lane_data_key::(&*b"test").0; + assert_eq!( + storage_key, + hex!("dd16c784ebd3390a9bc0357c7511ed01e5f83cf83f2127eb47afdc35d6e43fab44a8995dd50b6657a037a7839304535b74657374").to_vec(), + "Unexpected storage key: {}", + hex::encode(&storage_key), + ); + } + + #[test] + fn actual_dispatch_weight_does_not_overlow() { + run_test(|| { + let message1 = message(1, TestPayload(0, Weight::MAX / 2)); + let message2 = message(2, TestPayload(0, Weight::MAX / 2)); + let message3 = message(2, TestPayload(0, Weight::MAX / 2)); + + assert_noop!( + Pallet::::receive_messages_proof( + Origin::signed(1), + TEST_RELAYER_A, + // this may cause overflow if source chain storage is invalid + Ok(vec![message1, message2, message3]).into(), + 3, + 100, + ), + Error::::InvalidMessagesDispatchWeight, + ); + }); + } + + #[test] + fn increase_message_fee_fails_if_message_is_already_delivered() { + run_test(|| { + send_regular_message(); + receive_messages_delivery_proof(); + + assert_noop!( + Pallet::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,), + Error::::MessageIsAlreadyDelivered, + ); + }); + } + + #[test] + fn increase_message_fee_fails_if_message_is_not_yet_sent() { + run_test(|| { + assert_noop!( + Pallet::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,), + Error::::MessageIsNotYetSent, + ); + }); + } + + #[test] + fn increase_message_fee_fails_if_submitter_cant_pay_additional_fee() { + run_test(|| { + 
send_regular_message(); + + TestMessageDeliveryAndDispatchPayment::reject_payments(); + + assert_noop!( + Pallet::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,), + Error::::FailedToWithdrawMessageFee, + ); + }); + } + + #[test] + fn increase_message_fee_succeeds() { + run_test(|| { + send_regular_message(); + + assert_ok!(Pallet::::increase_message_fee( + Origin::signed(1), + TEST_LANE_ID, + 1, + 100, + ),); + assert!(TestMessageDeliveryAndDispatchPayment::is_fee_paid(1, 100)); + }); + } +} diff --git a/polkadot/modules/messages/src/mock.rs b/polkadot/modules/messages/src/mock.rs new file mode 100644 index 00000000000..e640fa78054 --- /dev/null +++ b/polkadot/modules/messages/src/mock.rs @@ -0,0 +1,404 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +// From construct_runtime macro +#![allow(clippy::from_over_into)] + +use crate::Config; + +use bp_messages::{ + source_chain::{ + LaneMessageVerifier, MessageDeliveryAndDispatchPayment, RelayersRewards, Sender, TargetHeaderChain, + }, + target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain}, + InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, OutboundLaneData, + Parameter as MessagesParameter, +}; +use bp_runtime::Size; +use codec::{Decode, Encode}; +use frame_support::{parameter_types, weights::Weight}; +use sp_core::H256; +use sp_runtime::{ + testing::Header as SubstrateHeader, + traits::{BlakeTwo256, IdentityLookup}, + FixedU128, Perbill, +}; +use std::collections::BTreeMap; + +pub type AccountId = u64; +pub type Balance = u64; +#[derive(Decode, Encode, Clone, Debug, PartialEq, Eq)] +pub struct TestPayload(pub u64, pub Weight); +pub type TestMessageFee = u64; +pub type TestRelayer = u64; + +pub struct AccountIdConverter; + +impl sp_runtime::traits::Convert for AccountIdConverter { + fn convert(hash: H256) -> AccountId { + hash.to_low_u64_ne() + } +} + +type Block = frame_system::mocking::MockBlock; +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + +use crate as pallet_bridge_messages; + +frame_support::construct_runtime! { + pub enum TestRuntime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Event}, + Messages: pallet_bridge_messages::{Pallet, Call, Event}, + } +} + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} + +impl frame_system::Config for TestRuntime { + type Origin = Origin; + type Index = u64; + type Call = Call; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = SubstrateHeader; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type BaseCallFilter = (); + type SystemWeightInfo = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type SS58Prefix = (); + type OnSetCode = (); +} + +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} + +impl pallet_balances::Config for TestRuntime { + type MaxLocks = (); + type Balance = Balance; + type DustRemoval = (); + type Event = Event; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = frame_system::Pallet; + type WeightInfo = (); +} + +parameter_types! 
{ + pub const MaxMessagesToPruneAtOnce: u64 = 10; + pub const MaxUnrewardedRelayerEntriesAtInboundLane: u64 = 16; + pub const MaxUnconfirmedMessagesAtInboundLane: u64 = 32; + pub storage TokenConversionRate: FixedU128 = 1.into(); +} + +#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] +pub enum TestMessagesParameter { + TokenConversionRate(FixedU128), +} + +impl MessagesParameter for TestMessagesParameter { + fn save(&self) { + match *self { + TestMessagesParameter::TokenConversionRate(conversion_rate) => TokenConversionRate::set(&conversion_rate), + } + } +} + +impl Config for TestRuntime { + type Event = Event; + type WeightInfo = (); + type Parameter = TestMessagesParameter; + type MaxMessagesToPruneAtOnce = MaxMessagesToPruneAtOnce; + type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; + type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; + + type OutboundPayload = TestPayload; + type OutboundMessageFee = TestMessageFee; + + type InboundPayload = TestPayload; + type InboundMessageFee = TestMessageFee; + type InboundRelayer = TestRelayer; + + type AccountIdConverter = AccountIdConverter; + + type TargetHeaderChain = TestTargetHeaderChain; + type LaneMessageVerifier = TestLaneMessageVerifier; + type MessageDeliveryAndDispatchPayment = TestMessageDeliveryAndDispatchPayment; + + type SourceHeaderChain = TestSourceHeaderChain; + type MessageDispatch = TestMessageDispatch; +} + +impl Size for TestPayload { + fn size_hint(&self) -> u32 { + 16 + } +} + +/// Account that has balance to use in tests. +pub const ENDOWED_ACCOUNT: AccountId = 0xDEAD; + +/// Account id of test relayer. +pub const TEST_RELAYER_A: AccountId = 100; + +/// Account id of additional test relayer - B. +pub const TEST_RELAYER_B: AccountId = 101; + +/// Account id of additional test relayer - C. +pub const TEST_RELAYER_C: AccountId = 102; + +/// Error that is returned by all test implementations. 
+pub const TEST_ERROR: &str = "Test error"; + +/// Lane that we're using in tests. +pub const TEST_LANE_ID: LaneId = [0, 0, 0, 1]; + +/// Regular message payload. +pub const REGULAR_PAYLOAD: TestPayload = TestPayload(0, 50); + +/// Payload that is rejected by `TestTargetHeaderChain`. +pub const PAYLOAD_REJECTED_BY_TARGET_CHAIN: TestPayload = TestPayload(1, 50); + +/// Vec of proved messages, grouped by lane. +pub type MessagesByLaneVec = Vec<(LaneId, ProvedLaneMessages>)>; + +/// Test messages proof. +#[derive(Debug, Encode, Decode, Clone, PartialEq, Eq)] +pub struct TestMessagesProof { + pub result: Result, +} + +impl Size for TestMessagesProof { + fn size_hint(&self) -> u32 { + 0 + } +} + +impl From>, ()>> for TestMessagesProof { + fn from(result: Result>, ()>) -> Self { + Self { + result: result.map(|messages| { + let mut messages_by_lane: BTreeMap>> = + BTreeMap::new(); + for message in messages { + messages_by_lane + .entry(message.key.lane_id) + .or_default() + .messages + .push(message); + } + messages_by_lane.into_iter().collect() + }), + } + } +} + +/// Messages delivery proof used in tests. +#[derive(Debug, Encode, Decode, Eq, Clone, PartialEq)] +pub struct TestMessagesDeliveryProof(pub Result<(LaneId, InboundLaneData), ()>); + +impl Size for TestMessagesDeliveryProof { + fn size_hint(&self) -> u32 { + 0 + } +} + +/// Target header chain that is used in tests. +#[derive(Debug, Default)] +pub struct TestTargetHeaderChain; + +impl TargetHeaderChain for TestTargetHeaderChain { + type Error = &'static str; + + type MessagesDeliveryProof = TestMessagesDeliveryProof; + + fn verify_message(payload: &TestPayload) -> Result<(), Self::Error> { + if *payload == PAYLOAD_REJECTED_BY_TARGET_CHAIN { + Err(TEST_ERROR) + } else { + Ok(()) + } + } + + fn verify_messages_delivery_proof( + proof: Self::MessagesDeliveryProof, + ) -> Result<(LaneId, InboundLaneData), Self::Error> { + proof.0.map_err(|_| TEST_ERROR) + } +} + +/// Lane message verifier that is used in tests. 
+#[derive(Debug, Default)] +pub struct TestLaneMessageVerifier; + +impl LaneMessageVerifier for TestLaneMessageVerifier { + type Error = &'static str; + + fn verify_message( + _submitter: &Sender, + delivery_and_dispatch_fee: &TestMessageFee, + _lane: &LaneId, + _lane_outbound_data: &OutboundLaneData, + _payload: &TestPayload, + ) -> Result<(), Self::Error> { + if *delivery_and_dispatch_fee != 0 { + Ok(()) + } else { + Err(TEST_ERROR) + } + } +} + +/// Message fee payment system that is used in tests. +#[derive(Debug, Default)] +pub struct TestMessageDeliveryAndDispatchPayment; + +impl TestMessageDeliveryAndDispatchPayment { + /// Reject all payments. + pub fn reject_payments() { + frame_support::storage::unhashed::put(b":reject-message-fee:", &true); + } + + /// Returns true if given fee has been paid by given submitter. + pub fn is_fee_paid(submitter: AccountId, fee: TestMessageFee) -> bool { + frame_support::storage::unhashed::get(b":message-fee:") == Some((Sender::Signed(submitter), fee)) + } + + /// Returns true if given relayer has been rewarded with given balance. The reward-paid flag is + /// cleared after the call. 
+ pub fn is_reward_paid(relayer: AccountId, fee: TestMessageFee) -> bool { + let key = (b":relayer-reward:", relayer, fee).encode(); + frame_support::storage::unhashed::take::(&key).is_some() + } +} + +impl MessageDeliveryAndDispatchPayment for TestMessageDeliveryAndDispatchPayment { + type Error = &'static str; + + fn pay_delivery_and_dispatch_fee( + submitter: &Sender, + fee: &TestMessageFee, + _relayer_fund_account: &AccountId, + ) -> Result<(), Self::Error> { + if frame_support::storage::unhashed::get(b":reject-message-fee:") == Some(true) { + return Err(TEST_ERROR); + } + + frame_support::storage::unhashed::put(b":message-fee:", &(submitter, fee)); + Ok(()) + } + + fn pay_relayers_rewards( + _confirmation_relayer: &AccountId, + relayers_rewards: RelayersRewards, + _relayer_fund_account: &AccountId, + ) { + for (relayer, reward) in relayers_rewards { + let key = (b":relayer-reward:", relayer, reward.reward).encode(); + frame_support::storage::unhashed::put(&key, &true); + } + } +} + +/// Source header chain that is used in tests. +#[derive(Debug)] +pub struct TestSourceHeaderChain; + +impl SourceHeaderChain for TestSourceHeaderChain { + type Error = &'static str; + + type MessagesProof = TestMessagesProof; + + fn verify_messages_proof( + proof: Self::MessagesProof, + _messages_count: u32, + ) -> Result>, Self::Error> { + proof + .result + .map(|proof| proof.into_iter().collect()) + .map_err(|_| TEST_ERROR) + } +} + +/// Source header chain that is used in tests. +#[derive(Debug)] +pub struct TestMessageDispatch; + +impl MessageDispatch for TestMessageDispatch { + type DispatchPayload = TestPayload; + + fn dispatch_weight(message: &DispatchMessage) -> Weight { + match message.data.payload.as_ref() { + Ok(payload) => payload.1, + Err(_) => 0, + } + } + + fn dispatch(_message: DispatchMessage) {} +} + +/// Return test lane message with given nonce and payload. 
+pub fn message(nonce: MessageNonce, payload: TestPayload) -> Message { + Message { + key: MessageKey { + lane_id: TEST_LANE_ID, + nonce, + }, + data: message_data(payload), + } +} + +/// Return message data with valid fee for given payload. +pub fn message_data(payload: TestPayload) -> MessageData { + MessageData { + payload: payload.encode(), + fee: 1, + } +} + +/// Run pallet test. +pub fn run_test(test: impl FnOnce() -> T) -> T { + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(ENDOWED_ACCOUNT, 1_000_000)], + } + .assimilate_storage(&mut t) + .unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(test) +} diff --git a/polkadot/modules/messages/src/outbound_lane.rs b/polkadot/modules/messages/src/outbound_lane.rs new file mode 100644 index 00000000000..47616c33eac --- /dev/null +++ b/polkadot/modules/messages/src/outbound_lane.rs @@ -0,0 +1,203 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Everything about outgoing messages sending. + +use bp_messages::{LaneId, MessageData, MessageNonce, OutboundLaneData}; + +/// Outbound lane storage. +pub trait OutboundLaneStorage { + /// Delivery and dispatch fee type on source chain. 
+ type MessageFee; + + /// Lane id. + fn id(&self) -> LaneId; + /// Get lane data from the storage. + fn data(&self) -> OutboundLaneData; + /// Update lane data in the storage. + fn set_data(&mut self, data: OutboundLaneData); + /// Returns saved outbound message payload. + #[cfg(test)] + fn message(&self, nonce: &MessageNonce) -> Option>; + /// Save outbound message in the storage. + fn save_message(&mut self, nonce: MessageNonce, message_data: MessageData); + /// Remove outbound message from the storage. + fn remove_message(&mut self, nonce: &MessageNonce); +} + +/// Outbound messages lane. +pub struct OutboundLane { + storage: S, +} + +impl OutboundLane { + /// Create new inbound lane backed by given storage. + pub fn new(storage: S) -> Self { + OutboundLane { storage } + } + + /// Get this lane data. + pub fn data(&self) -> OutboundLaneData { + self.storage.data() + } + + /// Send message over lane. + /// + /// Returns new message nonce. + pub fn send_message(&mut self, message_data: MessageData) -> MessageNonce { + let mut data = self.storage.data(); + let nonce = data.latest_generated_nonce + 1; + data.latest_generated_nonce = nonce; + + self.storage.save_message(nonce, message_data); + self.storage.set_data(data); + + nonce + } + + /// Confirm messages delivery. + /// + /// Returns `None` if confirmation is wrong/duplicate. + /// Returns `Some` with inclusive ranges of message nonces that have been received. 
+ pub fn confirm_delivery(&mut self, latest_received_nonce: MessageNonce) -> Option<(MessageNonce, MessageNonce)> { + let mut data = self.storage.data(); + if latest_received_nonce <= data.latest_received_nonce || latest_received_nonce > data.latest_generated_nonce { + return None; + } + + let prev_latest_received_nonce = data.latest_received_nonce; + data.latest_received_nonce = latest_received_nonce; + self.storage.set_data(data); + + Some((prev_latest_received_nonce + 1, latest_received_nonce)) + } + + /// Prune at most `max_messages_to_prune` already received messages. + /// + /// Returns number of pruned messages. + pub fn prune_messages(&mut self, max_messages_to_prune: MessageNonce) -> MessageNonce { + let mut pruned_messages = 0; + let mut anything_changed = false; + let mut data = self.storage.data(); + while pruned_messages < max_messages_to_prune && data.oldest_unpruned_nonce <= data.latest_received_nonce { + self.storage.remove_message(&data.oldest_unpruned_nonce); + + anything_changed = true; + pruned_messages += 1; + data.oldest_unpruned_nonce += 1; + } + + if anything_changed { + self.storage.set_data(data); + } + + pruned_messages + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + mock::{message_data, run_test, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID}, + outbound_lane, + }; + + #[test] + fn send_message_works() { + run_test(|| { + let mut lane = outbound_lane::(TEST_LANE_ID); + assert_eq!(lane.storage.data().latest_generated_nonce, 0); + assert_eq!(lane.send_message(message_data(REGULAR_PAYLOAD)), 1); + assert!(lane.storage.message(&1).is_some()); + assert_eq!(lane.storage.data().latest_generated_nonce, 1); + }); + } + + #[test] + fn confirm_delivery_works() { + run_test(|| { + let mut lane = outbound_lane::(TEST_LANE_ID); + assert_eq!(lane.send_message(message_data(REGULAR_PAYLOAD)), 1); + assert_eq!(lane.send_message(message_data(REGULAR_PAYLOAD)), 2); + assert_eq!(lane.send_message(message_data(REGULAR_PAYLOAD)), 3); + 
assert_eq!(lane.storage.data().latest_generated_nonce, 3); + assert_eq!(lane.storage.data().latest_received_nonce, 0); + assert_eq!(lane.confirm_delivery(3), Some((1, 3))); + assert_eq!(lane.storage.data().latest_generated_nonce, 3); + assert_eq!(lane.storage.data().latest_received_nonce, 3); + }); + } + + #[test] + fn confirm_delivery_rejects_nonce_lesser_than_latest_received() { + run_test(|| { + let mut lane = outbound_lane::(TEST_LANE_ID); + lane.send_message(message_data(REGULAR_PAYLOAD)); + lane.send_message(message_data(REGULAR_PAYLOAD)); + lane.send_message(message_data(REGULAR_PAYLOAD)); + assert_eq!(lane.storage.data().latest_generated_nonce, 3); + assert_eq!(lane.storage.data().latest_received_nonce, 0); + assert_eq!(lane.confirm_delivery(3), Some((1, 3))); + assert_eq!(lane.confirm_delivery(3), None); + assert_eq!(lane.storage.data().latest_generated_nonce, 3); + assert_eq!(lane.storage.data().latest_received_nonce, 3); + + assert_eq!(lane.confirm_delivery(2), None); + assert_eq!(lane.storage.data().latest_generated_nonce, 3); + assert_eq!(lane.storage.data().latest_received_nonce, 3); + }); + } + + #[test] + fn confirm_delivery_rejects_nonce_larger_than_last_generated() { + run_test(|| { + let mut lane = outbound_lane::(TEST_LANE_ID); + lane.send_message(message_data(REGULAR_PAYLOAD)); + lane.send_message(message_data(REGULAR_PAYLOAD)); + lane.send_message(message_data(REGULAR_PAYLOAD)); + assert_eq!(lane.storage.data().latest_generated_nonce, 3); + assert_eq!(lane.storage.data().latest_received_nonce, 0); + assert_eq!(lane.confirm_delivery(10), None); + assert_eq!(lane.storage.data().latest_generated_nonce, 3); + assert_eq!(lane.storage.data().latest_received_nonce, 0); + }); + } + + #[test] + fn prune_messages_works() { + run_test(|| { + let mut lane = outbound_lane::(TEST_LANE_ID); + // when lane is empty, nothing is pruned + assert_eq!(lane.prune_messages(100), 0); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); + // when nothing is 
confirmed, nothing is pruned + lane.send_message(message_data(REGULAR_PAYLOAD)); + lane.send_message(message_data(REGULAR_PAYLOAD)); + lane.send_message(message_data(REGULAR_PAYLOAD)); + assert_eq!(lane.prune_messages(100), 0); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); + // after confirmation, some messages are received + assert_eq!(lane.confirm_delivery(2), Some((1, 2))); + assert_eq!(lane.prune_messages(100), 2); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 3); + // after last message is confirmed, everything is pruned + assert_eq!(lane.confirm_delivery(3), Some((3, 3))); + assert_eq!(lane.prune_messages(100), 1); + assert_eq!(lane.storage.data().oldest_unpruned_nonce, 4); + }); + } +} diff --git a/polkadot/modules/messages/src/weights.rs b/polkadot/modules/messages/src/weights.rs new file mode 100644 index 00000000000..0eecd0d8462 --- /dev/null +++ b/polkadot/modules/messages/src/weights.rs @@ -0,0 +1,289 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Autogenerated weights for pallet_bridge_messages +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-04-14, STEPS: [50, ], REPEAT: 20 +//! LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled +//! 
CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/rialto-bridge-node +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_bridge_messages +// --extrinsic=* +// --execution=wasm +// --wasm-execution=Compiled +// --heap-pages=4096 +// --output=./modules/messages/src/weights.rs +// --template=./.maintain/rialto-weight-template.hbs + +#![allow(clippy::all)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{ + traits::Get, + weights::{constants::RocksDbWeight, Weight}, +}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_bridge_messages. +pub trait WeightInfo { + fn send_minimal_message_worst_case() -> Weight; + fn send_1_kb_message_worst_case() -> Weight; + fn send_16_kb_message_worst_case() -> Weight; + fn increase_message_fee() -> Weight; + fn receive_single_message_proof() -> Weight; + fn receive_two_messages_proof() -> Weight; + fn receive_single_message_proof_with_outbound_lane_state() -> Weight; + fn receive_single_message_proof_1_kb() -> Weight; + fn receive_single_message_proof_16_kb() -> Weight; + fn receive_delivery_proof_for_single_message() -> Weight; + fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight; + fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight; + fn send_messages_of_various_lengths(i: u32) -> Weight; + fn receive_multiple_messages_proof(i: u32) -> Weight; + fn receive_message_proofs_with_extra_nodes(i: u32) -> Weight; + fn receive_message_proofs_with_large_leaf(i: u32) -> Weight; + fn receive_multiple_messages_proof_with_outbound_lane_state(i: u32) -> Weight; + fn receive_delivery_proof_for_multiple_messages_by_single_relayer(i: u32) -> Weight; + fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight; +} + +/// Weights for pallet_bridge_messages using the Rialto node and recommended hardware. 
+pub struct RialtoWeight(PhantomData); +impl WeightInfo for RialtoWeight { + fn send_minimal_message_worst_case() -> Weight { + (149_497_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(12 as Weight)) + } + fn send_1_kb_message_worst_case() -> Weight { + (154_339_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(12 as Weight)) + } + fn send_16_kb_message_worst_case() -> Weight { + (200_066_000 as Weight) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(12 as Weight)) + } + fn increase_message_fee() -> Weight { + (6_432_637_000 as Weight) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn receive_single_message_proof() -> Weight { + (141_671_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn receive_two_messages_proof() -> Weight { + (247_393_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn receive_single_message_proof_with_outbound_lane_state() -> Weight { + (159_312_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn receive_single_message_proof_1_kb() -> Weight { + (167_935_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn receive_single_message_proof_16_kb() -> Weight { + (449_846_000 as Weight) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn receive_delivery_proof_for_single_message() -> Weight { + (127_322_000 as Weight) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + 
.saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { + (134_120_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { + (191_193_000 as Weight) + .saturating_add(T::DbWeight::get().reads(8 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + fn send_messages_of_various_lengths(i: u32) -> Weight { + (115_699_000 as Weight) + .saturating_add((3_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().writes(12 as Weight)) + } + fn receive_multiple_messages_proof(i: u32) -> Weight { + (0 as Weight) + .saturating_add((113_551_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn receive_message_proofs_with_extra_nodes(i: u32) -> Weight { + (458_731_000 as Weight) + .saturating_add((9_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn receive_message_proofs_with_large_leaf(i: u32) -> Weight { + (82_314_000 as Weight) + .saturating_add((7_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn receive_multiple_messages_proof_with_outbound_lane_state(i: u32) -> Weight { + (16_766_000 as Weight) + .saturating_add((115_533_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn receive_delivery_proof_for_multiple_messages_by_single_relayer(i: u32) -> Weight { + (122_146_000 as Weight) + 
.saturating_add((6_789_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight { + (155_671_000 as Weight) + .saturating_add((63_020_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(T::DbWeight::get().reads(5 as Weight)) + .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(i as Weight))) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn send_minimal_message_worst_case() -> Weight { + (149_497_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(12 as Weight)) + } + fn send_1_kb_message_worst_case() -> Weight { + (154_339_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(12 as Weight)) + } + fn send_16_kb_message_worst_case() -> Weight { + (200_066_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(12 as Weight)) + } + fn increase_message_fee() -> Weight { + (6_432_637_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(4 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn receive_single_message_proof() -> Weight { + (141_671_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn receive_two_messages_proof() -> Weight { + (247_393_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as 
Weight)) + } + fn receive_single_message_proof_with_outbound_lane_state() -> Weight { + (159_312_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn receive_single_message_proof_1_kb() -> Weight { + (167_935_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn receive_single_message_proof_16_kb() -> Weight { + (449_846_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn receive_delivery_proof_for_single_message() -> Weight { + (127_322_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { + (134_120_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { + (191_193_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(8 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + fn send_messages_of_various_lengths(i: u32) -> Weight { + (115_699_000 as Weight) + .saturating_add((3_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().writes(12 as Weight)) + } + fn receive_multiple_messages_proof(i: u32) -> Weight { + (0 as Weight) + .saturating_add((113_551_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn receive_message_proofs_with_extra_nodes(i: u32) -> Weight { + (458_731_000 as Weight) + .saturating_add((9_000 as Weight).saturating_mul(i as Weight)) + 
.saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn receive_message_proofs_with_large_leaf(i: u32) -> Weight { + (82_314_000 as Weight) + .saturating_add((7_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn receive_multiple_messages_proof_with_outbound_lane_state(i: u32) -> Weight { + (16_766_000 as Weight) + .saturating_add((115_533_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn receive_delivery_proof_for_multiple_messages_by_single_relayer(i: u32) -> Weight { + (122_146_000 as Weight) + .saturating_add((6_789_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(i as Weight))) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight { + (155_671_000 as Weight) + .saturating_add((63_020_000 as Weight).saturating_mul(i as Weight)) + .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(i as Weight))) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) + } +} diff --git a/polkadot/modules/messages/src/weights_ext.rs b/polkadot/modules/messages/src/weights_ext.rs new file mode 100644 index 00000000000..cb754a10231 --- /dev/null +++ b/polkadot/modules/messages/src/weights_ext.rs @@ -0,0 +1,319 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Weight-related utilities. + +use crate::weights::WeightInfo; + +use bp_messages::{MessageNonce, UnrewardedRelayersState}; +use bp_runtime::{PreComputedSize, Size}; +use frame_support::weights::Weight; + +/// Size of the message being delivered in benchmarks. +pub const EXPECTED_DEFAULT_MESSAGE_LENGTH: u32 = 128; + +/// We assume that size of signed extensions on all our chains and size of all 'small' arguments of calls +/// we're checking here would fit 1KB. +const SIGNED_EXTENSIONS_SIZE: u32 = 1024; + +/// Ensure that weights from `WeightInfoExt` implementation are looking correct. 
+pub fn ensure_weights_are_correct( + expected_default_message_delivery_tx_weight: Weight, + expected_additional_byte_delivery_weight: Weight, + expected_messages_delivery_confirmation_tx_weight: Weight, +) { + // verify `send_message` weight components + assert_ne!(W::send_message_overhead(), 0); + assert_ne!(W::send_message_size_overhead(0), 0); + + // verify `receive_messages_proof` weight components + assert_ne!(W::receive_messages_proof_overhead(), 0); + assert_ne!(W::receive_messages_proof_messages_overhead(1), 0); + assert_ne!(W::receive_messages_proof_outbound_lane_state_overhead(), 0); + assert_ne!(W::storage_proof_size_overhead(1), 0); + + // verify that the hardcoded value covers `receive_messages_proof` weight + let actual_single_regular_message_delivery_tx_weight = W::receive_messages_proof_weight( + &PreComputedSize((EXPECTED_DEFAULT_MESSAGE_LENGTH + W::expected_extra_storage_proof_size()) as usize), + 1, + 0, + ); + assert!( + actual_single_regular_message_delivery_tx_weight <= expected_default_message_delivery_tx_weight, + "Default message delivery transaction weight {} is larger than expected weight {}", + actual_single_regular_message_delivery_tx_weight, + expected_default_message_delivery_tx_weight, + ); + + // verify that hardcoded value covers additional byte length of `receive_messages_proof` weight + let actual_additional_byte_delivery_weight = W::storage_proof_size_overhead(1); + assert!( + actual_additional_byte_delivery_weight <= expected_additional_byte_delivery_weight, + "Single additional byte delivery weight {} is larger than expected weight {}", + actual_additional_byte_delivery_weight, + expected_additional_byte_delivery_weight, + ); + + // verify `receive_messages_delivery_proof` weight components + assert_ne!(W::receive_messages_delivery_proof_overhead(), 0); + assert_ne!(W::receive_messages_delivery_proof_messages_overhead(1), 0); + assert_ne!(W::receive_messages_delivery_proof_relayers_overhead(1), 0); + 
assert_ne!(W::storage_proof_size_overhead(1), 0); + + // verify that the hardcoded value covers `receive_messages_delivery_proof` weight + let actual_messages_delivery_confirmation_tx_weight = W::receive_messages_delivery_proof_weight( + &PreComputedSize(W::expected_extra_storage_proof_size() as usize), + &UnrewardedRelayersState { + unrewarded_relayer_entries: 1, + total_messages: 1, + ..Default::default() + }, + ); + assert!( + actual_messages_delivery_confirmation_tx_weight <= expected_messages_delivery_confirmation_tx_weight, + "Messages delivery confirmation transaction weight {} is larger than expected weight {}", + actual_messages_delivery_confirmation_tx_weight, + expected_messages_delivery_confirmation_tx_weight, + ); +} + +/// Ensure that we're able to receive maximal (by-size and by-weight) message from other chain. +pub fn ensure_able_to_receive_message( + max_extrinsic_size: u32, + max_extrinsic_weight: Weight, + max_incoming_message_proof_size: u32, + max_incoming_message_dispatch_weight: Weight, +) { + // verify that we're able to receive proof of maximal-size message + let max_delivery_transaction_size = max_incoming_message_proof_size.saturating_add(SIGNED_EXTENSIONS_SIZE); + assert!( + max_delivery_transaction_size <= max_extrinsic_size, + "Size of maximal message delivery transaction {} + {} is larger than maximal possible transaction size {}", + max_incoming_message_proof_size, + SIGNED_EXTENSIONS_SIZE, + max_extrinsic_size, + ); + + // verify that we're able to receive proof of maximal-size message with maximal dispatch weight + let max_delivery_transaction_dispatch_weight = W::receive_messages_proof_weight( + &PreComputedSize((max_incoming_message_proof_size + W::expected_extra_storage_proof_size()) as usize), + 1, + max_incoming_message_dispatch_weight, + ); + assert!( + max_delivery_transaction_dispatch_weight <= max_extrinsic_weight, + "Weight of maximal message delivery transaction + {} is larger than maximal possible transaction weight 
{}", + max_delivery_transaction_dispatch_weight, + max_extrinsic_weight, + ); +} + +/// Ensure that we're able to receive maximal confirmation from other chain. +pub fn ensure_able_to_receive_confirmation( + max_extrinsic_size: u32, + max_extrinsic_weight: Weight, + max_inbound_lane_data_proof_size_from_peer_chain: u32, + max_unrewarded_relayer_entries_at_peer_inbound_lane: MessageNonce, + max_unconfirmed_messages_at_inbound_lane: MessageNonce, +) { + // verify that we're able to receive confirmation of maximal-size + let max_confirmation_transaction_size = + max_inbound_lane_data_proof_size_from_peer_chain.saturating_add(SIGNED_EXTENSIONS_SIZE); + assert!( + max_confirmation_transaction_size <= max_extrinsic_size, + "Size of maximal message delivery confirmation transaction {} + {} is larger than maximal possible transaction size {}", + max_inbound_lane_data_proof_size_from_peer_chain, + SIGNED_EXTENSIONS_SIZE, + max_extrinsic_size, + ); + + // verify that we're able to reward maximal number of relayers that have delivered maximal number of messages + let max_confirmation_transaction_dispatch_weight = W::receive_messages_delivery_proof_weight( + &PreComputedSize(max_inbound_lane_data_proof_size_from_peer_chain as usize), + &UnrewardedRelayersState { + unrewarded_relayer_entries: max_unrewarded_relayer_entries_at_peer_inbound_lane, + total_messages: max_unconfirmed_messages_at_inbound_lane, + ..Default::default() + }, + ); + assert!( + max_confirmation_transaction_dispatch_weight <= max_extrinsic_weight, + "Weight of maximal confirmation transaction {} is larger than maximal possible transaction weight {}", + max_confirmation_transaction_dispatch_weight, + max_extrinsic_weight, + ); +} + +/// Extended weight info. +pub trait WeightInfoExt: WeightInfo { + /// Size of proof that is already included in the single message delivery weight. + /// + /// The message submitter (at source chain) has already covered this cost. 
But there are two + /// factors that may increase proof size: (1) the message size may be larger than predefined + /// and (2) relayer may add extra trie nodes to the proof. So if proof size is larger than + /// this value, we're going to charge relayer for that. + fn expected_extra_storage_proof_size() -> u32; + + // Functions that are directly mapped to extrinsics weights. + + /// Weight of message send extrinsic. + fn send_message_weight(message: &impl Size) -> Weight { + let transaction_overhead = Self::send_message_overhead(); + let message_size_overhead = Self::send_message_size_overhead(message.size_hint()); + + transaction_overhead.saturating_add(message_size_overhead) + } + + /// Weight of message delivery extrinsic. + fn receive_messages_proof_weight(proof: &impl Size, messages_count: u32, dispatch_weight: Weight) -> Weight { + // basic components of extrinsic weight + let transaction_overhead = Self::receive_messages_proof_overhead(); + let outbound_state_delivery_weight = Self::receive_messages_proof_outbound_lane_state_overhead(); + let messages_delivery_weight = + Self::receive_messages_proof_messages_overhead(MessageNonce::from(messages_count)); + let messages_dispatch_weight = dispatch_weight; + + // proof size overhead weight + let expected_proof_size = EXPECTED_DEFAULT_MESSAGE_LENGTH + .saturating_mul(messages_count.saturating_sub(1)) + .saturating_add(Self::expected_extra_storage_proof_size()); + let actual_proof_size = proof.size_hint(); + let proof_size_overhead = + Self::storage_proof_size_overhead(actual_proof_size.saturating_sub(expected_proof_size)); + + transaction_overhead + .saturating_add(outbound_state_delivery_weight) + .saturating_add(messages_delivery_weight) + .saturating_add(messages_dispatch_weight) + .saturating_add(proof_size_overhead) + } + + /// Weight of confirmation delivery extrinsic. 
+ fn receive_messages_delivery_proof_weight(proof: &impl Size, relayers_state: &UnrewardedRelayersState) -> Weight { + // basic components of extrinsic weight + let transaction_overhead = Self::receive_messages_delivery_proof_overhead(); + let messages_overhead = Self::receive_messages_delivery_proof_messages_overhead(relayers_state.total_messages); + let relayers_overhead = + Self::receive_messages_delivery_proof_relayers_overhead(relayers_state.unrewarded_relayer_entries); + + // proof size overhead weight + let expected_proof_size = Self::expected_extra_storage_proof_size(); + let actual_proof_size = proof.size_hint(); + let proof_size_overhead = + Self::storage_proof_size_overhead(actual_proof_size.saturating_sub(expected_proof_size)); + + transaction_overhead + .saturating_add(messages_overhead) + .saturating_add(relayers_overhead) + .saturating_add(proof_size_overhead) + } + + // Functions that are used by extrinsics weights formulas. + + /// Returns weight of message send transaction (`send_message`). + fn send_message_overhead() -> Weight { + Self::send_minimal_message_worst_case() + } + + /// Returns weight that needs to be accounted when message of given size is sent (`send_message`). + fn send_message_size_overhead(message_size: u32) -> Weight { + let message_size_in_kb = (1024u64 + message_size as u64) / 1024; + let single_kb_weight = (Self::send_16_kb_message_worst_case() - Self::send_1_kb_message_worst_case()) / 15; + message_size_in_kb * single_kb_weight + } + + /// Returns weight overhead of message delivery transaction (`receive_messages_proof`). 
+ fn receive_messages_proof_overhead() -> Weight { + let weight_of_two_messages_and_two_tx_overheads = Self::receive_single_message_proof().saturating_mul(2); + let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof(); + weight_of_two_messages_and_two_tx_overheads.saturating_sub(weight_of_two_messages_and_single_tx_overhead) + } + + /// Returns weight that needs to be accounted when receiving given number of messages with message + /// delivery transaction (`receive_messages_proof`). + fn receive_messages_proof_messages_overhead(messages: MessageNonce) -> Weight { + let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof(); + let weight_of_single_message_and_single_tx_overhead = Self::receive_single_message_proof(); + weight_of_two_messages_and_single_tx_overhead + .saturating_sub(weight_of_single_message_and_single_tx_overhead) + .saturating_mul(messages as Weight) + } + + /// Returns weight that needs to be accounted when message delivery transaction (`receive_messages_proof`) + /// is carrying outbound lane state proof. + fn receive_messages_proof_outbound_lane_state_overhead() -> Weight { + let weight_of_single_message_and_lane_state = Self::receive_single_message_proof_with_outbound_lane_state(); + let weight_of_single_message = Self::receive_single_message_proof(); + weight_of_single_message_and_lane_state.saturating_sub(weight_of_single_message) + } + + /// Returns weight overhead of delivery confirmation transaction (`receive_messages_delivery_proof`). 
+ fn receive_messages_delivery_proof_overhead() -> Weight { + let weight_of_two_messages_and_two_tx_overheads = + Self::receive_delivery_proof_for_single_message().saturating_mul(2); + let weight_of_two_messages_and_single_tx_overhead = + Self::receive_delivery_proof_for_two_messages_by_single_relayer(); + weight_of_two_messages_and_two_tx_overheads.saturating_sub(weight_of_two_messages_and_single_tx_overhead) + } + + /// Returns weight that needs to be accounted when receiving confirmations for given number of + /// messages with delivery confirmation transaction (`receive_messages_delivery_proof`). + fn receive_messages_delivery_proof_messages_overhead(messages: MessageNonce) -> Weight { + let weight_of_two_messages = Self::receive_delivery_proof_for_two_messages_by_single_relayer(); + let weight_of_single_message = Self::receive_delivery_proof_for_single_message(); + weight_of_two_messages + .saturating_sub(weight_of_single_message) + .saturating_mul(messages as Weight) + } + + /// Returns weight that needs to be accounted when receiving confirmations for given number of + /// relayers entries with delivery confirmation transaction (`receive_messages_delivery_proof`). + fn receive_messages_delivery_proof_relayers_overhead(relayers: MessageNonce) -> Weight { + let weight_of_two_messages_by_two_relayers = Self::receive_delivery_proof_for_two_messages_by_two_relayers(); + let weight_of_two_messages_by_single_relayer = + Self::receive_delivery_proof_for_two_messages_by_single_relayer(); + weight_of_two_messages_by_two_relayers + .saturating_sub(weight_of_two_messages_by_single_relayer) + .saturating_mul(relayers as Weight) + } + + /// Returns weight that needs to be accounted when storage proof of given size is recieved (either in + /// `receive_messages_proof` or `receive_messages_delivery_proof`). + /// + /// **IMPORTANT**: this overhead is already included in the 'base' transaction cost - e.g. 
proof + /// size depends on messages count or number of entries in the unrewarded relayers set. So this + /// shouldn't be added to cost of transaction, but instead should act as a minimal cost that the + /// relayer must pay when it relays proof of given size (even if cost based on other parameters + /// is less than that cost). + fn storage_proof_size_overhead(proof_size: u32) -> Weight { + let proof_size_in_bytes = proof_size as Weight; + let byte_weight = + (Self::receive_single_message_proof_16_kb() - Self::receive_single_message_proof_1_kb()) / (15 * 1024); + proof_size_in_bytes * byte_weight + } +} + +impl WeightInfoExt for () { + fn expected_extra_storage_proof_size() -> u32 { + bp_rialto::EXTRA_STORAGE_PROOF_SIZE + } +} + +impl WeightInfoExt for crate::weights::RialtoWeight { + fn expected_extra_storage_proof_size() -> u32 { + bp_rialto::EXTRA_STORAGE_PROOF_SIZE + } +} diff --git a/polkadot/modules/shift-session-manager/Cargo.toml b/polkadot/modules/shift-session-manager/Cargo.toml new file mode 100644 index 00000000000..6dac97ddde6 --- /dev/null +++ b/polkadot/modules/shift-session-manager/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "pallet-shift-session-manager" +description = "A Substrate Runtime module that selects 2/3 of initial validators for every session" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } + +# Substrate Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master" , 
default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + +[dev-dependencies] +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +serde = "1.0" + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "pallet-session/std", + "sp-staking/std", + "sp-std/std", +] diff --git a/polkadot/modules/shift-session-manager/src/lib.rs b/polkadot/modules/shift-session-manager/src/lib.rs new file mode 100644 index 00000000000..0d867657afa --- /dev/null +++ b/polkadot/modules/shift-session-manager/src/lib.rs @@ -0,0 +1,228 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Substrate session manager that selects 2/3 validators from initial set, +//! starting from session 2. + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::{decl_module, decl_storage}; +use sp_std::prelude::*; + +/// The module configuration trait. +pub trait Config: pallet_session::Config {} + +decl_module! { + /// Shift session manager pallet. + pub struct Module for enum Call where origin: T::Origin {} +} + +decl_storage! 
{ + trait Store for Pallet as ShiftSessionManager { + /// Validators of first two sessions. + InitialValidators: Option>; + } +} + +impl pallet_session::SessionManager for Pallet { + fn end_session(_: sp_staking::SessionIndex) {} + fn start_session(_: sp_staking::SessionIndex) {} + fn new_session(session_index: sp_staking::SessionIndex) -> Option> { + // we don't want to add even more fields to genesis config => just return None + if session_index == 0 || session_index == 1 { + return None; + } + + // the idea that on first call (i.e. when session 1 ends) we're reading current + // set of validators from session module (they are initial validators) and save + // in our 'local storage'. + // then for every session we select (deterministically) 2/3 of these initial + // validators to serve validators of new session + let available_validators = InitialValidators::::get().unwrap_or_else(|| { + let validators = >::validators(); + InitialValidators::::put(validators.clone()); + validators + }); + + Some(Self::select_validators(session_index, &available_validators)) + } +} + +impl Pallet { + /// Select validators for session. + fn select_validators( + session_index: sp_staking::SessionIndex, + available_validators: &[T::ValidatorId], + ) -> Vec { + let available_validators_count = available_validators.len(); + let count = sp_std::cmp::max(1, 2 * available_validators_count / 3); + let offset = session_index as usize % available_validators_count; + let end = offset + count; + let session_validators = match end.overflowing_sub(available_validators_count) { + (wrapped_end, false) if wrapped_end != 0 => available_validators[offset..] 
+ .iter() + .chain(available_validators[..wrapped_end].iter()) + .cloned() + .collect(), + _ => available_validators[offset..end].to_vec(), + }; + + session_validators + } +} + +#[cfg(test)] +mod tests { + // From construct_runtime macro + #![allow(clippy::from_over_into)] + + use super::*; + use frame_support::sp_io::TestExternalities; + use frame_support::sp_runtime::{ + testing::{Header, UintAuthorityId}, + traits::{BlakeTwo256, ConvertInto, IdentityLookup}, + Perbill, RuntimeAppPublic, + }; + use frame_support::{parameter_types, weights::Weight, BasicExternalities}; + use sp_core::H256; + + type AccountId = u64; + + type Block = frame_system::mocking::MockBlock; + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + + frame_support::construct_runtime! { + pub enum TestRuntime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet}, + } + } + + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + + impl frame_system::Config for TestRuntime { + type Origin = Origin; + type Index = u64; + type Call = Call; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type BaseCallFilter = (); + type SystemWeightInfo = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type SS58Prefix = (); + type OnSetCode = (); + } + + parameter_types! 
{ + pub const Period: u64 = 1; + pub const Offset: u64 = 0; + } + + impl pallet_session::Config for TestRuntime { + type Event = (); + type ValidatorId = ::AccountId; + type ValidatorIdOf = ConvertInto; + type ShouldEndSession = pallet_session::PeriodicSessions; + type NextSessionRotation = pallet_session::PeriodicSessions; + type SessionManager = (); + type SessionHandler = TestSessionHandler; + type Keys = UintAuthorityId; + type DisabledValidatorsThreshold = (); + type WeightInfo = (); + } + + impl Config for TestRuntime {} + + pub struct TestSessionHandler; + impl pallet_session::SessionHandler for TestSessionHandler { + const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID]; + + fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} + + fn on_new_session(_: bool, _: &[(AccountId, Ks)], _: &[(AccountId, Ks)]) {} + + fn on_disabled(_: usize) {} + } + + fn new_test_ext() -> TestExternalities { + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + + let keys = vec![ + (1, 1, UintAuthorityId(1)), + (2, 2, UintAuthorityId(2)), + (3, 3, UintAuthorityId(3)), + (4, 4, UintAuthorityId(4)), + (5, 5, UintAuthorityId(5)), + ]; + + BasicExternalities::execute_with_storage(&mut t, || { + for (ref k, ..) 
in &keys { + frame_system::Pallet::::inc_providers(k); + } + }); + + pallet_session::GenesisConfig:: { keys } + .assimilate_storage(&mut t) + .unwrap(); + TestExternalities::new(t) + } + + #[test] + fn shift_session_manager_works() { + new_test_ext().execute_with(|| { + let all_accs = vec![1, 2, 3, 4, 5]; + + // at least 1 validator is selected + assert_eq!(Pallet::::select_validators(0, &[1]), vec![1],); + + // at session#0, shift is also 0 + assert_eq!(Pallet::::select_validators(0, &all_accs), vec![1, 2, 3],); + + // at session#1, shift is also 1 + assert_eq!(Pallet::::select_validators(1, &all_accs), vec![2, 3, 4],); + + // at session#3, we're wrapping + assert_eq!(Pallet::::select_validators(3, &all_accs), vec![4, 5, 1],); + + // at session#5, we're starting from the beginning again + assert_eq!(Pallet::::select_validators(5, &all_accs), vec![1, 2, 3],); + }); + } +} diff --git a/polkadot/primitives/chain-kusama/Cargo.toml b/polkadot/primitives/chain-kusama/Cargo.toml new file mode 100644 index 00000000000..70ff3b844df --- /dev/null +++ b/polkadot/primitives/chain-kusama/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "bp-kusama" +description = "Primitives of Kusama runtime." 
+version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] + +# Bridge Dependencies +bp-messages = { path = "../messages", default-features = false } +bp-polkadot-core = { path = "../polkadot-core", default-features = false } +bp-runtime = { path = "../runtime", default-features = false } + +# Substrate Based Dependencies +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +[features] +default = ["std"] +std = [ + "bp-messages/std", + "bp-polkadot-core/std", + "bp-runtime/std", + "sp-api/std", + "sp-std/std", +] diff --git a/polkadot/primitives/chain-kusama/src/lib.rs b/polkadot/primitives/chain-kusama/src/lib.rs new file mode 100644 index 00000000000..7163d15ef13 --- /dev/null +++ b/polkadot/primitives/chain-kusama/src/lib.rs @@ -0,0 +1,117 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +#![cfg_attr(not(feature = "std"), no_std)] +// RuntimeApi generated functions +#![allow(clippy::too_many_arguments)] +// Runtime-generated DecodeLimit::decode_all_with_depth_limit +#![allow(clippy::unnecessary_mut_passed)] + +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; +use sp_std::prelude::*; + +pub use bp_polkadot_core::*; + +/// Kusama Chain +pub type Kusama = PolkadotLike; + +// We use this to get the account on Kusama (target) which is derived from Polkadot's (source) +// account. +pub fn derive_account_from_polkadot_id(id: bp_runtime::SourceAccount) -> AccountId { + let encoded_id = bp_runtime::derive_account_id(bp_runtime::POLKADOT_BRIDGE_INSTANCE, id); + AccountIdConverter::convert(encoded_id) +} + +/// Name of the `KusamaFinalityApi::best_finalized` runtime method. +pub const BEST_FINALIZED_KUSAMA_HEADER_METHOD: &str = "KusamaFinalityApi_best_finalized"; +/// Name of the `KusamaFinalityApi::is_known_header` runtime method. +pub const IS_KNOWN_KUSAMA_HEADER_METHOD: &str = "KusamaFinalityApi_is_known_header"; + +/// Name of the `ToKusamaOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. +pub const TO_KUSAMA_ESTIMATE_MESSAGE_FEE_METHOD: &str = + "ToKusamaOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; +/// Name of the `ToKusamaOutboundLaneApi::messages_dispatch_weight` runtime method. +pub const TO_KUSAMA_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToKusamaOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToKusamaOutboundLaneApi::latest_generated_nonce` runtime method. +pub const TO_KUSAMA_LATEST_GENERATED_NONCE_METHOD: &str = "ToKusamaOutboundLaneApi_latest_generated_nonce"; +/// Name of the `ToKusamaOutboundLaneApi::latest_received_nonce` runtime method. +pub const TO_KUSAMA_LATEST_RECEIVED_NONCE_METHOD: &str = "ToKusamaOutboundLaneApi_latest_received_nonce"; + +/// Name of the `FromKusamaInboundLaneApi::latest_received_nonce` runtime method. 
+pub const FROM_KUSAMA_LATEST_RECEIVED_NONCE_METHOD: &str = "FromKusamaInboundLaneApi_latest_received_nonce"; +/// Name of the `FromKusamaInboundLaneApi::latest_onfirmed_nonce` runtime method. +pub const FROM_KUSAMA_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromKusamaInboundLaneApi_latest_confirmed_nonce"; +/// Name of the `FromKusamaInboundLaneApi::unrewarded_relayers_state` runtime method. +pub const FROM_KUSAMA_UNREWARDED_RELAYERS_STATE: &str = "FromKusamaInboundLaneApi_unrewarded_relayers_state"; + +sp_api::decl_runtime_apis! { + /// API for querying information about the finalized Kusama headers. + /// + /// This API is implemented by runtimes that are bridging with the Kusama chain, not the + /// Kusama runtime itself. + pub trait KusamaFinalityApi { + /// Returns number and hash of the best finalized header known to the bridge module. + fn best_finalized() -> (BlockNumber, Hash); + /// Returns true if the header is known to the runtime. + fn is_known_header(hash: Hash) -> bool; + } + + /// Outbound message lane API for messages that are sent to Kusama chain. + /// + /// This API is implemented by runtimes that are sending messages to Kusama chain, not the + /// Kusama runtime itself. + pub trait ToKusamaOutboundLaneApi { + /// Estimate message delivery and dispatch fee that needs to be paid by the sender on + /// this chain. + /// + /// Returns `None` if message is too expensive to be sent to Kusama from this chain. + /// + /// Please keep in mind that this method returns lowest message fee required for message + /// to be accepted to the lane. It may be good idea to pay a bit over this price to account + /// future exchange rate changes and guarantee that relayer would deliver your message + /// to the target chain. + fn estimate_message_delivery_and_dispatch_fee( + lane_id: LaneId, + payload: OutboundPayload, + ) -> Option; + /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. 
+ /// + /// If some (or all) messages are missing from the storage, they'll also will + /// be missing from the resulting vector. The vector is ordered by the nonce. + fn messages_dispatch_weight( + lane: LaneId, + begin: MessageNonce, + end: MessageNonce, + ) -> Vec<(MessageNonce, Weight, u32)>; + /// Returns nonce of the latest message, received by bridged chain. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Returns nonce of the latest message, generated by given lane. + fn latest_generated_nonce(lane: LaneId) -> MessageNonce; + } + + /// Inbound message lane API for messages sent by Kusama chain. + /// + /// This API is implemented by runtimes that are receiving messages from Kusama chain, not the + /// Kusama runtime itself. + pub trait FromKusamaInboundLaneApi { + /// Returns nonce of the latest message, received by given lane. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Nonce of latest message that has been confirmed to the bridged chain. + fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; + /// State of the unrewarded relayers set at given lane. + fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; + } +} diff --git a/polkadot/primitives/chain-millau/Cargo.toml b/polkadot/primitives/chain-millau/Cargo.toml new file mode 100644 index 00000000000..67db08c2086 --- /dev/null +++ b/polkadot/primitives/chain-millau/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "bp-millau" +description = "Primitives of Millau runtime." 
+version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] + +# Bridge Dependencies + +bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = "../runtime", default-features = false } +fixed-hash = { version = "0.7.0", default-features = false } +hash256-std-hasher = { version = "0.15.2", default-features = false } +impl-codec = { version = "0.5.0", default-features = false } +impl-serde = { version = "0.3.1", optional = true } +parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } +serde = { version = "1.0.101", optional = true, features = ["derive"] } + +# Substrate Based Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + +[features] +default = ["std"] +std = [ + "bp-messages/std", + "bp-runtime/std", + "fixed-hash/std", + "frame-support/std", + "frame-system/std", + "hash256-std-hasher/std", + "impl-codec/std", + "impl-serde", + "parity-util-mem/std", + "serde", + "sp-api/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "sp-trie/std", +] 
diff --git a/polkadot/primitives/chain-millau/src/lib.rs b/polkadot/primitives/chain-millau/src/lib.rs new file mode 100644 index 00000000000..22f09cb5b09 --- /dev/null +++ b/polkadot/primitives/chain-millau/src/lib.rs @@ -0,0 +1,359 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +// RuntimeApi generated functions +#![allow(clippy::too_many_arguments)] +// Runtime-generated DecodeLimit::decode_all_with_depth_limit +#![allow(clippy::unnecessary_mut_passed)] + +mod millau_hash; + +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState}; +use bp_runtime::Chain; +use frame_support::{ + weights::{constants::WEIGHT_PER_SECOND, DispatchClass, Weight}, + Parameter, RuntimeDebug, +}; +use frame_system::limits; +use sp_core::Hasher as HasherT; +use sp_runtime::traits::Convert; +use sp_runtime::{ + traits::{IdentifyAccount, Verify}, + MultiSignature, MultiSigner, Perbill, +}; +use sp_std::prelude::*; +use sp_trie::{trie_types::Layout, TrieConfiguration}; + +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; + +pub use millau_hash::MillauHash; + +/// Number of extra bytes (excluding size of storage value itself) of storage proof, built at +/// Millau chain. 
This mostly depends on number of entries (and their density) in the storage trie. +/// Some reserve is reserved to account future chain growth. +pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; + +/// Number of bytes, included in the signed Millau transaction apart from the encoded call itself. +/// +/// Can be computed by subtracting encoded call size from raw transaction size. +pub const TX_EXTRA_BYTES: u32 = 103; + +/// Maximal size (in bytes) of encoded (using `Encode::encode()`) account id. +pub const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 32; + +/// Maximum weight of single Millau block. +/// +/// This represents 0.5 seconds of compute assuming a target block time of six seconds. +pub const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND / 2; + +/// Represents the average portion of a block's weight that will be used by an +/// `on_initialize()` runtime call. +pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); + +/// Represents the portion of a block that will be used by Normal extrinsics. +pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); + +/// Maximal number of unrewarded relayer entries at inbound lane. +pub const MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE: MessageNonce = 1024; + +/// Maximal number of unconfirmed messages at inbound lane. +pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 1024; + +/// Weight of single regular message delivery transaction on Millau chain. +/// +/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call +/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered. +/// The message must have dispatch weight set to zero. The result then must be rounded up to account +/// possible future runtime upgrades. 
+pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_000_000_000; + +/// Increase of delivery transaction weight on Millau chain with every additional message byte. +/// +/// This value is a result of `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The +/// result then must be rounded up to account possible future runtime upgrades. +pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000; + +/// Maximal weight of single message delivery confirmation transaction on Millau chain. +/// +/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` weight formula computation +/// for the case when single message is confirmed. The result then must be rounded up to account possible future +/// runtime upgrades. +pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000; + +/// The target length of a session (how often authorities change) on Millau measured in number of +/// blocks. +/// +/// Note that since this is a target sessions may change before/after this time depending on network +/// conditions. +pub const SESSION_LENGTH: BlockNumber = 5 * time_units::MINUTES; + +/// Re-export `time_units` to make usage easier. +pub use time_units::*; + +/// Human readable time units defined in terms of number of blocks. +pub mod time_units { + use super::BlockNumber; + + pub const MILLISECS_PER_BLOCK: u64 = 6000; + pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; + + pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); + pub const HOURS: BlockNumber = MINUTES * 60; + pub const DAYS: BlockNumber = HOURS * 24; +} + +/// Block number type used in Millau. +pub type BlockNumber = u64; + +/// Hash type used in Millau. +pub type Hash = ::Out; + +/// The type of an object that can produce hashes on Millau. +pub type Hasher = BlakeTwoAndKeccak256; + +/// The header type used by Millau. 
+pub type Header = sp_runtime::generic::Header; + +/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. +pub type Signature = MultiSignature; + +/// Some way of identifying an account on the chain. We intentionally make it equivalent +/// to the public key of our transaction signing scheme. +pub type AccountId = <::Signer as IdentifyAccount>::AccountId; + +/// Public key of the chain account that may be used to verify signatures. +pub type AccountSigner = MultiSigner; + +/// Balance of an account. +pub type Balance = u64; + +/// Millau chain. +#[derive(RuntimeDebug)] +pub struct Millau; + +impl Chain for Millau { + type BlockNumber = BlockNumber; + type Hash = Hash; + type Hasher = Hasher; + type Header = Header; +} + +/// Millau Hasher (Blake2-256 ++ Keccak-256) implementation. +#[derive(PartialEq, Eq, Clone, Copy, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub struct BlakeTwoAndKeccak256; + +impl sp_core::Hasher for BlakeTwoAndKeccak256 { + type Out = MillauHash; + type StdHasher = hash256_std_hasher::Hash256StdHasher; + const LENGTH: usize = 64; + + fn hash(s: &[u8]) -> Self::Out { + let mut combined_hash = MillauHash::default(); + combined_hash.as_mut()[..32].copy_from_slice(&sp_io::hashing::blake2_256(s)); + combined_hash.as_mut()[32..].copy_from_slice(&sp_io::hashing::keccak_256(s)); + combined_hash + } +} + +impl sp_runtime::traits::Hash for BlakeTwoAndKeccak256 { + type Output = MillauHash; + + fn trie_root(input: Vec<(Vec, Vec)>) -> Self::Output { + Layout::::trie_root(input) + } + + fn ordered_trie_root(input: Vec>) -> Self::Output { + Layout::::ordered_trie_root(input) + } +} + +/// Convert a 256-bit hash into an AccountId. 
+pub struct AccountIdConverter; + +impl sp_runtime::traits::Convert for AccountIdConverter { + fn convert(hash: sp_core::H256) -> AccountId { + hash.to_fixed_bytes().into() + } +} + +/// We use this to get the account on Millau (target) which is derived from Rialto's (source) +/// account. We do this so we can fund the derived account on Millau at Genesis so it can pay +/// transaction fees. +/// +/// The reason we can use the same `AccountId` type for both chains is because they share the same +/// development seed phrase. +/// +/// Note that this should only be used for testing. +pub fn derive_account_from_rialto_id(id: bp_runtime::SourceAccount) -> AccountId { + let encoded_id = bp_runtime::derive_account_id(bp_runtime::RIALTO_BRIDGE_INSTANCE, id); + AccountIdConverter::convert(encoded_id) +} + +frame_support::parameter_types! { + pub BlockLength: limits::BlockLength = + limits::BlockLength::max_with_normal_ratio(2 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() + // Allowance for Normal class + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + // Allowance for Operational class + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Extra reserved space for Operational class + weights.reserved = Some(MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + // By default Mandatory class is not limited at all. + // This parameter is used to derive maximal size of a single extrinsic. + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); +} + +/// Get the maximum weight (compute time) that a Normal extrinsic on the Millau chain can use. 
+pub fn max_extrinsic_weight() -> Weight { + BlockWeights::get() + .get(DispatchClass::Normal) + .max_extrinsic + .unwrap_or(Weight::MAX) +} + +/// Get the maximum length in bytes that a Normal extrinsic on the Millau chain requires. +pub fn max_extrinsic_size() -> u32 { + *BlockLength::get().max.get(DispatchClass::Normal) +} + +/// Name of the `MillauFinalityApi::best_finalized` runtime method. +pub const BEST_FINALIZED_MILLAU_HEADER_METHOD: &str = "MillauFinalityApi_best_finalized"; + +/// Name of the `ToMillauOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. +pub const TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD: &str = + "ToMillauOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; +/// Name of the `ToMillauOutboundLaneApi::messages_dispatch_weight` runtime method. +pub const TO_MILLAU_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToMillauOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToMillauOutboundLaneApi::latest_received_nonce` runtime method. +pub const TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str = "ToMillauOutboundLaneApi_latest_received_nonce"; +/// Name of the `ToMillauOutboundLaneApi::latest_generated_nonce` runtime method. +pub const TO_MILLAU_LATEST_GENERATED_NONCE_METHOD: &str = "ToMillauOutboundLaneApi_latest_generated_nonce"; + +/// Name of the `FromMillauInboundLaneApi::latest_received_nonce` runtime method. +pub const FROM_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str = "FromMillauInboundLaneApi_latest_received_nonce"; +/// Name of the `FromMillauInboundLaneApi::latest_confirmed_nonce` runtime method. +pub const FROM_MILLAU_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromMillauInboundLaneApi_latest_confirmed_nonce"; +/// Name of the `FromMillauInboundLaneApi::unrewarded_relayers_state` runtime method. +pub const FROM_MILLAU_UNREWARDED_RELAYERS_STATE: &str = "FromMillauInboundLaneApi_unrewarded_relayers_state"; + +sp_api::decl_runtime_apis! 
{ + /// API for querying information about Millau headers from the Bridge Pallet instance. + /// + /// This API is implemented by runtimes that are bridging with the Millau chain, not the + /// Millau runtime itself. + pub trait MillauHeaderApi { + /// Returns number and hash of the best blocks known to the bridge module. + /// + /// Will return multiple headers if there are many headers at the same "best" height. + /// + /// The caller should only submit an `import_header` transaction that makes + /// (or leads to making) other header the best one. + fn best_blocks() -> Vec<(BlockNumber, Hash)>; + /// Returns number and hash of the best finalized block known to the bridge module. + fn finalized_block() -> (BlockNumber, Hash); + /// Returns numbers and hashes of headers that require finality proofs. + /// + /// An empty response means that there are no headers which currently require a + /// finality proof. + fn incomplete_headers() -> Vec<(BlockNumber, Hash)>; + /// Returns true if the header is known to the runtime. + fn is_known_block(hash: Hash) -> bool; + /// Returns true if the header is considered finalized by the runtime. + fn is_finalized_block(hash: Hash) -> bool; + } + + /// API for querying information about the finalized Millau headers. + /// + /// This API is implemented by runtimes that are bridging with the Millau chain, not the + /// Millau runtime itself. + pub trait MillauFinalityApi { + /// Returns number and hash of the best finalized header known to the bridge module. + fn best_finalized() -> (BlockNumber, Hash); + /// Returns true if the header is known to the runtime. + fn is_known_header(hash: Hash) -> bool; + } + + /// Outbound message lane API for messages that are sent to Millau chain. + /// + /// This API is implemented by runtimes that are sending messages to Millau chain, not the + /// Millau runtime itself. 
+ pub trait ToMillauOutboundLaneApi { + /// Estimate message delivery and dispatch fee that needs to be paid by the sender on + /// this chain. + /// + /// Returns `None` if message is too expensive to be sent to Millau from this chain. + /// + /// Please keep in mind that this method returns lowest message fee required for message + /// to be accepted to the lane. It may be good idea to pay a bit over this price to account + /// future exchange rate changes and guarantee that relayer would deliver your message + /// to the target chain. + fn estimate_message_delivery_and_dispatch_fee( + lane_id: LaneId, + payload: OutboundPayload, + ) -> Option; + /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. + /// + /// If some (or all) messages are missing from the storage, they'll also will + /// be missing from the resulting vector. The vector is ordered by the nonce. + fn messages_dispatch_weight( + lane: LaneId, + begin: MessageNonce, + end: MessageNonce, + ) -> Vec<(MessageNonce, Weight, u32)>; + /// Returns nonce of the latest message, received by bridged chain. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Returns nonce of the latest message, generated by given lane. + fn latest_generated_nonce(lane: LaneId) -> MessageNonce; + } + + /// Inbound message lane API for messages sent by Millau chain. + /// + /// This API is implemented by runtimes that are receiving messages from Millau chain, not the + /// Millau runtime itself. + pub trait FromMillauInboundLaneApi { + /// Returns nonce of the latest message, received by given lane. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Nonce of latest message that has been confirmed to the bridged chain. + fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; + /// State of the unrewarded relayers set at given lane. 
+ fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_runtime::codec::Encode; + + #[test] + fn maximal_account_size_does_not_overflow_constant() { + assert!( + MAXIMAL_ENCODED_ACCOUNT_ID_SIZE as usize >= AccountId::default().encode().len(), + "Actual maximal size of encoded AccountId ({}) overflows expected ({})", + AccountId::default().encode().len(), + MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, + ); + } +} diff --git a/polkadot/primitives/chain-millau/src/millau_hash.rs b/polkadot/primitives/chain-millau/src/millau_hash.rs new file mode 100644 index 00000000000..936791217af --- /dev/null +++ b/polkadot/primitives/chain-millau/src/millau_hash.rs @@ -0,0 +1,57 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use parity_util_mem::MallocSizeOf; +use sp_runtime::traits::CheckEqual; + +// `sp_core::H512` can't be used, because it doesn't implement `CheckEqual`, which is required +// by `frame_system::Config::Hash`. + +fixed_hash::construct_fixed_hash! { + /// Hash type used in Millau chain. 
+ #[derive(MallocSizeOf)] + pub struct MillauHash(64); +} + +#[cfg(feature = "std")] +impl_serde::impl_fixed_hash_serde!(MillauHash, 64); + +impl_codec::impl_fixed_hash_codec!(MillauHash, 64); + +impl CheckEqual for MillauHash { + #[cfg(feature = "std")] + fn check_equal(&self, other: &Self) { + use sp_core::hexdisplay::HexDisplay; + if self != other { + println!( + "Hash: given={}, expected={}", + HexDisplay::from(self.as_fixed_bytes()), + HexDisplay::from(other.as_fixed_bytes()), + ); + } + } + + #[cfg(not(feature = "std"))] + fn check_equal(&self, other: &Self) { + use frame_support::Printable; + + if self != other { + "Hash not equal".print(); + self.as_bytes().print(); + other.as_bytes().print(); + } + } +} diff --git a/polkadot/primitives/chain-polkadot/Cargo.toml b/polkadot/primitives/chain-polkadot/Cargo.toml new file mode 100644 index 00000000000..22ded41b914 --- /dev/null +++ b/polkadot/primitives/chain-polkadot/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "bp-polkadot" +description = "Primitives of Polkadot runtime." 
+version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] + +# Bridge Dependencies +bp-messages = { path = "../messages", default-features = false } +bp-polkadot-core = { path = "../polkadot-core", default-features = false } +bp-runtime = { path = "../runtime", default-features = false } + +# Substrate Based Dependencies + +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +[features] +default = ["std"] +std = [ + "bp-messages/std", + "bp-polkadot-core/std", + "bp-runtime/std", + "sp-api/std", + "sp-std/std", +] diff --git a/polkadot/primitives/chain-polkadot/src/lib.rs b/polkadot/primitives/chain-polkadot/src/lib.rs new file mode 100644 index 00000000000..8e0d30cdb60 --- /dev/null +++ b/polkadot/primitives/chain-polkadot/src/lib.rs @@ -0,0 +1,117 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +#![cfg_attr(not(feature = "std"), no_std)] +// RuntimeApi generated functions +#![allow(clippy::too_many_arguments)] +// Runtime-generated DecodeLimit::decode_all_with_depth_limit +#![allow(clippy::unnecessary_mut_passed)] + +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; +use sp_std::prelude::*; + +pub use bp_polkadot_core::*; + +/// Polkadot Chain +pub type Polkadot = PolkadotLike; + +// We use this to get the account on Polkadot (target) which is derived from Kusama's (source) +// account. +pub fn derive_account_from_kusama_id(id: bp_runtime::SourceAccount) -> AccountId { + let encoded_id = bp_runtime::derive_account_id(bp_runtime::KUSAMA_BRIDGE_INSTANCE, id); + AccountIdConverter::convert(encoded_id) +} + +/// Name of the `PolkadotFinalityApi::best_finalized` runtime method. +pub const BEST_FINALIZED_POLKADOT_HEADER_METHOD: &str = "PolkadotFinalityApi_best_finalized"; +/// Name of the `PolkadotFinalityApi::is_known_header` runtime method. +pub const IS_KNOWN_POLKADOT_HEADER_METHOD: &str = "PolkadotFinalityApi_is_known_header"; + +/// Name of the `ToPolkadotOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. +pub const TO_POLKADOT_ESTIMATE_MESSAGE_FEE_METHOD: &str = + "ToPolkadotOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; +/// Name of the `ToPolkadotOutboundLaneApi::messages_dispatch_weight` runtime method. +pub const TO_POLKADOT_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToPolkadotOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToPolkadotOutboundLaneApi::latest_generated_nonce` runtime method. +pub const TO_POLKADOT_LATEST_GENERATED_NONCE_METHOD: &str = "ToPolkadotOutboundLaneApi_latest_generated_nonce"; +/// Name of the `ToPolkadotOutboundLaneApi::latest_received_nonce` runtime method. 
+pub const TO_POLKADOT_LATEST_RECEIVED_NONCE_METHOD: &str = "ToPolkadotOutboundLaneApi_latest_received_nonce"; + +/// Name of the `FromPolkadotInboundLaneApi::latest_received_nonce` runtime method. +pub const FROM_POLKADOT_LATEST_RECEIVED_NONCE_METHOD: &str = "FromPolkadotInboundLaneApi_latest_received_nonce"; +/// Name of the `FromPolkadotInboundLaneApi::latest_confirmed_nonce` runtime method. +pub const FROM_POLKADOT_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromPolkadotInboundLaneApi_latest_confirmed_nonce"; +/// Name of the `FromPolkadotInboundLaneApi::unrewarded_relayers_state` runtime method. +pub const FROM_POLKADOT_UNREWARDED_RELAYERS_STATE: &str = "FromPolkadotInboundLaneApi_unrewarded_relayers_state"; + +sp_api::decl_runtime_apis! { + /// API for querying information about the finalized Polkadot headers. + /// + /// This API is implemented by runtimes that are bridging with the Polkadot chain, not the + /// Polkadot runtime itself. + pub trait PolkadotFinalityApi { + /// Returns number and hash of the best finalized header known to the bridge module. + fn best_finalized() -> (BlockNumber, Hash); + /// Returns true if the header is known to the runtime. + fn is_known_header(hash: Hash) -> bool; + } + + /// Outbound message lane API for messages that are sent to Polkadot chain. + /// + /// This API is implemented by runtimes that are sending messages to Polkadot chain, not the + /// Polkadot runtime itself. + pub trait ToPolkadotOutboundLaneApi { + /// Estimate message delivery and dispatch fee that needs to be paid by the sender on + /// this chain. + /// + /// Returns `None` if message is too expensive to be sent to Polkadot from this chain. + /// + /// Please keep in mind that this method returns lowest message fee required for message + /// to be accepted to the lane. It may be good idea to pay a bit over this price to account + /// future exchange rate changes and guarantee that relayer would deliver your message + /// to the target chain. 
+ fn estimate_message_delivery_and_dispatch_fee( + lane_id: LaneId, + payload: OutboundPayload, + ) -> Option; + /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. + /// + /// If some (or all) messages are missing from the storage, they'll also will + /// be missing from the resulting vector. The vector is ordered by the nonce. + fn messages_dispatch_weight( + lane: LaneId, + begin: MessageNonce, + end: MessageNonce, + ) -> Vec<(MessageNonce, Weight, u32)>; + /// Returns nonce of the latest message, received by bridged chain. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Returns nonce of the latest message, generated by given lane. + fn latest_generated_nonce(lane: LaneId) -> MessageNonce; + } + + /// Inbound message lane API for messages sent by Polkadot chain. + /// + /// This API is implemented by runtimes that are receiving messages from Polkadot chain, not the + /// Polkadot runtime itself. + pub trait FromPolkadotInboundLaneApi { + /// Returns nonce of the latest message, received by given lane. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Nonce of latest message that has been confirmed to the bridged chain. + fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; + /// State of the unrewarded relayers set at given lane. + fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; + } +} diff --git a/polkadot/primitives/chain-rialto/Cargo.toml b/polkadot/primitives/chain-rialto/Cargo.toml new file mode 100644 index 00000000000..7e039a40acd --- /dev/null +++ b/polkadot/primitives/chain-rialto/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "bp-rialto" +description = "Primitives of Rialto runtime." 
+version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] + +# Bridge Dependencies + +bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = "../runtime", default-features = false } + +# Substrate Based Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + +[features] +default = ["std"] +std = [ + "bp-messages/std", + "bp-runtime/std", + "frame-support/std", + "frame-system/std", + "sp-api/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/polkadot/primitives/chain-rialto/src/lib.rs b/polkadot/primitives/chain-rialto/src/lib.rs new file mode 100644 index 00000000000..c10f31bae33 --- /dev/null +++ b/polkadot/primitives/chain-rialto/src/lib.rs @@ -0,0 +1,320 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +// RuntimeApi generated functions +#![allow(clippy::too_many_arguments)] +// Runtime-generated DecodeLimit::decode_all_with_depth_limit +#![allow(clippy::unnecessary_mut_passed)] + +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState}; +use bp_runtime::Chain; +use frame_support::{ + weights::{constants::WEIGHT_PER_SECOND, DispatchClass, Weight}, + Parameter, RuntimeDebug, +}; +use frame_system::limits; +use sp_core::Hasher as HasherT; +use sp_runtime::{ + traits::{BlakeTwo256, Convert, IdentifyAccount, Verify}, + MultiSignature, MultiSigner, Perbill, +}; +use sp_std::prelude::*; + +/// Number of extra bytes (excluding size of storage value itself) of storage proof, built at +/// Rialto chain. This mostly depends on number of entries (and their density) in the storage trie. +/// Some reserve is reserved to account future chain growth. +pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; + +/// Number of bytes, included in the signed Rialto transaction apart from the encoded call itself. +/// +/// Can be computed by subtracting encoded call size from raw transaction size. +pub const TX_EXTRA_BYTES: u32 = 103; + +/// Maximal size (in bytes) of encoded (using `Encode::encode()`) account id. +pub const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 32; + +/// Maximal weight of single Rialto block. +/// +/// This represents two seconds of compute assuming a target block time of six seconds. 
+pub const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; + +/// Represents the average portion of a block's weight that will be used by an +/// `on_initialize()` runtime call. +pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); + +/// Represents the portion of a block that will be used by Normal extrinsics. +pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); + +/// Maximal number of unrewarded relayer entries at inbound lane. +pub const MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE: MessageNonce = 128; + +/// Maximal number of unconfirmed messages at inbound lane. +pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 128; + +/// Weight of single regular message delivery transaction on Rialto chain. +/// +/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call +/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered. +/// The message must have dispatch weight set to zero. The result then must be rounded up to account +/// possible future runtime upgrades. +pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_000_000_000; + +/// Increase of delivery transaction weight on Rialto chain with every additional message byte. +/// +/// This value is a result of `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The +/// result then must be rounded up to account possible future runtime upgrades. +pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000; + +/// Maximal weight of single message delivery confirmation transaction on Rialto chain. +/// +/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` weight formula computation +/// for the case when single message is confirmed. The result then must be rounded up to account possible future +/// runtime upgrades. 
+pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000; + +/// The target length of a session (how often authorities change) on Rialto measured in of number of +/// blocks. +/// +/// Note that since this is a target sessions may change before/after this time depending on network +/// conditions. +pub const SESSION_LENGTH: BlockNumber = 4; + +/// Re-export `time_units` to make usage easier. +pub use time_units::*; + +/// Human readable time units defined in terms of number of blocks. +pub mod time_units { + use super::BlockNumber; + + pub const MILLISECS_PER_BLOCK: u64 = 6000; + pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; + + pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); + pub const HOURS: BlockNumber = MINUTES * 60; + pub const DAYS: BlockNumber = HOURS * 24; +} + +/// Block number type used in Rialto. +pub type BlockNumber = u32; + +/// Hash type used in Rialto. +pub type Hash = ::Out; + +/// The type of an object that can produce hashes on Rialto. +pub type Hasher = BlakeTwo256; + +/// The header type used by Rialto. +pub type Header = sp_runtime::generic::Header; + +/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. +pub type Signature = MultiSignature; + +/// Some way of identifying an account on the chain. We intentionally make it equivalent +/// to the public key of our transaction signing scheme. +pub type AccountId = <::Signer as IdentifyAccount>::AccountId; + +/// Public key of the chain account that may be used to verify signatures. +pub type AccountSigner = MultiSigner; + +/// Balance of an account. +pub type Balance = u128; + +/// Rialto chain. +#[derive(RuntimeDebug)] +pub struct Rialto; + +impl Chain for Rialto { + type BlockNumber = BlockNumber; + type Hash = Hash; + type Hasher = Hasher; + type Header = Header; +} + +/// Convert a 256-bit hash into an AccountId. 
+pub struct AccountIdConverter; + +impl Convert for AccountIdConverter { + fn convert(hash: sp_core::H256) -> AccountId { + hash.to_fixed_bytes().into() + } +} + +// We use this to get the account on Rialto (target) which is derived from Millau's (source) +// account. We do this so we can fund the derived account on Rialto at Genesis so it can pay +// transaction fees. +// +// The reason we can use the same `AccountId` type for both chains is because they share the same +// development seed phrase. +// +// Note that this should only be used for testing. +pub fn derive_account_from_millau_id(id: bp_runtime::SourceAccount) -> AccountId { + let encoded_id = bp_runtime::derive_account_id(bp_runtime::MILLAU_BRIDGE_INSTANCE, id); + AccountIdConverter::convert(encoded_id) +} + +frame_support::parameter_types! { + pub BlockLength: limits::BlockLength = + limits::BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() + // Allowance for Normal class + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + // Allowance for Operational class + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Extra reserved space for Operational class + weights.reserved = Some(MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + // By default Mandatory class is not limited at all. + // This parameter is used to derive maximal size of a single extrinsic. + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); +} + +/// Get the maximum weight (compute time) that a Normal extrinsic on the Millau chain can use. 
+pub fn max_extrinsic_weight() -> Weight { + BlockWeights::get() + .get(DispatchClass::Normal) + .max_extrinsic + .unwrap_or(Weight::MAX) +} + +/// Get the maximum length in bytes that a Normal extrinsic on the Millau chain requires. +pub fn max_extrinsic_size() -> u32 { + *BlockLength::get().max.get(DispatchClass::Normal) +} + +/// Name of the `RialtoFinalityApi::best_finalized` runtime method. +pub const BEST_FINALIZED_RIALTO_HEADER_METHOD: &str = "RialtoFinalityApi_best_finalized"; + +/// Name of the `ToRialtoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. +pub const TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD: &str = + "ToRialtoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; +/// Name of the `ToRialtoOutboundLaneApi::messages_dispatch_weight` runtime method. +pub const TO_RIALTO_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToRialtoOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToRialtoOutboundLaneApi::latest_generated_nonce` runtime method. +pub const TO_RIALTO_LATEST_GENERATED_NONCE_METHOD: &str = "ToRialtoOutboundLaneApi_latest_generated_nonce"; +/// Name of the `ToRialtoOutboundLaneApi::latest_received_nonce` runtime method. +pub const TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD: &str = "ToRialtoOutboundLaneApi_latest_received_nonce"; + +/// Name of the `FromRialtoInboundLaneApi::latest_received_nonce` runtime method. +pub const FROM_RIALTO_LATEST_RECEIVED_NONCE_METHOD: &str = "FromRialtoInboundLaneApi_latest_received_nonce"; +/// Name of the `FromRialtoInboundLaneApi::latest_confirmed_nonce` runtime method. +pub const FROM_RIALTO_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromRialtoInboundLaneApi_latest_confirmed_nonce"; +/// Name of the `FromRialtoInboundLaneApi::unrewarded_relayers_state` runtime method. +pub const FROM_RIALTO_UNREWARDED_RELAYERS_STATE: &str = "FromRialtoInboundLaneApi_unrewarded_relayers_state"; + +sp_api::decl_runtime_apis! 
{ + /// API for querying information about Rialto headers from the Bridge Pallet instance. + /// + /// This API is implemented by runtimes that are bridging with the Rialto chain, not the + /// Rialto runtime itself. + pub trait RialtoHeaderApi { + /// Returns number and hash of the best blocks known to the bridge module. + /// + /// Will return multiple headers if there are many headers at the same "best" height. + /// + /// The caller should only submit an `import_header` transaction that makes + /// (or leads to making) other header the best one. + fn best_blocks() -> Vec<(BlockNumber, Hash)>; + /// Returns number and hash of the best finalized block known to the bridge module. + fn finalized_block() -> (BlockNumber, Hash); + /// Returns numbers and hashes of headers that require finality proofs. + /// + /// An empty response means that there are no headers which currently require a + /// finality proof. + fn incomplete_headers() -> Vec<(BlockNumber, Hash)>; + /// Returns true if the header is known to the runtime. + fn is_known_block(hash: Hash) -> bool; + /// Returns true if the header is considered finalized by the runtime. + fn is_finalized_block(hash: Hash) -> bool; + } + + /// API for querying information about the finalized Rialto headers. + /// + /// This API is implemented by runtimes that are bridging with the Rialto chain, not the + /// Millau runtime itself. + pub trait RialtoFinalityApi { + /// Returns number and hash of the best finalized header known to the bridge module. + fn best_finalized() -> (BlockNumber, Hash); + /// Returns true if the header is known to the runtime. + fn is_known_header(hash: Hash) -> bool; + } + + /// Outbound message lane API for messages that are sent to Rialto chain. + /// + /// This API is implemented by runtimes that are sending messages to Rialto chain, not the + /// Rialto runtime itself. 
+ pub trait ToRialtoOutboundLaneApi { + /// Estimate message delivery and dispatch fee that needs to be paid by the sender on + /// this chain. + /// + /// Returns `None` if message is too expensive to be sent to Rialto from this chain. + /// + /// Please keep in mind that this method returns lowest message fee required for message + /// to be accepted to the lane. It may be good idea to pay a bit over this price to account + /// future exchange rate changes and guarantee that relayer would deliver your message + /// to the target chain. + fn estimate_message_delivery_and_dispatch_fee( + lane_id: LaneId, + payload: OutboundPayload, + ) -> Option; + /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. + /// + /// If some (or all) messages are missing from the storage, they'll also will + /// be missing from the resulting vector. The vector is ordered by the nonce. + fn messages_dispatch_weight( + lane: LaneId, + begin: MessageNonce, + end: MessageNonce, + ) -> Vec<(MessageNonce, Weight, u32)>; + /// Returns nonce of the latest message, received by bridged chain. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Returns nonce of the latest message, generated by given lane. + fn latest_generated_nonce(lane: LaneId) -> MessageNonce; + } + + /// Inbound message lane API for messages sent by Rialto chain. + /// + /// This API is implemented by runtimes that are receiving messages from Rialto chain, not the + /// Rialto runtime itself. + pub trait FromRialtoInboundLaneApi { + /// Returns nonce of the latest message, received by given lane. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Nonce of latest message that has been confirmed to the bridged chain. + fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; + /// State of the unrewarded relayers set at given lane. 
+ fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_runtime::codec::Encode; + + #[test] + fn maximal_account_size_does_not_overflow_constant() { + assert!( + MAXIMAL_ENCODED_ACCOUNT_ID_SIZE as usize >= AccountId::default().encode().len(), + "Actual maximal size of encoded AccountId ({}) overflows expected ({})", + AccountId::default().encode().len(), + MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, + ); + } +} diff --git a/polkadot/primitives/chain-rococo/Cargo.toml b/polkadot/primitives/chain-rococo/Cargo.toml new file mode 100644 index 00000000000..b97e8d9d1ab --- /dev/null +++ b/polkadot/primitives/chain-rococo/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "bp-rococo" +description = "Primitives of Rococo runtime." +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] } + +# Bridge Dependencies +bp-header-chain = { path = "../header-chain", default-features = false } +bp-messages = { path = "../messages", default-features = false } +bp-polkadot-core = { path = "../polkadot-core", default-features = false } +bp-runtime = { path = "../runtime", default-features = false } + +# Substrate Based Dependencies +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +[features] +default = ["std"] +std = [ + "bp-header-chain/std", + "bp-messages/std", + "bp-polkadot-core/std", + "bp-runtime/std", + "parity-scale-codec/std", + "sp-api/std", + 
"sp-runtime/std", + "sp-std/std", + "sp-version/std", +] diff --git a/polkadot/primitives/chain-rococo/src/lib.rs b/polkadot/primitives/chain-rococo/src/lib.rs new file mode 100644 index 00000000000..b79fdf6cfcf --- /dev/null +++ b/polkadot/primitives/chain-rococo/src/lib.rs @@ -0,0 +1,172 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +// RuntimeApi generated functions +#![allow(clippy::too_many_arguments)] +// Runtime-generated DecodeLimit::decode_all_with_depth_limit +#![allow(clippy::unnecessary_mut_passed)] + +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; +use bp_runtime::Chain; +use sp_std::prelude::*; +use sp_version::RuntimeVersion; + +pub use bp_polkadot_core::*; + +/// Rococo Chain +pub type Rococo = PolkadotLike; + +pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; + +// NOTE: This needs to be kept up to date with the Rococo runtime found in the Polkadot repo. 
+pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: sp_version::create_runtime_str!("rococo"), + impl_name: sp_version::create_runtime_str!("parity-rococo-v1.5"), + authoring_version: 0, + spec_version: 231, + impl_version: 0, + apis: sp_version::create_apis_vec![[]], + transaction_version: 0, +}; + +/// Rococo Runtime `Call` enum. +/// +/// The enum represents a subset of possible `Call`s we can send to Rococo chain. +/// Ideally this code would be auto-generated from Metadata, because we want to +/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. +/// +/// All entries here (like pretty much in the entire file) must be kept in sync with Rococo +/// `construct_runtime`, so that we maintain SCALE-compatibility. +/// +/// See: https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs +#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)] +pub enum Call { + /// Westend bridge pallet. + #[codec(index = 40)] + BridgeGrandpaWestend(BridgeGrandpaWestendCall), +} + +#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)] +#[allow(non_camel_case_types)] +pub enum BridgeGrandpaWestendCall { + #[codec(index = 0)] + submit_finality_proof( + ::Header, + bp_header_chain::justification::GrandpaJustification<::Header>, + ), + #[codec(index = 1)] + initialize(bp_header_chain::InitializationData<::Header>), +} + +impl sp_runtime::traits::Dispatchable for Call { + type Origin = (); + type Config = (); + type Info = (); + type PostInfo = (); + + fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { + unimplemented!("The Call is not expected to be dispatched.") + } +} + +// We use this to get the account on Rococo (target) which is derived from Westend's (source) +// account. 
+pub fn derive_account_from_westend_id(id: bp_runtime::SourceAccount) -> AccountId { + let encoded_id = bp_runtime::derive_account_id(bp_runtime::WESTEND_BRIDGE_INSTANCE, id); + AccountIdConverter::convert(encoded_id) +} + +/// Name of the `RococoFinalityApi::best_finalized` runtime method. +pub const BEST_FINALIZED_ROCOCO_HEADER_METHOD: &str = "RococoFinalityApi_best_finalized"; +/// Name of the `RococoFinalityApi::is_known_header` runtime method. +pub const IS_KNOWN_ROCOCO_HEADER_METHOD: &str = "RococoFinalityApi_is_known_header"; + +/// Name of the `ToRococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. +pub const TO_ROCOCO_ESTIMATE_MESSAGE_FEE_METHOD: &str = + "ToRococoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; +/// Name of the `ToRococoOutboundLaneApi::messages_dispatch_weight` runtime method. +pub const TO_ROCOCO_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToRococoOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToRococoOutboundLaneApi::latest_generated_nonce` runtime method. +pub const TO_ROCOCO_LATEST_GENERATED_NONCE_METHOD: &str = "ToRococoOutboundLaneApi_latest_generated_nonce"; +/// Name of the `ToRococoOutboundLaneApi::latest_received_nonce` runtime method. +pub const TO_ROCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = "ToRococoOutboundLaneApi_latest_received_nonce"; + +/// Name of the `FromRococoInboundLaneApi::latest_received_nonce` runtime method. +pub const FROM_ROCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = "FromRococoInboundLaneApi_latest_received_nonce"; +/// Name of the `FromRococoInboundLaneApi::latest_confirmed_nonce` runtime method. +pub const FROM_ROCOCO_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromRococoInboundLaneApi_latest_confirmed_nonce"; +/// Name of the `FromRococoInboundLaneApi::unrewarded_relayers_state` runtime method. +pub const FROM_ROCOCO_UNREWARDED_RELAYERS_STATE: &str = "FromRococoInboundLaneApi_unrewarded_relayers_state"; + +sp_api::decl_runtime_apis! 
{ + /// API for querying information about the finalized Rococo headers. + /// + /// This API is implemented by runtimes that are bridging with the Rococo chain, not the + /// Rococo runtime itself. + pub trait RococoFinalityApi { + /// Returns number and hash of the best finalized header known to the bridge module. + fn best_finalized() -> (BlockNumber, Hash); + /// Returns true if the header is known to the runtime. + fn is_known_header(hash: Hash) -> bool; + } + + /// Outbound message lane API for messages that are sent to Rococo chain. + /// + /// This API is implemented by runtimes that are sending messages to Rococo chain, not the + /// Rococo runtime itself. + pub trait ToRococoOutboundLaneApi { + /// Estimate message delivery and dispatch fee that needs to be paid by the sender on + /// this chain. + /// + /// Returns `None` if message is too expensive to be sent to Rococo from this chain. + /// + /// Please keep in mind that this method returns lowest message fee required for message + /// to be accepted to the lane. It may be good idea to pay a bit over this price to account + /// future exchange rate changes and guarantee that relayer would deliver your message + /// to the target chain. + fn estimate_message_delivery_and_dispatch_fee( + lane_id: LaneId, + payload: OutboundPayload, + ) -> Option; + /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. + /// + /// If some (or all) messages are missing from the storage, they'll also will + /// be missing from the resulting vector. The vector is ordered by the nonce. + fn messages_dispatch_weight( + lane: LaneId, + begin: MessageNonce, + end: MessageNonce, + ) -> Vec<(MessageNonce, Weight, u32)>; + /// Returns nonce of the latest message, received by bridged chain. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Returns nonce of the latest message, generated by given lane. 
+ fn latest_generated_nonce(lane: LaneId) -> MessageNonce; + } + + /// Inbound message lane API for messages sent by Rococo chain. + /// + /// This API is implemented by runtimes that are receiving messages from Rococo chain, not the + /// Rococo runtime itself. + pub trait FromRococoInboundLaneApi { + /// Returns nonce of the latest message, received by given lane. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Nonce of latest message that has been confirmed to the bridged chain. + fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; + /// State of the unrewarded relayers set at given lane. + fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; + } +} diff --git a/polkadot/primitives/chain-westend/Cargo.toml b/polkadot/primitives/chain-westend/Cargo.toml new file mode 100644 index 00000000000..d5fda1ccef0 --- /dev/null +++ b/polkadot/primitives/chain-westend/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "bp-westend" +description = "Primitives of Westend runtime." 
+version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] } + +# Bridge Dependencies +bp-header-chain = { path = "../header-chain", default-features = false } +bp-messages = { path = "../messages", default-features = false } +bp-polkadot-core = { path = "../polkadot-core", default-features = false } +bp-runtime = { path = "../runtime", default-features = false } + +# Substrate Based Dependencies +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +[features] +default = ["std"] +std = [ + "bp-header-chain/std", + "bp-messages/std", + "bp-polkadot-core/std", + "bp-runtime/std", + "parity-scale-codec/std", + "sp-api/std", + "sp-runtime/std", + "sp-std/std", + "sp-version/std", +] diff --git a/polkadot/primitives/chain-westend/src/lib.rs b/polkadot/primitives/chain-westend/src/lib.rs new file mode 100644 index 00000000000..db97364ef41 --- /dev/null +++ b/polkadot/primitives/chain-westend/src/lib.rs @@ -0,0 +1,179 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +// RuntimeApi generated functions +#![allow(clippy::too_many_arguments)] +// Runtime-generated DecodeLimit::decode_all_with_depth_limit +#![allow(clippy::unnecessary_mut_passed)] + +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; +use bp_runtime::Chain; +use sp_std::prelude::*; +use sp_version::RuntimeVersion; + +pub use bp_polkadot_core::*; + +/// Westend Chain +pub type Westend = PolkadotLike; + +pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; + +// NOTE: This needs to be kept up to date with the Westend runtime found in the Polkadot repo. +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: sp_version::create_runtime_str!("westend"), + impl_name: sp_version::create_runtime_str!("parity-westend"), + authoring_version: 2, + spec_version: 51, + impl_version: 0, + apis: sp_version::create_apis_vec![[]], + transaction_version: 5, +}; + +/// Westend Runtime `Call` enum. +/// +/// The enum represents a subset of possible `Call`s we can send to Westend chain. +/// Ideally this code would be auto-generated from Metadata, because we want to +/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. +/// +/// All entries here (like pretty much in the entire file) must be kept in sync with Westend +/// `construct_runtime`, so that we maintain SCALE-compatibility. 
+/// +/// See: https://github.com/paritytech/polkadot/blob/master/runtime/westend/src/lib.rs +#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)] +pub enum Call { + /// Rococo bridge pallet. + #[codec(index = 40)] + BridgeGrandpaRococo(BridgeGrandpaRococoCall), +} + +#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)] +#[allow(non_camel_case_types)] +pub enum BridgeGrandpaRococoCall { + #[codec(index = 0)] + submit_finality_proof( + ::Header, + bp_header_chain::justification::GrandpaJustification<::Header>, + ), + #[codec(index = 1)] + initialize(bp_header_chain::InitializationData<::Header>), +} + +impl sp_runtime::traits::Dispatchable for Call { + type Origin = (); + type Config = (); + type Info = (); + type PostInfo = (); + + fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { + unimplemented!("The Call is not expected to be dispatched.") + } +} + +// We use this to get the account on Westend (target) which is derived from Rococo's (source) +// account. +pub fn derive_account_from_rococo_id(id: bp_runtime::SourceAccount) -> AccountId { + let encoded_id = bp_runtime::derive_account_id(bp_runtime::ROCOCO_BRIDGE_INSTANCE, id); + AccountIdConverter::convert(encoded_id) +} + +/// Name of the `WestendFinalityApi::best_finalized` runtime method. +pub const BEST_FINALIZED_WESTEND_HEADER_METHOD: &str = "WestendFinalityApi_best_finalized"; +/// Name of the `WestendFinalityApi::is_known_header` runtime method. +pub const IS_KNOWN_WESTEND_HEADER_METHOD: &str = "WestendFinalityApi_is_known_header"; + +/// Name of the `ToWestendOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method. +pub const TO_WESTEND_ESTIMATE_MESSAGE_FEE_METHOD: &str = + "ToWestendOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; +/// Name of the `ToWestendOutboundLaneApi::messages_dispatch_weight` runtime method. 
+pub const TO_WESTEND_MESSAGES_DISPATCH_WEIGHT_METHOD: &str = "ToWestendOutboundLaneApi_messages_dispatch_weight"; +/// Name of the `ToWestendOutboundLaneApi::latest_generated_nonce` runtime method. +pub const TO_WESTEND_LATEST_GENERATED_NONCE_METHOD: &str = "ToWestendOutboundLaneApi_latest_generated_nonce"; +/// Name of the `ToWestendOutboundLaneApi::latest_received_nonce` runtime method. +pub const TO_WESTEND_LATEST_RECEIVED_NONCE_METHOD: &str = "ToWestendOutboundLaneApi_latest_received_nonce"; + +/// Name of the `FromWestendInboundLaneApi::latest_received_nonce` runtime method. +pub const FROM_WESTEND_LATEST_RECEIVED_NONCE_METHOD: &str = "FromWestendInboundLaneApi_latest_received_nonce"; +/// Name of the `FromWestendInboundLaneApi::latest_confirmed_nonce` runtime method. +pub const FROM_WESTEND_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromWestendInboundLaneApi_latest_confirmed_nonce"; +/// Name of the `FromWestendInboundLaneApi::unrewarded_relayers_state` runtime method. +pub const FROM_WESTEND_UNREWARDED_RELAYERS_STATE: &str = "FromWestendInboundLaneApi_unrewarded_relayers_state"; + +/// The target length of a session (how often authorities change) on Westend measured in number of +/// blocks. +/// +/// Note that since this is a target sessions may change before/after this time depending on network +/// conditions. +pub const SESSION_LENGTH: BlockNumber = 10 * time_units::MINUTES; + +sp_api::decl_runtime_apis! { + /// API for querying information about the finalized Westend headers. + /// + /// This API is implemented by runtimes that are bridging with the Westend chain, not the + /// Westend runtime itself. + pub trait WestendFinalityApi { + /// Returns number and hash of the best finalized header known to the bridge module. + fn best_finalized() -> (BlockNumber, Hash); + /// Returns true if the header is known to the runtime. + fn is_known_header(hash: Hash) -> bool; + } + + /// Outbound message lane API for messages that are sent to Westend chain. 
+ /// + /// This API is implemented by runtimes that are sending messages to Westend chain, not the + /// Westend runtime itself. + pub trait ToWestendOutboundLaneApi { + /// Estimate message delivery and dispatch fee that needs to be paid by the sender on + /// this chain. + /// + /// Returns `None` if message is too expensive to be sent to Westend from this chain. + /// + /// Please keep in mind that this method returns lowest message fee required for message + /// to be accepted to the lane. It may be good idea to pay a bit over this price to account + /// future exchange rate changes and guarantee that relayer would deliver your message + /// to the target chain. + fn estimate_message_delivery_and_dispatch_fee( + lane_id: LaneId, + payload: OutboundPayload, + ) -> Option; + /// Returns total dispatch weight and encoded payload size of all messages in given inclusive range. + /// + /// If some (or all) messages are missing from the storage, they'll also will + /// be missing from the resulting vector. The vector is ordered by the nonce. + fn messages_dispatch_weight( + lane: LaneId, + begin: MessageNonce, + end: MessageNonce, + ) -> Vec<(MessageNonce, Weight, u32)>; + /// Returns nonce of the latest message, received by bridged chain. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Returns nonce of the latest message, generated by given lane. + fn latest_generated_nonce(lane: LaneId) -> MessageNonce; + } + + /// Inbound message lane API for messages sent by Westend chain. + /// + /// This API is implemented by runtimes that are receiving messages from Westend chain, not the + /// Westend runtime itself. + pub trait FromWestendInboundLaneApi { + /// Returns nonce of the latest message, received by given lane. + fn latest_received_nonce(lane: LaneId) -> MessageNonce; + /// Nonce of latest message that has been confirmed to the bridged chain. 
+ fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce; + /// State of the unrewarded relayers set at given lane. + fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState; + } +} diff --git a/polkadot/primitives/currency-exchange/Cargo.toml b/polkadot/primitives/currency-exchange/Cargo.toml new file mode 100644 index 00000000000..43367ba7992 --- /dev/null +++ b/polkadot/primitives/currency-exchange/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "bp-currency-exchange" +description = "Primitives of currency exchange module." +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } + +# Substrate Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "sp-api/std", + "sp-std/std", +] diff --git a/polkadot/primitives/currency-exchange/src/lib.rs b/polkadot/primitives/currency-exchange/src/lib.rs new file mode 100644 index 00000000000..88695dbb5ef --- /dev/null +++ b/polkadot/primitives/currency-exchange/src/lib.rs @@ -0,0 +1,150 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +// RuntimeApi generated functions +#![allow(clippy::too_many_arguments)] +// Generated by `DecodeLimit::decode_with_depth_limit` +#![allow(clippy::unnecessary_mut_passed)] + +use codec::{Decode, Encode, EncodeLike}; +use frame_support::{Parameter, RuntimeDebug}; +use sp_api::decl_runtime_apis; +use sp_std::marker::PhantomData; + +/// All errors that may happen during exchange. +#[derive(RuntimeDebug, PartialEq)] +pub enum Error { + /// Invalid peer blockchain transaction provided. + InvalidTransaction, + /// Peer transaction has invalid amount. + InvalidAmount, + /// Peer transaction has invalid recipient. + InvalidRecipient, + /// Cannot map from peer recipient to this blockchain recipient. + FailedToMapRecipients, + /// Failed to convert from peer blockchain currency to this blockchain currency. + FailedToConvertCurrency, + /// Deposit has failed. + DepositFailed, + /// Deposit has partially failed (changes to recipient account were made). + DepositPartiallyFailed, +} + +/// Result of all exchange operations. +pub type Result = sp_std::result::Result; + +/// Peer blockchain lock funds transaction. +#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)] +pub struct LockFundsTransaction { + /// Something that uniquely identifies this transfer. + pub id: TransferId, + /// Funds recipient on the peer chain. + pub recipient: Recipient, + /// Amount of the locked funds. + pub amount: Amount, +} + +/// Peer blockchain transaction that may represent lock funds transaction. +pub trait MaybeLockFundsTransaction { + /// Transaction type. 
+ type Transaction; + /// Identifier that uniquely identifies this transfer. + type Id: Decode + Encode + EncodeLike + sp_std::fmt::Debug; + /// Peer recipient type. + type Recipient; + /// Peer currency amount type. + type Amount; + + /// Parse lock funds transaction of the peer blockchain. Returns None if + /// transaction format is unknown, or it isn't a lock funds transaction. + fn parse(tx: &Self::Transaction) -> Result>; +} + +/// Map that maps recipients from peer blockchain to this blockchain recipients. +pub trait RecipientsMap { + /// Peer blockchain recipient type. + type PeerRecipient; + /// Current blockchain recipient type. + type Recipient; + + /// Lookup current blockchain recipient by peer blockchain recipient. + fn map(peer_recipient: Self::PeerRecipient) -> Result; +} + +/// Conversion between two currencies. +pub trait CurrencyConverter { + /// Type of the source currency amount. + type SourceAmount; + /// Type of the target currency amount. + type TargetAmount; + + /// Covert from source to target currency. + fn convert(amount: Self::SourceAmount) -> Result; +} + +/// Currency deposit. +pub trait DepositInto { + /// Recipient type. + type Recipient; + /// Currency amount type. + type Amount; + + /// Grant some money to given account. + fn deposit_into(recipient: Self::Recipient, amount: Self::Amount) -> Result<()>; +} + +/// Recipients map which is used when accounts ids are the same on both chains. +#[derive(Debug)] +pub struct IdentityRecipients(PhantomData); + +impl RecipientsMap for IdentityRecipients { + type PeerRecipient = AccountId; + type Recipient = AccountId; + + fn map(peer_recipient: Self::PeerRecipient) -> Result { + Ok(peer_recipient) + } +} + +/// Currency converter which is used when currency is the same on both chains. 
+#[derive(Debug)] +pub struct IdentityCurrencyConverter(PhantomData); + +impl CurrencyConverter for IdentityCurrencyConverter { + type SourceAmount = Amount; + type TargetAmount = Amount; + + fn convert(currency: Self::SourceAmount) -> Result { + Ok(currency) + } +} + +decl_runtime_apis! { + /// API for Rialto exchange transactions submitters. + pub trait RialtoCurrencyExchangeApi { + /// Returns true if currency exchange module is able to import transaction proof in + /// its current state. + fn filter_transaction_proof(proof: Proof) -> bool; + } + + /// API for Kovan exchange transactions submitters. + pub trait KovanCurrencyExchangeApi { + /// Returns true if currency exchange module is able to import transaction proof in + /// its current state. + fn filter_transaction_proof(proof: Proof) -> bool; + } +} diff --git a/polkadot/primitives/ethereum-poa/Cargo.toml b/polkadot/primitives/ethereum-poa/Cargo.toml new file mode 100644 index 00000000000..cd2c3a97a0f --- /dev/null +++ b/polkadot/primitives/ethereum-poa/Cargo.toml @@ -0,0 +1,57 @@ +[package] +name = "bp-eth-poa" +description = "Primitives of Ethereum PoA Bridge module." 
+version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +ethbloom = { version = "0.10.0", default-features = false, features = ["rlp"] } +fixed-hash = { version = "0.7", default-features = false } +hash-db = { version = "0.15.2", default-features = false } +impl-rlp = { version = "0.3", default-features = false } +impl-serde = { version = "0.3.1", optional = true } +libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"] } +parity-bytes = { version = "0.1", default-features = false } +plain_hasher = { version = "0.2.2", default-features = false } +primitive-types = { version = "0.9", default-features = false, features = ["codec", "rlp"] } +rlp = { version = "0.5", default-features = false } +serde = { version = "1.0", optional = true } +serde-big-array = { version = "0.2", optional = true } +triehash = { version = "0.8.2", default-features = false } + +# Substrate Dependencies + +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + +[dev-dependencies] +hex-literal = "0.2" + +[features] +default = ["std"] +std = [ + "codec/std", + "ethbloom/std", + "fixed-hash/std", + "hash-db/std", + "impl-rlp/std", + "impl-serde", + "libsecp256k1/std", + "parity-bytes/std", + "plain_hasher/std", + "primitive-types/std", + "primitive-types/serde", + "rlp/std", + "serde/std", + "serde-big-array", + "sp-api/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "triehash/std", +] diff --git 
a/polkadot/primitives/ethereum-poa/src/lib.rs b/polkadot/primitives/ethereum-poa/src/lib.rs new file mode 100644 index 00000000000..57c539f2e27 --- /dev/null +++ b/polkadot/primitives/ethereum-poa/src/lib.rs @@ -0,0 +1,734 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] +// RuntimeApi generated functions +#![allow(clippy::too_many_arguments)] +// Generated by `DecodeLimit::decode_with_depth_limit` +#![allow(clippy::unnecessary_mut_passed)] + +pub use parity_bytes::Bytes; +pub use primitive_types::{H160, H256, H512, U128, U256}; +pub use rlp::encode as rlp_encode; + +use codec::{Decode, Encode}; +use ethbloom::{Bloom as EthBloom, Input as BloomInput}; +use fixed_hash::construct_fixed_hash; +use rlp::{Decodable, DecoderError, Rlp, RlpStream}; +use sp_io::hashing::keccak_256; +use sp_runtime::RuntimeDebug; +use sp_std::prelude::*; + +use impl_rlp::impl_fixed_hash_rlp; +#[cfg(feature = "std")] +use impl_serde::impl_fixed_hash_serde; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; +#[cfg(feature = "std")] +use serde_big_array::big_array; + +construct_fixed_hash! 
{ pub struct H520(65); } +impl_fixed_hash_rlp!(H520, 65); +#[cfg(feature = "std")] +impl_fixed_hash_serde!(H520, 65); + +/// Raw (RLP-encoded) ethereum transaction. +pub type RawTransaction = Vec; + +/// Raw (RLP-encoded) ethereum transaction receipt. +pub type RawTransactionReceipt = Vec; + +/// An ethereum address. +pub type Address = H160; + +pub mod signatures; + +/// Complete header id. +#[derive(Encode, Decode, Default, RuntimeDebug, PartialEq, Clone, Copy)] +pub struct HeaderId { + /// Header number. + pub number: u64, + /// Header hash. + pub hash: H256, +} + +/// An Aura header. +#[derive(Clone, Default, Encode, Decode, PartialEq, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub struct AuraHeader { + /// Parent block hash. + pub parent_hash: H256, + /// Block timestamp. + pub timestamp: u64, + /// Block number. + pub number: u64, + /// Block author. + pub author: Address, + + /// Transactions root. + pub transactions_root: H256, + /// Block uncles hash. + pub uncles_hash: H256, + /// Block extra data. + pub extra_data: Bytes, + + /// State root. + pub state_root: H256, + /// Block receipts root. + pub receipts_root: H256, + /// Block bloom. + pub log_bloom: Bloom, + /// Gas used for contracts execution. + pub gas_used: U256, + /// Block gas limit. + pub gas_limit: U256, + + /// Block difficulty. + pub difficulty: U256, + /// Vector of post-RLP-encoded fields. + pub seal: Vec, +} + +/// Parsed ethereum transaction. +#[derive(PartialEq, RuntimeDebug)] +pub struct Transaction { + /// Sender address. + pub sender: Address, + /// Unsigned portion of ethereum transaction. + pub unsigned: UnsignedTransaction, +} + +/// Unsigned portion of ethereum transaction. +#[derive(Clone, PartialEq, RuntimeDebug)] +pub struct UnsignedTransaction { + /// Sender nonce. + pub nonce: U256, + /// Gas price. + pub gas_price: U256, + /// Gas limit. + pub gas: U256, + /// Transaction destination address. None if it is contract creation transaction. 
+ pub to: Option
, + /// Value. + pub value: U256, + /// Associated data. + pub payload: Bytes, +} + +/// Information describing execution of a transaction. +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] +pub struct Receipt { + /// The total gas used in the block following execution of the transaction. + pub gas_used: U256, + /// The OR-wide combination of all logs' blooms for this transaction. + pub log_bloom: Bloom, + /// The logs stemming from this transaction. + pub logs: Vec, + /// Transaction outcome. + pub outcome: TransactionOutcome, +} + +/// Transaction outcome store in the receipt. +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] +pub enum TransactionOutcome { + /// Status and state root are unknown under EIP-98 rules. + Unknown, + /// State root is known. Pre EIP-98 and EIP-658 rules. + StateRoot(H256), + /// Status code is known. EIP-658 rules. + StatusCode(u8), +} + +/// A record of execution for a `LOG` operation. +#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)] +pub struct LogEntry { + /// The address of the contract executing at the point of the `LOG` operation. + pub address: Address, + /// The topics associated with the `LOG` operation. + pub topics: Vec, + /// The data associated with the `LOG` operation. + pub data: Bytes, +} + +/// Logs bloom. +#[derive(Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub struct Bloom(#[cfg_attr(feature = "std", serde(with = "BigArray"))] [u8; 256]); + +#[cfg(feature = "std")] +big_array! { BigArray; } + +/// An empty step message that is included in a seal, the only difference is that it doesn't include +/// the `parent_hash` in order to save space. The included signature is of the original empty step +/// message, which can be reconstructed by using the parent hash of the block in which this sealed +/// empty message is included. +pub struct SealedEmptyStep { + /// Signature of the original message author. 
+ pub signature: H520, + /// The step this message is generated for. + pub step: u64, +} + +impl AuraHeader { + /// Compute id of this header. + pub fn compute_id(&self) -> HeaderId { + HeaderId { + number: self.number, + hash: self.compute_hash(), + } + } + + /// Compute hash of this header (keccak of the RLP with seal). + pub fn compute_hash(&self) -> H256 { + keccak_256(&self.rlp(true)).into() + } + + /// Get id of this header' parent. Returns None if this is genesis header. + pub fn parent_id(&self) -> Option { + self.number.checked_sub(1).map(|parent_number| HeaderId { + number: parent_number, + hash: self.parent_hash, + }) + } + + /// Check if passed transactions receipts are matching receipts root in this header. + /// Returns Ok(computed-root) if check succeeds. + /// Returns Err(computed-root) if check fails. + pub fn check_receipts_root(&self, receipts: &[Receipt]) -> Result { + check_merkle_proof(self.receipts_root, receipts.iter().map(|r| r.rlp())) + } + + /// Check if passed raw transactions receipts are matching receipts root in this header. + /// Returns Ok(computed-root) if check succeeds. + /// Returns Err(computed-root) if check fails. + pub fn check_raw_receipts_root<'a>( + &self, + receipts: impl IntoIterator, + ) -> Result { + check_merkle_proof(self.receipts_root, receipts.into_iter()) + } + + /// Check if passed transactions are matching transactions root in this header. + /// Returns Ok(computed-root) if check succeeds. + /// Returns Err(computed-root) if check fails. + pub fn check_transactions_root<'a>( + &self, + transactions: impl IntoIterator, + ) -> Result { + check_merkle_proof(self.transactions_root, transactions.into_iter()) + } + + /// Gets the seal hash of this header. 
+ pub fn seal_hash(&self, include_empty_steps: bool) -> Option { + Some(match include_empty_steps { + true => { + let mut message = self.compute_hash().as_bytes().to_vec(); + message.extend_from_slice(self.seal.get(2)?); + keccak_256(&message).into() + } + false => keccak_256(&self.rlp(false)).into(), + }) + } + + /// Get step this header is generated for. + pub fn step(&self) -> Option { + self.seal.get(0).map(|x| Rlp::new(&x)).and_then(|x| x.as_val().ok()) + } + + /// Get header author' signature. + pub fn signature(&self) -> Option { + self.seal.get(1).and_then(|x| Rlp::new(x).as_val().ok()) + } + + /// Extracts the empty steps from the header seal. + pub fn empty_steps(&self) -> Option> { + self.seal + .get(2) + .and_then(|x| Rlp::new(x).as_list::().ok()) + } + + /// Returns header RLP with or without seals. + fn rlp(&self, with_seal: bool) -> Bytes { + let mut s = RlpStream::new(); + if with_seal { + s.begin_list(13 + self.seal.len()); + } else { + s.begin_list(13); + } + + s.append(&self.parent_hash); + s.append(&self.uncles_hash); + s.append(&self.author); + s.append(&self.state_root); + s.append(&self.transactions_root); + s.append(&self.receipts_root); + s.append(&EthBloom::from(self.log_bloom.0)); + s.append(&self.difficulty); + s.append(&self.number); + s.append(&self.gas_limit); + s.append(&self.gas_used); + s.append(&self.timestamp); + s.append(&self.extra_data); + + if with_seal { + for b in &self.seal { + s.append_raw(b, 1); + } + } + + s.out().to_vec() + } +} + +impl UnsignedTransaction { + /// Decode unsigned portion of raw transaction RLP. 
+ pub fn decode_rlp(raw_tx: &[u8]) -> Result { + let tx_rlp = Rlp::new(raw_tx); + let to = tx_rlp.at(3)?; + Ok(UnsignedTransaction { + nonce: tx_rlp.val_at(0)?, + gas_price: tx_rlp.val_at(1)?, + gas: tx_rlp.val_at(2)?, + to: match to.is_empty() { + false => Some(to.as_val()?), + true => None, + }, + value: tx_rlp.val_at(4)?, + payload: tx_rlp.val_at(5)?, + }) + } + + /// Returns message that has to be signed to sign this transaction. + pub fn message(&self, chain_id: Option) -> H256 { + keccak_256(&self.rlp(chain_id)).into() + } + + /// Returns unsigned transaction RLP. + pub fn rlp(&self, chain_id: Option) -> Bytes { + let mut stream = RlpStream::new_list(if chain_id.is_some() { 9 } else { 6 }); + self.rlp_to(chain_id, &mut stream); + stream.out().to_vec() + } + + /// Encode to given rlp stream. + pub fn rlp_to(&self, chain_id: Option, stream: &mut RlpStream) { + stream.append(&self.nonce); + stream.append(&self.gas_price); + stream.append(&self.gas); + match self.to { + Some(to) => stream.append(&to), + None => stream.append(&""), + }; + stream.append(&self.value); + stream.append(&self.payload); + if let Some(chain_id) = chain_id { + stream.append(&chain_id); + stream.append(&0u8); + stream.append(&0u8); + } + } +} + +impl Receipt { + /// Decode status from raw transaction receipt RLP. + pub fn is_successful_raw_receipt(raw_receipt: &[u8]) -> Result { + let rlp = Rlp::new(raw_receipt); + if rlp.item_count()? == 3 { + // no outcome - invalid tx? + Ok(false) + } else { + let first = rlp.at(0)?; + if first.is_data() && first.data()?.len() <= 1 { + // EIP-658 transaction - status of successful transaction is 1 + let status: u8 = first.as_val()?; + Ok(status == 1) + } else { + // pre-EIP-658 transaction - we do not support this kind of transactions + Ok(false) + } + } + } + + /// Returns receipt RLP. 
+ pub fn rlp(&self) -> Bytes { + let mut s = RlpStream::new(); + match self.outcome { + TransactionOutcome::Unknown => { + s.begin_list(3); + } + TransactionOutcome::StateRoot(ref root) => { + s.begin_list(4); + s.append(root); + } + TransactionOutcome::StatusCode(ref status_code) => { + s.begin_list(4); + s.append(status_code); + } + } + s.append(&self.gas_used); + s.append(&EthBloom::from(self.log_bloom.0)); + + s.begin_list(self.logs.len()); + for log in &self.logs { + s.begin_list(3); + s.append(&log.address); + s.begin_list(log.topics.len()); + for topic in &log.topics { + s.append(topic); + } + s.append(&log.data); + } + + s.out().to_vec() + } +} + +impl SealedEmptyStep { + /// Returns message that has to be signed by the validator. + pub fn message(&self, parent_hash: &H256) -> H256 { + let mut message = RlpStream::new_list(2); + message.append(&self.step); + message.append(parent_hash); + keccak_256(&message.out()).into() + } + + /// Returns rlp for the vector of empty steps (we only do encoding in tests). + pub fn rlp_of(empty_steps: &[SealedEmptyStep]) -> Bytes { + let mut s = RlpStream::new(); + s.begin_list(empty_steps.len()); + for empty_step in empty_steps { + s.begin_list(2).append(&empty_step.signature).append(&empty_step.step); + } + s.out().to_vec() + } +} + +impl Decodable for SealedEmptyStep { + fn decode(rlp: &Rlp) -> Result { + let signature: H520 = rlp.val_at(0)?; + let step = rlp.val_at(1)?; + + Ok(SealedEmptyStep { signature, step }) + } +} + +impl LogEntry { + /// Calculates the bloom of this log entry. + pub fn bloom(&self) -> Bloom { + let eth_bloom = + self.topics + .iter() + .fold(EthBloom::from(BloomInput::Raw(self.address.as_bytes())), |mut b, t| { + b.accrue(BloomInput::Raw(t.as_bytes())); + b + }); + Bloom(*eth_bloom.data()) + } +} + +impl Bloom { + /// Returns true if this bloom has all bits from the other set. 
+ pub fn contains(&self, other: &Bloom) -> bool { + self.0.iter().zip(other.0.iter()).all(|(l, r)| (l & r) == *r) + } +} + +impl<'a> From<&'a [u8; 256]> for Bloom { + fn from(buffer: &'a [u8; 256]) -> Bloom { + Bloom(*buffer) + } +} + +impl PartialEq for Bloom { + fn eq(&self, other: &Bloom) -> bool { + self.0.iter().zip(other.0.iter()).all(|(l, r)| l == r) + } +} + +impl Default for Bloom { + fn default() -> Self { + Bloom([0; 256]) + } +} + +#[cfg(feature = "std")] +impl std::fmt::Debug for Bloom { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.debug_struct("Bloom").finish() + } +} + +/// Decode Ethereum transaction. +pub fn transaction_decode_rlp(raw_tx: &[u8]) -> Result { + // parse transaction fields + let unsigned = UnsignedTransaction::decode_rlp(raw_tx)?; + let tx_rlp = Rlp::new(raw_tx); + let v: u64 = tx_rlp.val_at(6)?; + let r: U256 = tx_rlp.val_at(7)?; + let s: U256 = tx_rlp.val_at(8)?; + + // reconstruct signature + let mut signature = [0u8; 65]; + let (chain_id, v) = match v { + v if v == 27u64 => (None, 0), + v if v == 28u64 => (None, 1), + v if v >= 35u64 => (Some((v - 35) / 2), ((v - 1) % 2) as u8), + _ => (None, 4), + }; + r.to_big_endian(&mut signature[0..32]); + s.to_big_endian(&mut signature[32..64]); + signature[64] = v; + + // reconstruct message that has been signed + let message = unsigned.message(chain_id); + + // recover tx sender + let sender_public = sp_io::crypto::secp256k1_ecdsa_recover(&signature, &message.as_fixed_bytes()) + .map_err(|_| rlp::DecoderError::Custom("Failed to recover transaction sender"))?; + let sender_address = public_to_address(&sender_public); + + Ok(Transaction { + sender: sender_address, + unsigned, + }) +} + +/// Convert public key into corresponding ethereum address. 
+pub fn public_to_address(public: &[u8; 64]) -> Address { + let hash = keccak_256(public); + let mut result = Address::zero(); + result.as_bytes_mut().copy_from_slice(&hash[12..]); + result +} + +/// Check ethereum merkle proof. +/// Returns Ok(computed-root) if check succeeds. +/// Returns Err(computed-root) if check fails. +fn check_merkle_proof>(expected_root: H256, items: impl Iterator) -> Result { + let computed_root = compute_merkle_root(items); + if computed_root == expected_root { + Ok(computed_root) + } else { + Err(computed_root) + } +} + +/// Compute ethereum merkle root. +pub fn compute_merkle_root>(items: impl Iterator) -> H256 { + struct Keccak256Hasher; + + impl hash_db::Hasher for Keccak256Hasher { + type Out = H256; + type StdHasher = plain_hasher::PlainHasher; + const LENGTH: usize = 32; + fn hash(x: &[u8]) -> Self::Out { + keccak_256(x).into() + } + } + + triehash::ordered_trie_root::(items) +} + +/// Get validator that should author the block at given step. +pub fn step_validator(header_validators: &[T], header_step: u64) -> &T { + &header_validators[(header_step % header_validators.len() as u64) as usize] +} + +sp_api::decl_runtime_apis! { + /// API for querying information about headers from the Rialto Bridge Pallet + pub trait RialtoPoAHeaderApi { + /// Returns number and hash of the best block known to the bridge module. + /// + /// The caller should only submit an `import_header` transaction that makes + /// (or leads to making) other header the best one. + fn best_block() -> (u64, H256); + /// Returns number and hash of the best finalized block known to the bridge module. + fn finalized_block() -> (u64, H256); + /// Returns true if the import of given block requires transactions receipts. + fn is_import_requires_receipts(header: AuraHeader) -> bool; + /// Returns true if header is known to the runtime. 
+ fn is_known_block(hash: H256) -> bool; + } + + /// API for querying information about headers from the Kovan Bridge Pallet + pub trait KovanHeaderApi { + /// Returns number and hash of the best block known to the bridge module. + /// + /// The caller should only submit an `import_header` transaction that makes + /// (or leads to making) other header the best one. + fn best_block() -> (u64, H256); + /// Returns number and hash of the best finalized block known to the bridge module. + fn finalized_block() -> (u64, H256); + /// Returns true if the import of given block requires transactions receipts. + fn is_import_requires_receipts(header: AuraHeader) -> bool; + /// Returns true if header is known to the runtime. + fn is_known_block(hash: H256) -> bool; + } +} + +#[cfg(test)] +mod tests { + use super::*; + use hex_literal::hex; + + #[test] + fn transfer_transaction_decode_works() { + // value transfer transaction + // https://etherscan.io/tx/0xb9d4ad5408f53eac8627f9ccd840ba8fb3469d55cd9cc2a11c6e049f1eef4edd + // https://etherscan.io/getRawTx?tx=0xb9d4ad5408f53eac8627f9ccd840ba8fb3469d55cd9cc2a11c6e049f1eef4edd + let raw_tx = hex!("f86c0a85046c7cfe0083016dea94d1310c1e038bc12865d3d3997275b3e4737c6302880b503be34d9fe80080269fc7eaaa9c21f59adf8ad43ed66cf5ef9ee1c317bd4d32cd65401e7aaca47cfaa0387d79c65b90be6260d09dcfb780f29dd8133b9b1ceb20b83b7e442b4bfc30cb"); + assert_eq!( + transaction_decode_rlp(&raw_tx), + Ok(Transaction { + sender: hex!("67835910d32600471f388a137bbff3eb07993c04").into(), + unsigned: UnsignedTransaction { + nonce: 10.into(), + gas_price: 19000000000u64.into(), + gas: 93674.into(), + to: Some(hex!("d1310c1e038bc12865d3d3997275b3e4737c6302").into()), + value: 815217380000000000_u64.into(), + payload: Default::default(), + } + }), + ); + + // Kovan value transfer transaction + // https://kovan.etherscan.io/tx/0x3b4b7bd41c1178045ccb4753aa84c1ef9864b4d712fa308b228917cd837915da + // 
https://kovan.etherscan.io/getRawTx?tx=0x3b4b7bd41c1178045ccb4753aa84c1ef9864b4d712fa308b228917cd837915da + let raw_tx = hex!("f86a822816808252089470c1ccde719d6f477084f07e4137ab0e55f8369f8930cf46e92063afd8008078a00e4d1f4d8aa992bda3c105ff3d6e9b9acbfd99facea00985e2131029290adbdca028ea29a46a4b66ec65b454f0706228e3768cb0ecf755f67c50ddd472f11d5994"); + assert_eq!( + transaction_decode_rlp(&raw_tx), + Ok(Transaction { + sender: hex!("faadface3fbd81ce37b0e19c0b65ff4234148132").into(), + unsigned: UnsignedTransaction { + nonce: 10262.into(), + gas_price: 0.into(), + gas: 21000.into(), + to: Some(hex!("70c1ccde719d6f477084f07e4137ab0e55f8369f").into()), + value: 900379597077600000000_u128.into(), + payload: Default::default(), + }, + }), + ); + } + + #[test] + fn payload_transaction_decode_works() { + // contract call transaction + // https://etherscan.io/tx/0xdc2b996b4d1d6922bf6dba063bfd70913279cb6170967c9bb80252aeb061cf65 + // https://etherscan.io/getRawTx?tx=0xdc2b996b4d1d6922bf6dba063bfd70913279cb6170967c9bb80252aeb061cf65 + let raw_tx = hex!("f8aa76850430e234008301500094dac17f958d2ee523a2206206994597c13d831ec780b844a9059cbb000000000000000000000000e08f35f66867a454835b25118f1e490e7f9e9a7400000000000000000000000000000000000000000000000000000000004c4b4025a0964e023999621dc3d4d831c43c71f7555beb6d1192dee81a3674b3f57e310f21a00f229edd86f841d1ee4dc48cc16667e2283817b1d39bae16ced10cd206ae4fd4"); + assert_eq!( + transaction_decode_rlp(&raw_tx), + Ok(Transaction { + sender: hex!("2b9a4d37bdeecdf994c4c9ad7f3cf8dc632f7d70").into(), + unsigned: UnsignedTransaction { + nonce: 118.into(), + gas_price: 18000000000u64.into(), + gas: 86016.into(), + to: Some(hex!("dac17f958d2ee523a2206206994597c13d831ec7").into()), + value: 0.into(), + payload: hex!("a9059cbb000000000000000000000000e08f35f66867a454835b25118f1e490e7f9e9a7400000000000000000000000000000000000000000000000000000000004c4b40").to_vec(), + }, + }), + ); + + // Kovan contract call transaction + // 
https://kovan.etherscan.io/tx/0x2904b4451d23665492239016b78da052d40d55fdebc7304b38e53cf6a37322cf + // https://kovan.etherscan.io/getRawTx?tx=0x2904b4451d23665492239016b78da052d40d55fdebc7304b38e53cf6a37322cf + let raw_tx = hex!("f8ac8302200b843b9aca00830271009484dd11eb2a29615303d18149c0dbfa24167f896680b844a9059cbb00000000000000000000000001503dfc5ad81bf630d83697e98601871bb211b600000000000000000000000000000000000000000000000000000000000027101ba0ce126d2cca81f5e245f292ff84a0d915c0a4ac52af5c51219db1e5d36aa8da35a0045298b79dac631907403888f9b04c2ab5509fe0cc31785276d30a40b915fcf9"); + assert_eq!( + transaction_decode_rlp(&raw_tx), + Ok(Transaction { + sender: hex!("617da121abf03d4c1af572f5a4e313e26bef7bdc").into(), + unsigned: UnsignedTransaction { + nonce: 139275.into(), + gas_price: 1000000000.into(), + gas: 160000.into(), + to: Some(hex!("84dd11eb2a29615303d18149c0dbfa24167f8966").into()), + value: 0.into(), + payload: hex!("a9059cbb00000000000000000000000001503dfc5ad81bf630d83697e98601871bb211b60000000000000000000000000000000000000000000000000000000000002710").to_vec(), + }, + }), + ); + } + + #[test] + fn is_successful_raw_receipt_works() { + assert!(Receipt::is_successful_raw_receipt(&[]).is_err()); + + assert_eq!( + Receipt::is_successful_raw_receipt( + &Receipt { + outcome: TransactionOutcome::Unknown, + gas_used: Default::default(), + log_bloom: Default::default(), + logs: Vec::new(), + } + .rlp() + ), + Ok(false), + ); + assert_eq!( + Receipt::is_successful_raw_receipt( + &Receipt { + outcome: TransactionOutcome::StateRoot(Default::default()), + gas_used: Default::default(), + log_bloom: Default::default(), + logs: Vec::new(), + } + .rlp() + ), + Ok(false), + ); + assert_eq!( + Receipt::is_successful_raw_receipt( + &Receipt { + outcome: TransactionOutcome::StatusCode(0), + gas_used: Default::default(), + log_bloom: Default::default(), + logs: Vec::new(), + } + .rlp() + ), + Ok(false), + ); + assert_eq!( + Receipt::is_successful_raw_receipt( + &Receipt { + outcome: 
TransactionOutcome::StatusCode(1), + gas_used: Default::default(), + log_bloom: Default::default(), + logs: Vec::new(), + } + .rlp() + ), + Ok(true), + ); + } + + #[test] + fn is_successful_raw_receipt_with_empty_data() { + let mut stream = RlpStream::new(); + stream.begin_list(4); + stream.append_empty_data(); + stream.append(&1u64); + stream.append(&2u64); + stream.append(&3u64); + + assert_eq!(Receipt::is_successful_raw_receipt(&stream.out()), Ok(false),); + } +} diff --git a/polkadot/primitives/ethereum-poa/src/signatures.rs b/polkadot/primitives/ethereum-poa/src/signatures.rs new file mode 100644 index 00000000000..a4e076f2200 --- /dev/null +++ b/polkadot/primitives/ethereum-poa/src/signatures.rs @@ -0,0 +1,143 @@ +// Copyright 2020-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . +// + +//! Helpers related to signatures. +//! +//! Used for testing and benchmarking. + +// reexport to avoid direct secp256k1 deps by other crates +pub use secp256k1::SecretKey; + +use crate::{ + public_to_address, rlp_encode, step_validator, Address, AuraHeader, RawTransaction, UnsignedTransaction, H256, + H520, U256, +}; + +use secp256k1::{Message, PublicKey}; + +/// Utilities for signing headers. +pub trait SignHeader { + /// Signs header by given author. 
+ fn sign_by(self, author: &SecretKey) -> AuraHeader; + /// Signs header by given authors set. + fn sign_by_set(self, authors: &[SecretKey]) -> AuraHeader; +} + +/// Utilities for signing transactions. +pub trait SignTransaction { + /// Sign transaction by given author. + fn sign_by(self, author: &SecretKey, chain_id: Option) -> RawTransaction; +} + +impl SignHeader for AuraHeader { + fn sign_by(mut self, author: &SecretKey) -> Self { + self.author = secret_to_address(author); + + let message = self.seal_hash(false).unwrap(); + let signature = sign(author, message); + self.seal[1] = rlp_encode(&signature).to_vec(); + self + } + + fn sign_by_set(self, authors: &[SecretKey]) -> Self { + let step = self.step().unwrap(); + let author = step_validator(authors, step); + self.sign_by(author) + } +} + +impl SignTransaction for UnsignedTransaction { + fn sign_by(self, author: &SecretKey, chain_id: Option) -> RawTransaction { + let message = self.message(chain_id); + let signature = sign(author, message); + let signature_r = U256::from_big_endian(&signature.as_fixed_bytes()[..32][..]); + let signature_s = U256::from_big_endian(&signature.as_fixed_bytes()[32..64][..]); + let signature_v = signature.as_fixed_bytes()[64] as u64; + let signature_v = signature_v + if let Some(n) = chain_id { 35 + n * 2 } else { 27 }; + + let mut stream = rlp::RlpStream::new_list(9); + self.rlp_to(None, &mut stream); + stream.append(&signature_v); + stream.append(&signature_r); + stream.append(&signature_s); + stream.out().to_vec() + } +} + +/// Return author's signature over given message. +pub fn sign(author: &SecretKey, message: H256) -> H520 { + let (signature, recovery_id) = secp256k1::sign(&Message::parse(message.as_fixed_bytes()), author); + let mut raw_signature = [0u8; 65]; + raw_signature[..64].copy_from_slice(&signature.serialize()); + raw_signature[64] = recovery_id.serialize(); + raw_signature.into() +} + +/// Returns address corresponding to given secret key. 
+pub fn secret_to_address(secret: &SecretKey) -> Address { + let public = PublicKey::from_secret_key(secret); + let mut raw_public = [0u8; 64]; + raw_public.copy_from_slice(&public.serialize()[1..]); + public_to_address(&raw_public) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{transaction_decode_rlp, Transaction}; + + #[test] + fn transaction_signed_properly() { + // case1: with chain_id replay protection + to + let signer = SecretKey::parse(&[1u8; 32]).unwrap(); + let signer_address = secret_to_address(&signer); + let unsigned = UnsignedTransaction { + nonce: 100.into(), + gas_price: 200.into(), + gas: 300.into(), + to: Some([42u8; 20].into()), + value: 400.into(), + payload: vec![1, 2, 3], + }; + let raw_tx = unsigned.clone().sign_by(&signer, Some(42)); + assert_eq!( + transaction_decode_rlp(&raw_tx), + Ok(Transaction { + sender: signer_address, + unsigned, + }), + ); + + // case2: without chain_id replay protection + contract creation + let unsigned = UnsignedTransaction { + nonce: 100.into(), + gas_price: 200.into(), + gas: 300.into(), + to: None, + value: 400.into(), + payload: vec![1, 2, 3], + }; + let raw_tx = unsigned.clone().sign_by(&signer, None); + assert_eq!( + transaction_decode_rlp(&raw_tx), + Ok(Transaction { + sender: signer_address, + unsigned, + }), + ); + } +} diff --git a/polkadot/primitives/header-chain/Cargo.toml b/polkadot/primitives/header-chain/Cargo.toml new file mode 100644 index 00000000000..dc58dafb979 --- /dev/null +++ b/polkadot/primitives/header-chain/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "bp-header-chain" +description = "A common interface for describing what a bridge pallet should be able to do." 
+version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +finality-grandpa = { version = "0.14.0", default-features = false } +serde = { version = "1.0", optional = true } + +# Substrate Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + +[dev-dependencies] +bp-test-utils = { path = "../test-utils" } + +[features] +default = ["std"] +std = [ + "codec/std", + "finality-grandpa/std", + "serde/std", + "frame-support/std", + "sp-core/std", + "sp-finality-grandpa/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/polkadot/primitives/header-chain/src/justification.rs b/polkadot/primitives/header-chain/src/justification.rs new file mode 100644 index 00000000000..139b4303243 --- /dev/null +++ b/polkadot/primitives/header-chain/src/justification.rs @@ -0,0 +1,185 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Pallet for checking GRANDPA Finality Proofs. +//! +//! Adapted copy of substrate/client/finality-grandpa/src/justification.rs. If origin +//! will ever be moved to the sp_finality_grandpa, we should reuse that implementation. + +use codec::{Decode, Encode}; +use finality_grandpa::{voter_set::VoterSet, Chain, Error as GrandpaError}; +use frame_support::RuntimeDebug; +use sp_finality_grandpa::{AuthorityId, AuthoritySignature, SetId}; +use sp_runtime::traits::Header as HeaderT; +use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet}; +use sp_std::prelude::*; + +/// Justification verification error. +#[derive(RuntimeDebug, PartialEq)] +pub enum Error { + /// Failed to decode justification. + JustificationDecode, + /// Justification is finalizing unexpected header. + InvalidJustificationTarget, + /// Invalid commit in justification. + InvalidJustificationCommit, + /// Justification has invalid authority singature. + InvalidAuthoritySignature, + /// The justification has precommit for the header that has no route from the target header. + InvalidPrecommitAncestryProof, + /// The justification has 'unused' headers in its precommit ancestries. + InvalidPrecommitAncestries, +} + +/// Decode justification target. +pub fn decode_justification_target( + raw_justification: &[u8], +) -> Result<(Header::Hash, Header::Number), Error> { + GrandpaJustification::
::decode(&mut &*raw_justification) + .map(|justification| (justification.commit.target_hash, justification.commit.target_number)) + .map_err(|_| Error::JustificationDecode) +} + +/// Verify that justification, that is generated by given authority set, finalizes given header. +pub fn verify_justification( + finalized_target: (Header::Hash, Header::Number), + authorities_set_id: SetId, + authorities_set: &VoterSet, + justification: &GrandpaJustification
, +) -> Result<(), Error> +where + Header::Number: finality_grandpa::BlockNumberOps, +{ + // Ensure that it is justification for the expected header + if (justification.commit.target_hash, justification.commit.target_number) != finalized_target { + return Err(Error::InvalidJustificationTarget); + } + + // Validate commit of the justification. Note that `validate_commit()` assumes that all + // signatures are valid. We'll check the validity of the signatures later since they're more + // resource intensive to verify. + let ancestry_chain = AncestryChain::new(&justification.votes_ancestries); + match finality_grandpa::validate_commit(&justification.commit, authorities_set, &ancestry_chain) { + Ok(ref result) if result.ghost().is_some() => {} + _ => return Err(Error::InvalidJustificationCommit), + } + + // Now that we know that the commit is correct, check authorities signatures + let mut buf = Vec::new(); + let mut visited_hashes = BTreeSet::new(); + for signed in &justification.commit.precommits { + if !sp_finality_grandpa::check_message_signature_with_buffer( + &finality_grandpa::Message::Precommit(signed.precommit.clone()), + &signed.id, + &signed.signature, + justification.round, + authorities_set_id, + &mut buf, + ) { + return Err(Error::InvalidAuthoritySignature); + } + + if justification.commit.target_hash == signed.precommit.target_hash { + continue; + } + + match ancestry_chain.ancestry(justification.commit.target_hash, signed.precommit.target_hash) { + Ok(route) => { + // ancestry starts from parent hash but the precommit target hash has been visited + visited_hashes.insert(signed.precommit.target_hash); + visited_hashes.extend(route); + } + _ => { + // could this happen in practice? 
I don't think so, but original code has this check + return Err(Error::InvalidPrecommitAncestryProof); + } + } + } + + let ancestry_hashes = justification + .votes_ancestries + .iter() + .map(|h: &Header| h.hash()) + .collect(); + if visited_hashes != ancestry_hashes { + return Err(Error::InvalidPrecommitAncestries); + } + + Ok(()) +} + +/// A GRANDPA Justification is a proof that a given header was finalized +/// at a certain height and with a certain set of authorities. +/// +/// This particular proof is used to prove that headers on a bridged chain +/// (so not our chain) have been finalized correctly. +#[derive(Encode, Decode, RuntimeDebug, Clone, PartialEq, Eq)] +pub struct GrandpaJustification { + /// The round (voting period) this justification is valid for. + pub round: u64, + /// The set of votes for the chain which is to be finalized. + pub commit: finality_grandpa::Commit, + /// A proof that the chain of blocks in the commit are related to each other. + pub votes_ancestries: Vec
, +} + +impl crate::FinalityProof for GrandpaJustification { + fn target_header_number(&self) -> H::Number { + self.commit.target_number + } +} + +/// A utility trait implementing `finality_grandpa::Chain` using a given set of headers. +#[derive(RuntimeDebug)] +struct AncestryChain { + ancestry: BTreeMap, +} + +impl AncestryChain
{ + fn new(ancestry: &[Header]) -> AncestryChain
{ + AncestryChain { + ancestry: ancestry + .iter() + .map(|header| (header.hash(), *header.parent_hash())) + .collect(), + } + } +} + +impl finality_grandpa::Chain for AncestryChain
+where + Header::Number: finality_grandpa::BlockNumberOps, +{ + fn ancestry(&self, base: Header::Hash, block: Header::Hash) -> Result, GrandpaError> { + let mut route = Vec::new(); + let mut current_hash = block; + loop { + if current_hash == base { + break; + } + match self.ancestry.get(¤t_hash).cloned() { + Some(parent_hash) => { + current_hash = parent_hash; + route.push(current_hash); + } + _ => return Err(GrandpaError::NotDescendent), + } + } + route.pop(); // remove the base + + Ok(route) + } +} diff --git a/polkadot/primitives/header-chain/src/lib.rs b/polkadot/primitives/header-chain/src/lib.rs new file mode 100644 index 00000000000..adac6eb2688 --- /dev/null +++ b/polkadot/primitives/header-chain/src/lib.rs @@ -0,0 +1,133 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Defines traits which represent a common interface for Substrate pallets which want to +//! incorporate bridge functionality. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::{Codec, Decode, Encode, EncodeLike}; +use core::clone::Clone; +use core::cmp::Eq; +use core::default::Default; +use core::fmt::Debug; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; +use sp_finality_grandpa::{AuthorityList, ConsensusLog, SetId, GRANDPA_ENGINE_ID}; +use sp_runtime::RuntimeDebug; +use sp_runtime::{generic::OpaqueDigestItemId, traits::Header as HeaderT}; + +pub mod justification; + +/// A type that can be used as a parameter in a dispatchable function. +/// +/// When using `decl_module` all arguments for call functions must implement this trait. +pub trait Parameter: Codec + EncodeLike + Clone + Eq + Debug {} +impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + Debug {} + +/// A GRANDPA Authority List and ID. +#[derive(Default, Encode, Decode, RuntimeDebug, PartialEq, Clone)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub struct AuthoritySet { + /// List of GRANDPA authorities for the current round. + pub authorities: AuthorityList, + /// Monotonic identifier of the current GRANDPA authority set. + pub set_id: SetId, +} + +impl AuthoritySet { + /// Create a new GRANDPA Authority Set. + pub fn new(authorities: AuthorityList, set_id: SetId) -> Self { + Self { authorities, set_id } + } +} + +/// Data required for initializing the bridge pallet. +/// +/// The bridge needs to know where to start its sync from, and this provides that initial context. +#[derive(Default, Encode, Decode, RuntimeDebug, PartialEq, Eq, Clone)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub struct InitializationData { + /// The header from which we should start syncing. + pub header: H, + /// The initial authorities of the pallet. + pub authority_list: AuthorityList, + /// The ID of the initial authority set. + pub set_id: SetId, + /// Should the pallet block transaction immediately after initialization. 
+ pub is_halted: bool, +} + +/// base trait for verifying transaction inclusion proofs. +pub trait InclusionProofVerifier { + /// Transaction type. + type Transaction: Parameter; + /// Transaction inclusion proof type. + type TransactionInclusionProof: Parameter; + + /// Verify that transaction is a part of given block. + /// + /// Returns Some(transaction) if proof is valid and None otherwise. + fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option; +} + +/// A trait for pallets which want to keep track of finalized headers from a bridged chain. +pub trait HeaderChain { + /// Get the best finalized header known to the header chain. + fn best_finalized() -> H; + + /// Get the best authority set known to the header chain. + fn authority_set() -> AuthoritySet; + + /// Write a header finalized by GRANDPA to the underlying pallet storage. + fn append_header(header: H) -> Result<(), E>; +} + +impl HeaderChain for () { + fn best_finalized() -> H { + H::default() + } + + fn authority_set() -> AuthoritySet { + AuthoritySet::default() + } + + fn append_header(_header: H) -> Result<(), E> { + Ok(()) + } +} + +/// Abstract finality proof that is justifying block finality. +pub trait FinalityProof: Clone + Send + Sync + Debug { + /// Return number of header that this proof is generated for. + fn target_header_number(&self) -> Number; +} + +/// Find header digest that schedules next GRANDPA authorities set. +pub fn find_grandpa_authorities_scheduled_change( + header: &H, +) -> Option> { + let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); + + let filter_log = |log: ConsensusLog| match log { + ConsensusLog::ScheduledChange(change) => Some(change), + _ => None, + }; + + // find the first consensus digest with the right ID which converts to + // the right kind of consensus log. 
+ header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) +} diff --git a/polkadot/primitives/header-chain/tests/justification.rs b/polkadot/primitives/header-chain/tests/justification.rs new file mode 100644 index 00000000000..1ce739e4536 --- /dev/null +++ b/polkadot/primitives/header-chain/tests/justification.rs @@ -0,0 +1,191 @@ +// Copyright 2020-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Tests for Grandpa Justification code. 
+ +use bp_header_chain::justification::{verify_justification, Error}; +use bp_test_utils::*; + +type TestHeader = sp_runtime::testing::Header; + +#[test] +fn valid_justification_accepted() { + let authorities = vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1), (EVE, 1)]; + let params = JustificationGeneratorParams { + header: test_header(1), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: authorities.clone(), + votes: 7, + forks: 3, + }; + + let justification = make_justification_for_header::(params.clone()); + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &voter_set(), + &justification, + ), + Ok(()), + ); + + assert_eq!(justification.commit.precommits.len(), authorities.len()); + assert_eq!(justification.votes_ancestries.len(), params.votes as usize); +} + +#[test] +fn valid_justification_accepted_with_single_fork() { + let params = JustificationGeneratorParams { + header: test_header(1), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1), (EVE, 1)], + votes: 5, + forks: 1, + }; + + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &voter_set(), + &make_justification_for_header::(params) + ), + Ok(()), + ); +} + +#[test] +fn valid_justification_accepted_with_arbitrary_number_of_authorities() { + use finality_grandpa::voter_set::VoterSet; + use sp_finality_grandpa::AuthorityId; + + let n = 15; + let authorities = accounts(n).iter().map(|k| (*k, 1)).collect::>(); + + let params = JustificationGeneratorParams { + header: test_header(1), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: authorities.clone(), + votes: n.into(), + forks: n.into(), + }; + + let authorities = authorities + .iter() + .map(|(id, w)| (AuthorityId::from(*id), *w)) + .collect::>(); + let voter_set = VoterSet::new(authorities).unwrap(); + + assert_eq!( + verify_justification::( + header_id::(1), + 
TEST_GRANDPA_SET_ID, + &voter_set, + &make_justification_for_header::(params) + ), + Ok(()), + ); +} + +#[test] +fn justification_with_invalid_target_rejected() { + assert_eq!( + verify_justification::( + header_id::(2), + TEST_GRANDPA_SET_ID, + &voter_set(), + &make_default_justification::(&test_header(1)), + ), + Err(Error::InvalidJustificationTarget), + ); +} + +#[test] +fn justification_with_invalid_commit_rejected() { + let mut justification = make_default_justification::(&test_header(1)); + justification.commit.precommits.clear(); + + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &voter_set(), + &justification, + ), + Err(Error::InvalidJustificationCommit), + ); +} + +#[test] +fn justification_with_invalid_authority_signature_rejected() { + let mut justification = make_default_justification::(&test_header(1)); + justification.commit.precommits[0].signature = Default::default(); + + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &voter_set(), + &justification, + ), + Err(Error::InvalidAuthoritySignature), + ); +} + +#[test] +fn justification_with_invalid_precommit_ancestry() { + let mut justification = make_default_justification::(&test_header(1)); + justification.votes_ancestries.push(test_header(10)); + + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + &voter_set(), + &justification, + ), + Err(Error::InvalidPrecommitAncestries), + ); +} + +#[test] +fn justification_is_invalid_if_we_dont_meet_threshold() { + // Need at least three authorities to sign off or else the voter set threshold can't be reached + let authorities = vec![(ALICE, 1), (BOB, 1)]; + + let params = JustificationGeneratorParams { + header: test_header(1), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: authorities.clone(), + votes: 2 * authorities.len() as u32, + forks: 2, + }; + + assert_eq!( + verify_justification::( + header_id::(1), + TEST_GRANDPA_SET_ID, + 
&voter_set(), + &make_justification_for_header::(params) + ), + Err(Error::InvalidJustificationCommit), + ); +} diff --git a/polkadot/primitives/message-dispatch/Cargo.toml b/polkadot/primitives/message-dispatch/Cargo.toml new file mode 100644 index 00000000000..293c637e8df --- /dev/null +++ b/polkadot/primitives/message-dispatch/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "bp-message-dispatch" +description = "Primitives of bridge messages dispatch modules." +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +bp-runtime = { path = "../runtime", default-features = false } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } + +[features] +default = ["std"] +std = [ + "bp-runtime/std", + "codec/std", +] diff --git a/polkadot/primitives/message-dispatch/src/lib.rs b/polkadot/primitives/message-dispatch/src/lib.rs new file mode 100644 index 00000000000..3b83e38517e --- /dev/null +++ b/polkadot/primitives/message-dispatch/src/lib.rs @@ -0,0 +1,49 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! A common interface for all Bridge Message Dispatch modules. 
+ +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(missing_docs)] + +use bp_runtime::InstanceId; + +/// Message dispatch weight. +pub type Weight = u64; + +/// A generic trait to dispatch arbitrary messages delivered over the bridge. +pub trait MessageDispatch { + /// A type of the message to be dispatched. + type Message: codec::Decode; + + /// Estimate dispatch weight. + /// + /// This function must: (1) be instant and (2) return correct upper bound + /// of dispatch weight. + fn dispatch_weight(message: &Self::Message) -> Weight; + + /// Dispatches the message internally. + /// + /// `bridge` indicates instance of deployed bridge where the message came from. + /// + /// `id` is a short unique identifier of the message. + /// + /// If message is `Ok`, then it should be dispatched. If it is `Err`, then it's just + /// a sign that some other component has rejected the message even before it has + /// reached `dispatch` method (right now this may only be caused if we fail to decode + /// the whole message). + fn dispatch(bridge: InstanceId, id: MessageId, message: Result); +} diff --git a/polkadot/primitives/messages/Cargo.toml b/polkadot/primitives/messages/Cargo.toml new file mode 100644 index 00000000000..9cb037a34ce --- /dev/null +++ b/polkadot/primitives/messages/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "bp-messages" +description = "Primitives of messages module." 
+version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } + +# Bridge dependencies + +bp-runtime = { path = "../runtime", default-features = false } + +# Substrate Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + +[features] +default = ["std"] +std = [ + "bp-runtime/std", + "codec/std", + "frame-support/std", + "frame-system/std", + "sp-std/std" +] diff --git a/polkadot/primitives/messages/src/lib.rs b/polkadot/primitives/messages/src/lib.rs new file mode 100644 index 00000000000..c3ffce8baa5 --- /dev/null +++ b/polkadot/primitives/messages/src/lib.rs @@ -0,0 +1,228 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Primitives of messages module. 
+ +#![cfg_attr(not(feature = "std"), no_std)] +// RuntimeApi generated functions +#![allow(clippy::too_many_arguments)] +// Generated by `DecodeLimit::decode_with_depth_limit` +#![allow(clippy::unnecessary_mut_passed)] + +use codec::{Decode, Encode}; +use frame_support::RuntimeDebug; +use sp_std::{collections::vec_deque::VecDeque, prelude::*}; + +pub mod source_chain; +pub mod target_chain; + +// Weight is reexported to avoid additional frame-support dependencies in related crates. +pub use frame_support::weights::Weight; + +/// Messages pallet parameter. +pub trait Parameter: frame_support::Parameter { + /// Save parameter value in the runtime storage. + fn save(&self); +} + +/// Lane identifier. +pub type LaneId = [u8; 4]; + +/// Message nonce. Valid messages will never have 0 nonce. +pub type MessageNonce = u64; + +/// Message id as a tuple. +pub type MessageId = (LaneId, MessageNonce); + +/// Opaque message payload. We only decode this payload when it is dispatched. +pub type MessagePayload = Vec; + +/// Message key (unique message identifier) as it is stored in the storage. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub struct MessageKey { + /// ID of the message lane. + pub lane_id: LaneId, + /// Message nonce. + pub nonce: MessageNonce, +} + +/// Message data as it is stored in the storage. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub struct MessageData { + /// Message payload. + pub payload: MessagePayload, + /// Message delivery and dispatch fee, paid by the submitter. + pub fee: Fee, +} + +/// Message as it is stored in the storage. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub struct Message { + /// Message key. + pub key: MessageKey, + /// Message data. + pub data: MessageData, +} + +/// Inbound lane data. 
+#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)] +pub struct InboundLaneData { + /// Identifiers of relayers and messages that they have delivered to this lane (ordered by message nonce). + /// + /// This serves as a helper storage item, to allow the source chain to easily pay rewards + /// to the relayers who succesfuly delivered messages to the target chain (inbound lane). + /// + /// It is guaranteed to have at most N entries, where N is configured at the module level. + /// If there are N entries in this vec, then: + /// 1) all incoming messages are rejected if they're missing corresponding `proof-of(outbound-lane.state)`; + /// 2) all incoming messages are rejected if `proof-of(outbound-lane.state).last_delivered_nonce` is + /// equal to `self.last_confirmed_nonce`. + /// Given what is said above, all nonces in this queue are in range: + /// `(self.last_confirmed_nonce; self.last_delivered_nonce()]`. + /// + /// When a relayer sends a single message, both of MessageNonces are the same. + /// When relayer sends messages in a batch, the first arg is the lowest nonce, second arg the highest nonce. + /// Multiple dispatches from the same relayer are allowed. + pub relayers: VecDeque<(MessageNonce, MessageNonce, RelayerId)>, + + /// Nonce of the last message that + /// a) has been delivered to the target (this) chain and + /// b) the delivery has been confirmed on the source chain + /// + /// that the target chain knows of. + /// + /// This value is updated indirectly when an `OutboundLane` state of the source + /// chain is received alongside with new messages delivery. + pub last_confirmed_nonce: MessageNonce, +} + +impl Default for InboundLaneData { + fn default() -> Self { + InboundLaneData { + relayers: VecDeque::new(), + last_confirmed_nonce: 0, + } + } +} + +impl InboundLaneData { + /// Returns approximate size of the struct, given number of entries in the `relayers` set and + /// size of each entry. 
+ /// + /// Returns `None` if size overflows `u32` limits. + pub fn encoded_size_hint(relayer_id_encoded_size: u32, relayers_entries: u32) -> Option { + let message_nonce_size = 8; + let relayers_entry_size = relayer_id_encoded_size.checked_add(2 * message_nonce_size)?; + let relayers_size = relayers_entries.checked_mul(relayers_entry_size)?; + relayers_size.checked_add(message_nonce_size) + } + + /// Nonce of the last message that has been delivered to this (target) chain. + pub fn last_delivered_nonce(&self) -> MessageNonce { + self.relayers + .back() + .map(|(_, last_nonce, _)| *last_nonce) + .unwrap_or(self.last_confirmed_nonce) + } +} + +/// Gist of `InboundLaneData::relayers` field used by runtime APIs. +#[derive(Clone, Default, Encode, Decode, RuntimeDebug, PartialEq, Eq)] +pub struct UnrewardedRelayersState { + /// Number of entries in the `InboundLaneData::relayers` set. + pub unrewarded_relayer_entries: MessageNonce, + /// Number of messages in the oldest entry of `InboundLaneData::relayers`. This is the + /// minimal number of reward proofs required to push out this entry from the set. + pub messages_in_oldest_entry: MessageNonce, + /// Total number of messages in the relayers vector. + pub total_messages: MessageNonce, +} + +/// Outbound lane data. +#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)] +pub struct OutboundLaneData { + /// Nonce of oldest message that we haven't yet pruned. May point to not-yet-generated message if + /// all sent messages are already pruned. + pub oldest_unpruned_nonce: MessageNonce, + /// Nonce of latest message, received by bridged chain. + pub latest_received_nonce: MessageNonce, + /// Nonce of latest message, generated by us. 
+ pub latest_generated_nonce: MessageNonce, +} + +impl Default for OutboundLaneData { + fn default() -> Self { + OutboundLaneData { + // it is 1 because we're pruning everything in [oldest_unpruned_nonce; latest_received_nonce] + oldest_unpruned_nonce: 1, + latest_received_nonce: 0, + latest_generated_nonce: 0, + } + } +} + +/// Returns total number of messages in the `InboundLaneData::relayers` vector. +/// +/// Returns `None` if there are more messages that `MessageNonce` may fit (i.e. `MessageNonce + 1`). +pub fn total_unrewarded_messages( + relayers: &VecDeque<(MessageNonce, MessageNonce, RelayerId)>, +) -> Option { + match (relayers.front(), relayers.back()) { + (Some((begin, _, _)), Some((_, end, _))) => { + if let Some(difference) = end.checked_sub(*begin) { + difference.checked_add(1) + } else { + Some(0) + } + } + _ => Some(0), + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn total_unrewarded_messages_does_not_overflow() { + assert_eq!( + total_unrewarded_messages( + &vec![(0, 0, 1), (MessageNonce::MAX, MessageNonce::MAX, 2)] + .into_iter() + .collect() + ), + None, + ); + } + + #[test] + fn inbound_lane_data_returns_correct_hint() { + let expected_size = InboundLaneData::::encoded_size_hint(1, 13); + let actual_size = InboundLaneData { + relayers: (1u8..=13u8).map(|i| (i as _, i as _, i)).collect(), + last_confirmed_nonce: 13, + } + .encode() + .len(); + let difference = (expected_size.unwrap() as f64 - actual_size as f64).abs(); + assert!( + difference / (std::cmp::min(actual_size, expected_size.unwrap() as usize) as f64) < 0.1, + "Too large difference between actual ({}) and expected ({:?}) inbound lane data size", + actual_size, + expected_size, + ); + } +} diff --git a/polkadot/primitives/messages/src/source_chain.rs b/polkadot/primitives/messages/src/source_chain.rs new file mode 100644 index 00000000000..1d313634bcb --- /dev/null +++ b/polkadot/primitives/messages/src/source_chain.rs @@ -0,0 +1,192 @@ +// Copyright 2019-2021 
Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Primitives of messages module, that are used on the source chain. + +use crate::{InboundLaneData, LaneId, MessageNonce, OutboundLaneData}; + +use bp_runtime::Size; +use frame_support::{Parameter, RuntimeDebug}; +use sp_std::{collections::btree_map::BTreeMap, fmt::Debug}; + +/// The sender of the message on the source chain. +pub type Sender = frame_system::RawOrigin; + +/// Relayers rewards, grouped by relayer account id. +pub type RelayersRewards = BTreeMap>; + +/// Single relayer rewards. +#[derive(RuntimeDebug, Default)] +pub struct RelayerRewards { + /// Total rewards that are to be paid to the relayer. + pub reward: Balance, + /// Total number of messages relayed by this relayer. + pub messages: MessageNonce, +} + +/// Target chain API. Used by source chain to verify target chain proofs. +/// +/// All implementations of this trait should only work with finalized data that +/// can't change. Wrong implementation may lead to invalid lane states (i.e. lane +/// that's stuck) and/or processing messages without paying fees. +pub trait TargetHeaderChain { + /// Error type. + type Error: Debug + Into<&'static str>; + + /// Proof that messages have been received by target chain. 
+ type MessagesDeliveryProof: Parameter + Size; + + /// Verify message payload before we accept it. + /// + /// **CAUTION**: this is very important function. Incorrect implementation may lead + /// to stuck lanes and/or relayers loses. + /// + /// The proper implementation must ensure that the delivery-transaction with this + /// payload would (at least) be accepted into target chain transaction pool AND + /// eventually will be successfully 'mined'. The most obvious incorrect implementation + /// example would be implementation for BTC chain that accepts payloads larger than + /// 1MB. BTC nodes aren't accepting transactions that are larger than 1MB, so relayer + /// will be unable to craft valid transaction => this (and all subsequent) messages will + /// never be delivered. + fn verify_message(payload: &Payload) -> Result<(), Self::Error>; + + /// Verify messages delivery proof and return lane && nonce of the latest recevied message. + fn verify_messages_delivery_proof( + proof: Self::MessagesDeliveryProof, + ) -> Result<(LaneId, InboundLaneData), Self::Error>; +} + +/// Lane message verifier. +/// +/// Runtime developer may implement any additional validation logic over message-lane mechanism. +/// E.g. if lanes should have some security (e.g. you can only accept Lane1 messages from +/// Submitter1, Lane2 messages for those who has submitted first message to this lane, disable +/// Lane3 until some block, ...), then it may be built using this verifier. +/// +/// Any fee requirements should also be enforced here. +pub trait LaneMessageVerifier { + /// Error type. + type Error: Debug + Into<&'static str>; + + /// Verify message payload and return Ok(()) if message is valid and allowed to be sent over the lane. + fn verify_message( + submitter: &Sender, + delivery_and_dispatch_fee: &Fee, + lane: &LaneId, + outbound_data: &OutboundLaneData, + payload: &Payload, + ) -> Result<(), Self::Error>; +} + +/// Message delivery payment. 
It is called as a part of submit-message transaction. Transaction +/// submitter is paying (in source chain tokens/assets) for: +/// +/// 1) submit-message-transaction-fee itself. This fee is not included in the +/// `delivery_and_dispatch_fee` and is witheld by the regular transaction payment mechanism; +/// 2) message-delivery-transaction-fee. It is submitted to the target node by relayer; +/// 3) message-dispatch fee. It is paid by relayer for processing message by target chain; +/// 4) message-receiving-delivery-transaction-fee. It is submitted to the source node +/// by relayer. +/// +/// So to be sure that any non-altruist relayer would agree to deliver message, submitter +/// should set `delivery_and_dispatch_fee` to at least (equialent of): sum of fees from (2) +/// to (4) above, plus some interest for the relayer. +pub trait MessageDeliveryAndDispatchPayment { + /// Error type. + type Error: Debug + Into<&'static str>; + + /// Withhold/write-off delivery_and_dispatch_fee from submitter account to + /// some relayers-fund account. + fn pay_delivery_and_dispatch_fee( + submitter: &Sender, + fee: &Balance, + relayer_fund_account: &AccountId, + ) -> Result<(), Self::Error>; + + /// Pay rewards for delivering messages to the given relayers. + /// + /// The implementation may also choose to pay reward to the `confirmation_relayer`, which is + /// a relayer that has submitted delivery confirmation transaction. + fn pay_relayers_rewards( + confirmation_relayer: &AccountId, + relayers_rewards: RelayersRewards, + relayer_fund_account: &AccountId, + ); + + /// Perform some initialization in externalities-provided environment. + /// + /// For instance you may ensure that particular required accounts or storage items are present. + /// Returns the number of storage reads performed. 
+ fn initialize(_relayer_fund_account: &AccountId) -> usize { + 0 + } +} + +/// Structure that may be used in place of `TargetHeaderChain`, `LaneMessageVerifier` and +/// `MessageDeliveryAndDispatchPayment` on chains, where outbound messages are forbidden. +pub struct ForbidOutboundMessages; + +/// Error message that is used in `ForbidOutboundMessages` implementation. +const ALL_OUTBOUND_MESSAGES_REJECTED: &str = "This chain is configured to reject all outbound messages"; + +impl TargetHeaderChain for ForbidOutboundMessages { + type Error = &'static str; + + type MessagesDeliveryProof = (); + + fn verify_message(_payload: &Payload) -> Result<(), Self::Error> { + Err(ALL_OUTBOUND_MESSAGES_REJECTED) + } + + fn verify_messages_delivery_proof( + _proof: Self::MessagesDeliveryProof, + ) -> Result<(LaneId, InboundLaneData), Self::Error> { + Err(ALL_OUTBOUND_MESSAGES_REJECTED) + } +} + +impl LaneMessageVerifier for ForbidOutboundMessages { + type Error = &'static str; + + fn verify_message( + _submitter: &Sender, + _delivery_and_dispatch_fee: &Fee, + _lane: &LaneId, + _outbound_data: &OutboundLaneData, + _payload: &Payload, + ) -> Result<(), Self::Error> { + Err(ALL_OUTBOUND_MESSAGES_REJECTED) + } +} + +impl MessageDeliveryAndDispatchPayment for ForbidOutboundMessages { + type Error = &'static str; + + fn pay_delivery_and_dispatch_fee( + _submitter: &Sender, + _fee: &Balance, + _relayer_fund_account: &AccountId, + ) -> Result<(), Self::Error> { + Err(ALL_OUTBOUND_MESSAGES_REJECTED) + } + + fn pay_relayers_rewards( + _confirmation_relayer: &AccountId, + _relayers_rewards: RelayersRewards, + _relayer_fund_account: &AccountId, + ) { + } +} diff --git a/polkadot/primitives/messages/src/target_chain.rs b/polkadot/primitives/messages/src/target_chain.rs new file mode 100644 index 00000000000..676e919bc61 --- /dev/null +++ b/polkadot/primitives/messages/src/target_chain.rs @@ -0,0 +1,160 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
+// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Primitives of messages module, that are used on the target chain. + +use crate::{LaneId, Message, MessageData, MessageKey, OutboundLaneData}; + +use bp_runtime::Size; +use codec::{Decode, Encode, Error as CodecError}; +use frame_support::{weights::Weight, Parameter, RuntimeDebug}; +use sp_std::{collections::btree_map::BTreeMap, fmt::Debug, prelude::*}; + +/// Proved messages from the source chain. +pub type ProvedMessages = BTreeMap>; + +/// Proved messages from single lane of the source chain. +#[derive(RuntimeDebug, Encode, Decode, Clone, PartialEq, Eq)] +pub struct ProvedLaneMessages { + /// Optional outbound lane state. + pub lane_state: Option, + /// Messages sent through this lane. + pub messages: Vec, +} + +/// Message data with decoded dispatch payload. +#[derive(RuntimeDebug)] +pub struct DispatchMessageData { + /// Result of dispatch payload decoding. + pub payload: Result, + /// Message delivery and dispatch fee, paid by the submitter. + pub fee: Fee, +} + +/// Message with decoded dispatch payload. +#[derive(RuntimeDebug)] +pub struct DispatchMessage { + /// Message key. + pub key: MessageKey, + /// Message data with decoded dispatch payload. + pub data: DispatchMessageData, +} + +/// Source chain API. Used by target chain, to verify source chain proofs. 
+/// +/// All implementations of this trait should only work with finalized data that +/// can't change. Wrong implementation may lead to invalid lane states (i.e. lane +/// that's stuck) and/or processing messages without paying fees. +pub trait SourceHeaderChain { + /// Error type. + type Error: Debug + Into<&'static str>; + + /// Proof that messages are sent from source chain. This may also include proof + /// of corresponding outbound lane states. + type MessagesProof: Parameter + Size; + + /// Verify messages proof and return proved messages. + /// + /// Returns error if either proof is incorrect, or the number of messages in the proof + /// is not matching the `messages_count`. + /// + /// Messages vector is required to be sorted by nonce within each lane. Out-of-order + /// messages will be rejected. + /// + /// The `messages_count` argument verification (sane limits) is supposed to be made + /// outside of this function. This function only verifies that the proof declares exactly + /// `messages_count` messages. + fn verify_messages_proof( + proof: Self::MessagesProof, + messages_count: u32, + ) -> Result>, Self::Error>; +} + +/// Called when inbound message is received. +pub trait MessageDispatch { + /// Decoded message payload type. Valid message may contain invalid payload. In this case + /// message is delivered, but dispatch fails. Therefore, two separate types of payload + /// (opaque `MessagePayload` used in delivery and this `DispatchPayload` used in dispatch). + type DispatchPayload: Decode; + + /// Estimate dispatch weight. + /// + /// This function must: (1) be instant and (2) return correct upper bound + /// of dispatch weight. + fn dispatch_weight(message: &DispatchMessage) -> Weight; + + /// Called when inbound message is received. + /// + /// It is up to the implementers of this trait to determine whether the message + /// is invalid (i.e. improperly encoded, has too large weight, ...) or not. 
+ fn dispatch(message: DispatchMessage); +} + +impl Default for ProvedLaneMessages { + fn default() -> Self { + ProvedLaneMessages { + lane_state: None, + messages: Vec::new(), + } + } +} + +impl From> for DispatchMessage { + fn from(message: Message) -> Self { + DispatchMessage { + key: message.key, + data: message.data.into(), + } + } +} + +impl From> for DispatchMessageData { + fn from(data: MessageData) -> Self { + DispatchMessageData { + payload: DispatchPayload::decode(&mut &data.payload[..]), + fee: data.fee, + } + } +} + +/// Structure that may be used in place of `SourceHeaderChain` and `MessageDispatch` on chains, +/// where inbound messages are forbidden. +pub struct ForbidInboundMessages; + +/// Error message that is used in `ForbidOutboundMessages` implementation. +const ALL_INBOUND_MESSAGES_REJECTED: &str = "This chain is configured to reject all inbound messages"; + +impl SourceHeaderChain for ForbidInboundMessages { + type Error = &'static str; + type MessagesProof = (); + + fn verify_messages_proof( + _proof: Self::MessagesProof, + _messages_count: u32, + ) -> Result>, Self::Error> { + Err(ALL_INBOUND_MESSAGES_REJECTED) + } +} + +impl MessageDispatch for ForbidInboundMessages { + type DispatchPayload = (); + + fn dispatch_weight(_message: &DispatchMessage) -> Weight { + Weight::MAX + } + + fn dispatch(_message: DispatchMessage) {} +} diff --git a/polkadot/primitives/polkadot-core/Cargo.toml b/polkadot/primitives/polkadot-core/Cargo.toml new file mode 100644 index 00000000000..995f948e5d4 --- /dev/null +++ b/polkadot/primitives/polkadot-core/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "bp-polkadot-core" +description = "Primitives of Polkadot-like runtime." 
+version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] } + +# Bridge Dependencies + +bp-messages = { path = "../messages", default-features = false } +bp-runtime = { path = "../runtime", default-features = false } + +# Substrate Based Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +[dev-dependencies] +hex = "0.4" + +[features] +default = ["std"] +std = [ + "bp-messages/std", + "bp-runtime/std", + "frame-support/std", + "frame-system/std", + "parity-scale-codec/std", + "sp-api/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", + "sp-version/std", +] diff --git a/polkadot/primitives/polkadot-core/src/lib.rs b/polkadot/primitives/polkadot-core/src/lib.rs new file mode 100644 index 00000000000..c9858c0820d --- /dev/null +++ b/polkadot/primitives/polkadot-core/src/lib.rs @@ -0,0 +1,350 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +#![cfg_attr(not(feature = "std"), no_std)] + +use bp_messages::MessageNonce; +use bp_runtime::Chain; +use frame_support::{ + dispatch::Dispatchable, + parameter_types, + weights::{ + constants::{BlockExecutionWeight, ExtrinsicBaseWeight, WEIGHT_PER_SECOND}, + DispatchClass, Weight, + }, + Blake2_128Concat, RuntimeDebug, StorageHasher, Twox128, +}; +use frame_system::limits; +use parity_scale_codec::Compact; +use sp_core::Hasher as HasherT; +use sp_runtime::{ + generic, + traits::{BlakeTwo256, IdentifyAccount, Verify}, + MultiAddress, MultiSignature, OpaqueExtrinsic, Perbill, +}; +use sp_std::prelude::Vec; + +// Re-export's to avoid extra substrate dependencies in chain-specific crates. +pub use frame_support::Parameter; +pub use sp_runtime::traits::Convert; + +/// Number of extra bytes (excluding size of storage value itself) of storage proof, built at +/// Polkadot-like chain. This mostly depends on number of entries in the storage trie. +/// Some reserve is reserved to account future chain growth. 
+/// +/// To compute this value, we've synced Kusama chain blocks [0; 6545733] to see if there were +/// any significant changes of the storage proof size (NO): +/// +/// - at block 3072 the storage proof size overhead was 579 bytes; +/// - at block 2479616 it was 578 bytes; +/// - at block 4118528 it was 711 bytes; +/// - at block 6540800 it was 779 bytes. +/// +/// The number of storage entries at the block 6546170 was 351207 and number of trie nodes in +/// the storage proof was 5 (log(16, 351207) ~ 4.6). +/// +/// So the assumption is that the storage proof size overhead won't be larger than 1024 in the +/// nearest future. If it'll ever break this barrier, then we'll need to update this constant +/// at next runtime upgrade. +pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; + +/// Maximal size (in bytes) of encoded (using `Encode::encode()`) account id. +/// +/// All polkadot-like chains are using same crypto. +pub const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 32; + +/// All Polkadot-like chains allow normal extrinsics to fill block up to 75%. +/// +/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); + +/// All Polkadot-like chains allow 2 seconds of compute with a 6 second average block time. +/// +/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. +pub const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; + +/// All Polkadot-like chains assume that an on-initialize consumes 1% of the weight on average, +/// hence a single extrinsic will not be allowed to consume more than `AvailableBlockRatio - 1%`. +/// +/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. +pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(1); + +parameter_types! { + /// All Polkadot-like chains have maximal block size set to 5MB. 
+ /// + /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. + pub BlockLength: limits::BlockLength = limits::BlockLength::max_with_normal_ratio( + 5 * 1024 * 1024, + NORMAL_DISPATCH_RATIO, + ); + /// All Polkadot-like chains have the same block weights. + /// + /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. + pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have an extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT, + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); +} + +/// Get the maximum weight (compute time) that a Normal extrinsic on the Polkadot-like chain can use. +pub fn max_extrinsic_weight() -> Weight { + BlockWeights::get() + .get(DispatchClass::Normal) + .max_extrinsic + .unwrap_or(Weight::MAX) +} + +/// Get the maximum length in bytes that a Normal extrinsic on the Polkadot-like chain requires. +pub fn max_extrinsic_size() -> u32 { + *BlockLength::get().max.get(DispatchClass::Normal) +} + +// TODO [#78] may need to be updated after https://github.com/paritytech/parity-bridges-common/issues/78 +/// Maximal number of messages in single delivery transaction. +pub const MAX_MESSAGES_IN_DELIVERY_TRANSACTION: MessageNonce = 128; + +/// Maximal number of unrewarded relayer entries at inbound lane. 
+pub const MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE: MessageNonce = 128; + +// TODO [#438] should be selected keeping in mind: +// finality delay on both chains + reward payout cost + messages throughput. +/// Maximal number of unconfirmed messages at inbound lane. +pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 8192; + +/// Re-export `time_units` to make usage easier. +pub use time_units::*; + +/// Human readable time units defined in terms of number of blocks. +pub mod time_units { + use super::BlockNumber; + + pub const MILLISECS_PER_BLOCK: u64 = 6000; + pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; + + pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); + pub const HOURS: BlockNumber = MINUTES * 60; + pub const DAYS: BlockNumber = HOURS * 24; +} + +/// Block number type used in Polkadot-like chains. +pub type BlockNumber = u32; + +/// Hash type used in Polkadot-like chains. +pub type Hash = ::Out; + +/// Account Index (a.k.a. nonce). +pub type Index = u32; + +/// Hashing type. +pub type Hashing = BlakeTwo256; + +/// The type of an object that can produce hashes on Polkadot-like chains. +pub type Hasher = BlakeTwo256; + +/// The header type used by Polkadot-like chains. +pub type Header = generic::Header; + +/// Signature type used by Polkadot-like chains. +pub type Signature = MultiSignature; + +/// Public key of account on Polkadot-like chains. +pub type AccountPublic = ::Signer; + +/// Id of account on Polkadot-like chains. +pub type AccountId = ::AccountId; + +/// Index of a transaction on the Polkadot-like chains. +pub type Nonce = u32; + +/// Block type of Polkadot-like chains. +pub type Block = generic::Block; + +/// Polkadot-like block signed with a Justification. +pub type SignedBlock = generic::SignedBlock; + +/// The balance of an account on Polkadot-like chain. +pub type Balance = u128; + +/// Unchecked Extrinsic type. 
+pub type UncheckedExtrinsic = + generic::UncheckedExtrinsic, Call, Signature, SignedExtensions>; + +/// A type of the data encoded as part of the transaction. +pub type SignedExtra = ( + (), + (), + (), + sp_runtime::generic::Era, + Compact, + (), + Compact, +); + +/// Parameters which are part of the payload used to produce transaction signature, +/// but don't end up in the transaction itself (i.e. inherent part of the runtime). +pub type AdditionalSigned = (u32, u32, Hash, Hash, (), (), ()); + +/// A simplified version of signed extensions meant for producing signed transactions +/// and signed payload in the client code. +#[derive(PartialEq, Eq, Clone, RuntimeDebug)] +pub struct SignedExtensions { + encode_payload: SignedExtra, + additional_signed: AdditionalSigned, + _data: sp_std::marker::PhantomData, +} + +impl parity_scale_codec::Encode for SignedExtensions { + fn using_encoded R>(&self, f: F) -> R { + self.encode_payload.using_encoded(f) + } +} + +impl parity_scale_codec::Decode for SignedExtensions { + fn decode(_input: &mut I) -> Result { + unimplemented!("SignedExtensions are never meant to be decoded, they are only used to create transaction"); + } +} + +impl SignedExtensions { + pub fn new( + version: sp_version::RuntimeVersion, + era: sp_runtime::generic::Era, + genesis_hash: Hash, + nonce: Nonce, + tip: Balance, + ) -> Self { + Self { + encode_payload: ( + (), // spec version + (), // tx version + (), // genesis + era, // era + nonce.into(), // nonce (compact encoding) + (), // Check weight + tip.into(), // transaction payment / tip (compact encoding) + ), + additional_signed: ( + version.spec_version, + version.transaction_version, + genesis_hash, + genesis_hash, + (), + (), + (), + ), + _data: Default::default(), + } + } +} + +impl sp_runtime::traits::SignedExtension for SignedExtensions +where + Call: parity_scale_codec::Codec + sp_std::fmt::Debug + Sync + Send + Clone + Eq + PartialEq, + Call: Dispatchable, +{ + const IDENTIFIER: &'static str = 
"Not needed."; + + type AccountId = AccountId; + type Call = Call; + type AdditionalSigned = AdditionalSigned; + type Pre = (); + + fn additional_signed(&self) -> Result { + Ok(self.additional_signed) + } +} + +/// Polkadot-like chain. +#[derive(RuntimeDebug)] +pub struct PolkadotLike; + +impl Chain for PolkadotLike { + type BlockNumber = BlockNumber; + type Hash = Hash; + type Hasher = Hasher; + type Header = Header; +} + +/// Convert a 256-bit hash into an AccountId. +pub struct AccountIdConverter; + +impl Convert for AccountIdConverter { + fn convert(hash: sp_core::H256) -> AccountId { + hash.to_fixed_bytes().into() + } +} + +/// Return a storage key for account data. +/// +/// This is based on FRAME storage-generation code from Substrate: +/// https://github.com/paritytech/substrate/blob/c939ceba381b6313462d47334f775e128ea4e95d/frame/support/src/storage/generator/map.rs#L74 +/// The equivalent command to invoke in case full `Runtime` is known is this: +/// `let key = frame_system::Account::::storage_map_final_key(&account_id);` +pub fn account_info_storage_key(id: &AccountId) -> Vec { + let module_prefix_hashed = Twox128::hash(b"System"); + let storage_prefix_hashed = Twox128::hash(b"Account"); + let key_hashed = parity_scale_codec::Encode::using_encoded(id, Blake2_128Concat::hash); + + let mut final_key = Vec::with_capacity(module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len()); + + final_key.extend_from_slice(&module_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(&key_hashed); + + final_key +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_runtime::codec::Encode; + + #[test] + fn maximal_encoded_account_id_size_is_correct() { + let actual_size = AccountId::default().encode().len(); + assert!( + actual_size <= MAXIMAL_ENCODED_ACCOUNT_ID_SIZE as usize, + "Actual size of encoded account id for Polkadot-like chains ({}) is larger than expected {}", + actual_size, + 
MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, + ); + } + + #[test] + fn should_generate_storage_key() { + let acc = [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, + ] + .into(); + let key = account_info_storage_key(&acc); + assert_eq!(hex::encode(key), "26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da92dccd599abfe1920a1cff8a7358231430102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"); + } +} diff --git a/polkadot/primitives/runtime/Cargo.toml b/polkadot/primitives/runtime/Cargo.toml new file mode 100644 index 00000000000..17fa96b2c90 --- /dev/null +++ b/polkadot/primitives/runtime/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "bp-runtime" +description = "Primitives that may be used at (bridges) runtime level." +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +hash-db = { version = "0.15.2", default-features = false } +num-traits = { version = "0.2", default-features = false } + +# Substrate Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false } + 
+[dev-dependencies] +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } + + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "hash-db/std", + "num-traits/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", + "sp-state-machine/std", + "sp-trie/std", +] diff --git a/polkadot/primitives/runtime/src/chain.rs b/polkadot/primitives/runtime/src/chain.rs new file mode 100644 index 00000000000..cb19c6e7268 --- /dev/null +++ b/polkadot/primitives/runtime/src/chain.rs @@ -0,0 +1,87 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use frame_support::Parameter; +use num_traits::AsPrimitive; +use sp_runtime::traits::{ + AtLeast32BitUnsigned, Hash as HashT, Header as HeaderT, MaybeDisplay, MaybeMallocSizeOf, MaybeSerializeDeserialize, + Member, SimpleBitOps, +}; +use sp_std::str::FromStr; + +/// Minimal Substrate-based chain representation that may be used from no_std environment. +pub trait Chain: Send + Sync + 'static { + /// A type that fulfills the abstract idea of what a Substrate block number is. 
+ // Constraits come from the associated Number type of `sp_runtime::traits::Header` + // See here for more info: + // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Number + // + // Note that the `AsPrimitive` trait is required by the GRANDPA justification + // verifier, and is not usually part of a Substrate Header's Number type. + type BlockNumber: Parameter + + Member + + MaybeSerializeDeserialize + + sp_std::hash::Hash + + Copy + + Default + + MaybeDisplay + + AtLeast32BitUnsigned + + FromStr + + MaybeMallocSizeOf + + AsPrimitive + + Default; + + /// A type that fulfills the abstract idea of what a Substrate hash is. + // Constraits come from the associated Hash type of `sp_runtime::traits::Header` + // See here for more info: + // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Hash + type Hash: Parameter + + Member + + MaybeSerializeDeserialize + + sp_std::hash::Hash + + Ord + + Copy + + MaybeDisplay + + Default + + SimpleBitOps + + AsRef<[u8]> + + AsMut<[u8]> + + MaybeMallocSizeOf; + + /// A type that fulfills the abstract idea of what a Substrate hasher (a type + /// that produces hashes) is. + // Constraits come from the associated Hashing type of `sp_runtime::traits::Header` + // See here for more info: + // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Hashing + type Hasher: HashT; + + /// A type that fulfills the abstract idea of what a Substrate header is. + // See here for more info: + // https://crates.parity.io/sp_runtime/traits/trait.Header.html + type Header: Parameter + HeaderT + MaybeSerializeDeserialize; +} + +/// Block number used by the chain. +pub type BlockNumberOf = ::BlockNumber; + +/// Hash type used by the chain. +pub type HashOf = ::Hash; + +/// Hasher type used by the chain. +pub type HasherOf = ::Hasher; + +/// Header type used by the chain. 
+pub type HeaderOf = ::Header; diff --git a/polkadot/primitives/runtime/src/lib.rs b/polkadot/primitives/runtime/src/lib.rs new file mode 100644 index 00000000000..e7f990d2830 --- /dev/null +++ b/polkadot/primitives/runtime/src/lib.rs @@ -0,0 +1,136 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Primitives that may be used at (bridges) runtime level. + +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::Encode; +use sp_core::hash::H256; +use sp_io::hashing::blake2_256; +use sp_std::convert::TryFrom; + +pub use chain::{BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf}; +pub use storage_proof::{Error as StorageProofError, StorageProofChecker}; + +#[cfg(feature = "std")] +pub use storage_proof::craft_valid_storage_proof; + +mod chain; +mod storage_proof; + +/// Use this when something must be shared among all instances. +pub const NO_INSTANCE_ID: InstanceId = [0, 0, 0, 0]; + +/// Bridge-with-Rialto instance id. +pub const RIALTO_BRIDGE_INSTANCE: InstanceId = *b"rlto"; + +/// Bridge-with-Millau instance id. +pub const MILLAU_BRIDGE_INSTANCE: InstanceId = *b"mlau"; + +/// Bridge-with-Polkadot instance id. +pub const POLKADOT_BRIDGE_INSTANCE: InstanceId = *b"pdot"; + +/// Bridge-with-Kusama instance id. 
+pub const KUSAMA_BRIDGE_INSTANCE: InstanceId = *b"ksma"; + +/// Bridge-with-Rococo instance id. +pub const ROCOCO_BRIDGE_INSTANCE: InstanceId = *b"roco"; + +/// Bridge-with-Westend instance id. +pub const WESTEND_BRIDGE_INSTANCE: InstanceId = *b"wend"; + +/// Call-dispatch module prefix. +pub const CALL_DISPATCH_MODULE_PREFIX: &[u8] = b"pallet-bridge/dispatch"; + +/// A unique prefix for entropy when generating cross-chain account IDs. +pub const ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/account"; + +/// A unique prefix for entropy when generating a cross-chain account ID for the Root account. +pub const ROOT_ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/root"; + +/// Id of deployed module instance. We have a bunch of pallets that may be used in +/// different bridges. E.g. messages pallet may be deployed twice in the same +/// runtime to bridge ThisChain with Chain1 and Chain2. Sometimes we need to be able +/// to identify deployed instance dynamically. This type is used for that. +pub type InstanceId = [u8; 4]; + +/// Type of accounts on the source chain. +pub enum SourceAccount { + /// An account that belongs to Root (priviledged origin). + Root, + /// A non-priviledged account. + /// + /// The embedded account ID may or may not have a private key depending on the "owner" of the + /// account (private key, pallet, proxy, etc.). + Account(T), +} + +/// Derive an account ID from a foreign account ID. +/// +/// This function returns an encoded Blake2 hash. It is the responsibility of the caller to ensure +/// this can be succesfully decoded into an AccountId. +/// +/// The `bridge_id` is used to provide extra entropy when producing account IDs. This helps prevent +/// AccountId collisions between different bridges on a single target chain. 
+/// +/// Note: If the same `bridge_id` is used across different chains (for example, if one source chain +/// is bridged to multiple target chains), then all the derived accounts would be the same across +/// the different chains. This could negatively impact users' privacy across chains. +pub fn derive_account_id(bridge_id: InstanceId, id: SourceAccount) -> H256 +where + AccountId: Encode, +{ + match id { + SourceAccount::Root => (ROOT_ACCOUNT_DERIVATION_PREFIX, bridge_id).using_encoded(blake2_256), + SourceAccount::Account(id) => (ACCOUNT_DERIVATION_PREFIX, bridge_id, id).using_encoded(blake2_256), + } + .into() +} + +/// Derive the account ID of the shared relayer fund account. +/// +/// This account is used to collect fees for relayers that are passing messages across the bridge. +/// +/// The account ID can be the same across different instances of `pallet-bridge-messages` if the same +/// `bridge_id` is used. +pub fn derive_relayer_fund_account_id(bridge_id: InstanceId) -> H256 { + ("relayer-fund-account", bridge_id).using_encoded(blake2_256).into() +} + +/// Anything that has size. +pub trait Size { + /// Return approximate size of this object (in bytes). + /// + /// This function should be lightweight. The result should not necessary be absolutely + /// accurate. + fn size_hint(&self) -> u32; +} + +impl Size for () { + fn size_hint(&self) -> u32 { + 0 + } +} + +/// Pre-computed size. +pub struct PreComputedSize(pub usize); + +impl Size for PreComputedSize { + fn size_hint(&self) -> u32 { + u32::try_from(self.0).unwrap_or(u32::MAX) + } +} diff --git a/polkadot/primitives/runtime/src/storage_proof.rs b/polkadot/primitives/runtime/src/storage_proof.rs new file mode 100644 index 00000000000..d70be93b1d2 --- /dev/null +++ b/polkadot/primitives/runtime/src/storage_proof.rs @@ -0,0 +1,112 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Logic for checking Substrate storage proofs. + +use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; +use sp_runtime::RuntimeDebug; +use sp_std::vec::Vec; +use sp_trie::{read_trie_value, Layout, MemoryDB, StorageProof}; + +/// This struct is used to read storage values from a subset of a Merklized database. The "proof" +/// is a subset of the nodes in the Merkle structure of the database, so that it provides +/// authentication against a known Merkle root as well as the values in the database themselves. +pub struct StorageProofChecker +where + H: Hasher, +{ + root: H::Out, + db: MemoryDB, +} + +impl StorageProofChecker +where + H: Hasher, +{ + /// Constructs a new storage proof checker. + /// + /// This returns an error if the given proof is invalid with respect to the given root. + pub fn new(root: H::Out, proof: StorageProof) -> Result { + let db = proof.into_memory_db(); + if !db.contains(&root, EMPTY_PREFIX) { + return Err(Error::StorageRootMismatch); + } + + let checker = StorageProofChecker { root, db }; + Ok(checker) + } + + /// Reads a value from the available subset of storage. If the value cannot be read due to an + /// incomplete or otherwise invalid proof, this returns an error. 
+ pub fn read_value(&self, key: &[u8]) -> Result>, Error> { + read_trie_value::, _>(&self.db, &self.root, key).map_err(|_| Error::StorageValueUnavailable) + } +} + +#[derive(RuntimeDebug, PartialEq)] +pub enum Error { + StorageRootMismatch, + StorageValueUnavailable, +} + +/// Return valid storage proof and state root. +/// +/// NOTE: This should only be used for **testing**. +#[cfg(feature = "std")] +pub fn craft_valid_storage_proof() -> (sp_core::H256, StorageProof) { + use sp_state_machine::{backend::Backend, prove_read, InMemoryBackend}; + + // construct storage proof + let backend = >::from(vec![ + (None, vec![(b"key1".to_vec(), Some(b"value1".to_vec()))]), + (None, vec![(b"key2".to_vec(), Some(b"value2".to_vec()))]), + (None, vec![(b"key3".to_vec(), Some(b"value3".to_vec()))]), + // Value is too big to fit in a branch node + (None, vec![(b"key11".to_vec(), Some(vec![0u8; 32]))]), + ]); + let root = backend.storage_root(std::iter::empty()).0; + let proof = StorageProof::new( + prove_read(backend, &[&b"key1"[..], &b"key2"[..], &b"key22"[..]]) + .unwrap() + .iter_nodes() + .collect(), + ); + + (root, proof) +} + +#[cfg(test)] +pub mod tests { + use super::*; + + #[test] + fn storage_proof_check() { + let (root, proof) = craft_valid_storage_proof(); + + // check proof in runtime + let checker = >::new(root, proof.clone()).unwrap(); + assert_eq!(checker.read_value(b"key1"), Ok(Some(b"value1".to_vec()))); + assert_eq!(checker.read_value(b"key2"), Ok(Some(b"value2".to_vec()))); + assert_eq!(checker.read_value(b"key11111"), Err(Error::StorageValueUnavailable)); + assert_eq!(checker.read_value(b"key22"), Ok(None)); + + // checking proof against invalid commitment fails + assert_eq!( + >::new(sp_core::H256::random(), proof).err(), + Some(Error::StorageRootMismatch) + ); + } +} diff --git a/polkadot/primitives/test-utils/Cargo.toml b/polkadot/primitives/test-utils/Cargo.toml new file mode 100644 index 00000000000..5adb2c2b55f --- /dev/null +++ 
b/polkadot/primitives/test-utils/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "bp-test-utils" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +bp-header-chain = { path = "../header-chain", default-features = false } +ed25519-dalek = { version = "1.0", default-features = false, features = ["u64_backend"] } +finality-grandpa = { version = "0.14.0", default-features = false } +parity-scale-codec = { version = "2.0.0", default-features = false } +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + +[features] +default = ["std"] +std = [ + "bp-header-chain/std", + "ed25519-dalek/std", + "finality-grandpa/std", + "parity-scale-codec/std", + "sp-application-crypto/std", + "sp-finality-grandpa/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/polkadot/primitives/test-utils/src/keyring.rs b/polkadot/primitives/test-utils/src/keyring.rs new file mode 100644 index 00000000000..6c5b1cae911 --- /dev/null +++ b/polkadot/primitives/test-utils/src/keyring.rs @@ -0,0 +1,96 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Utilities for working with test accounts. + +use ed25519_dalek::{Keypair, PublicKey, SecretKey, Signature}; +use finality_grandpa::voter_set::VoterSet; +use parity_scale_codec::Encode; +use sp_application_crypto::Public; +use sp_finality_grandpa::{AuthorityId, AuthorityList, AuthorityWeight}; +use sp_runtime::RuntimeDebug; +use sp_std::prelude::*; + +/// Set of test accounts with friendly names. +pub const ALICE: Account = Account(0); +pub const BOB: Account = Account(1); +pub const CHARLIE: Account = Account(2); +pub const DAVE: Account = Account(3); +pub const EVE: Account = Account(4); +pub const FERDIE: Account = Account(5); + +/// A test account which can be used to sign messages. 
+#[derive(RuntimeDebug, Clone, Copy)] +pub struct Account(pub u16); + +impl Account { + pub fn public(&self) -> PublicKey { + (&self.secret()).into() + } + + pub fn secret(&self) -> SecretKey { + let data = self.0.encode(); + let mut bytes = [0_u8; 32]; + bytes[0..data.len()].copy_from_slice(&*data); + SecretKey::from_bytes(&bytes).expect("A static array of the correct length is a known good.") + } + + pub fn pair(&self) -> Keypair { + let mut pair: [u8; 64] = [0; 64]; + + let secret = self.secret(); + pair[..32].copy_from_slice(&secret.to_bytes()); + + let public = self.public(); + pair[32..].copy_from_slice(&public.to_bytes()); + + Keypair::from_bytes(&pair).expect("We expect the SecretKey to be good, so this must also be good.") + } + + pub fn sign(&self, msg: &[u8]) -> Signature { + use ed25519_dalek::Signer; + self.pair().sign(msg) + } +} + +impl From for AuthorityId { + fn from(p: Account) -> Self { + AuthorityId::from_slice(&p.public().to_bytes()) + } +} + +/// Get a valid set of voters for a Grandpa round. +pub fn voter_set() -> VoterSet { + VoterSet::new(authority_list()).unwrap() +} + +/// Convenience function to get a list of Grandpa authorities. +pub fn authority_list() -> AuthorityList { + test_keyring() + .iter() + .map(|(id, w)| (AuthorityId::from(*id), *w)) + .collect() +} + +/// Get the corresponding identities from the keyring for the "standard" authority set. +pub fn test_keyring() -> Vec<(Account, AuthorityWeight)> { + vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1)] +} + +/// Get a list of "unique" accounts. +pub fn accounts(len: u16) -> Vec { + (0..len).into_iter().map(Account).collect() +} diff --git a/polkadot/primitives/test-utils/src/lib.rs b/polkadot/primitives/test-utils/src/lib.rs new file mode 100644 index 00000000000..0fcc263763c --- /dev/null +++ b/polkadot/primitives/test-utils/src/lib.rs @@ -0,0 +1,237 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Utilities for testing runtime code. + +#![cfg_attr(not(feature = "std"), no_std)] + +use bp_header_chain::justification::GrandpaJustification; +use sp_application_crypto::TryFrom; +use sp_finality_grandpa::{AuthorityId, AuthorityWeight}; +use sp_finality_grandpa::{AuthoritySignature, SetId}; +use sp_runtime::traits::{Header as HeaderT, One, Zero}; +use sp_std::prelude::*; + +// Re-export all our test account utilities +pub use keyring::*; + +mod keyring; + +pub const TEST_GRANDPA_ROUND: u64 = 1; +pub const TEST_GRANDPA_SET_ID: SetId = 1; + +/// Configuration parameters when generating test GRANDPA justifications. +#[derive(Clone)] +pub struct JustificationGeneratorParams { + /// The header which we want to finalize. + pub header: H, + /// The GRANDPA round number for the current authority set. + pub round: u64, + /// The current authority set ID. + pub set_id: SetId, + /// The current GRANDPA authority set. + /// + /// The size of the set will determine the number of pre-commits in our justification. + pub authorities: Vec<(Account, AuthorityWeight)>, + /// The total number of vote ancestries in our justification. + /// + /// These may be distributed among many different forks. + pub votes: u32, + /// The number of forks. + /// + /// Useful for creating a "worst-case" scenario in which each authority is on its own fork. 
+ pub forks: u32, +} + +impl Default for JustificationGeneratorParams { + fn default() -> Self { + Self { + header: test_header(One::one()), + round: TEST_GRANDPA_ROUND, + set_id: TEST_GRANDPA_SET_ID, + authorities: test_keyring(), + votes: 2, + forks: 1, + } + } +} + +/// Make a valid GRANDPA justification with sensible defaults +pub fn make_default_justification(header: &H) -> GrandpaJustification { + let params = JustificationGeneratorParams:: { + header: header.clone(), + ..Default::default() + }; + + make_justification_for_header(params) +} + +/// Generate justifications in a way where we are able to tune the number of pre-commits +/// and vote ancestries which are included in the justification. +/// +/// This is useful for benchmarkings where we want to generate valid justifications with +/// a specific number of pre-commits (tuned with the number of "authorities") and/or a specific +/// number of vote ancestries (tuned with the "votes" parameter). +/// +/// Note: This needs at least three authorities or else the verifier will complain about +/// being given an invalid commit. +pub fn make_justification_for_header(params: JustificationGeneratorParams) -> GrandpaJustification { + let JustificationGeneratorParams { + header, + round, + set_id, + authorities, + mut votes, + forks, + } = params; + + let (target_hash, target_number) = (header.hash(), *header.number()); + let mut precommits = vec![]; + let mut votes_ancestries = vec![]; + + assert!(forks != 0, "Need at least one fork to have a chain.."); + assert!(votes >= forks, "Need at least one header per fork."); + assert!( + forks as usize <= authorities.len(), + "If we have more forks than authorities we can't create valid pre-commits for all the forks." 
+ ); + + // Roughly, how many vote ancestries do we want per fork + let target_depth = (votes + forks - 1) / forks; + + let mut unsigned_precommits = vec![]; + for i in 0..forks { + let depth = if votes >= target_depth { + votes -= target_depth; + target_depth + } else { + votes + }; + + // Note: Adding 1 to account for the target header + let chain = generate_chain(i as u8, depth + 1, &header); + + // We don't include our finality target header in the vote ancestries + for child in &chain[1..] { + votes_ancestries.push(child.clone()); + } + + // The header we need to use when pre-commiting is the one at the highest height + // on our chain. + let precommit_candidate = chain.last().map(|h| (h.hash(), *h.number())).unwrap(); + unsigned_precommits.push(precommit_candidate); + } + + for (i, (id, _weight)) in authorities.iter().enumerate() { + // Assign authorities to sign pre-commits in a round-robin fashion + let target = unsigned_precommits[i % forks as usize]; + let precommit = signed_precommit::(&id, target, round, set_id); + + precommits.push(precommit); + } + + GrandpaJustification { + round, + commit: finality_grandpa::Commit { + target_hash, + target_number, + precommits, + }, + votes_ancestries, + } +} + +fn generate_chain(fork_id: u8, depth: u32, ancestor: &H) -> Vec { + let mut headers = vec![ancestor.clone()]; + + for i in 1..depth { + let parent = &headers[(i - 1) as usize]; + let (hash, num) = (parent.hash(), *parent.number()); + + let mut header = test_header::(num + One::one()); + header.set_parent_hash(hash); + + // Modifying the digest so headers at the same height but in different forks have different + // hashes + header + .digest_mut() + .logs + .push(sp_runtime::DigestItem::Other(vec![fork_id])); + + headers.push(header); + } + + headers +} + +fn signed_precommit( + signer: &Account, + target: (H::Hash, H::Number), + round: u64, + set_id: SetId, +) -> finality_grandpa::SignedPrecommit { + let precommit = finality_grandpa::Precommit { + 
target_hash: target.0, + target_number: target.1, + }; + + let encoded = + sp_finality_grandpa::localized_payload(round, set_id, &finality_grandpa::Message::Precommit(precommit.clone())); + + let signature = signer.sign(&encoded); + let raw_signature: Vec = signature.to_bytes().into(); + + // Need to wrap our signature and id types that they match what our `SignedPrecommit` is expecting + let signature = AuthoritySignature::try_from(raw_signature).expect( + "We know our Keypair is good, + so our signature must also be good.", + ); + let id = (*signer).into(); + + finality_grandpa::SignedPrecommit { + precommit, + signature, + id, + } +} + +/// Get a header for testing. +/// +/// The correct parent hash will be used if given a non-zero header. +pub fn test_header(number: H::Number) -> H { + let default = |num| { + H::new( + num, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + ) + }; + + let mut header = default(number); + if number != Zero::zero() { + let parent_hash = default(number - One::one()).hash(); + header.set_parent_hash(parent_hash); + } + + header +} + +/// Convenience function for generating a Header ID at a given block number. 
+pub fn header_id(index: u8) -> (H::Hash, H::Number) { + (test_header::(index.into()).hash(), index.into()) +} diff --git a/polkadot/relays/bin-ethereum/Cargo.toml b/polkadot/relays/bin-ethereum/Cargo.toml new file mode 100644 index 00000000000..efd9c0194b2 --- /dev/null +++ b/polkadot/relays/bin-ethereum/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "ethereum-poa-relay" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +ansi_term = "0.12" +async-std = "1.9.0" +async-trait = "0.1.42" +clap = { version = "2.33.3", features = ["yaml"] } +codec = { package = "parity-scale-codec", version = "2.0.0" } +env_logger = "0.8.3" +ethabi = { git = "https://github.com/paritytech/ethabi", branch = "td-eth-types-11" } +ethabi-contract = { git = "https://github.com/paritytech/ethabi", branch = "td-eth-types-11" } +ethabi-derive = { git = "https://github.com/paritytech/ethabi", branch = "td-eth-types-11" } +futures = "0.3.12" +hex = "0.4" +hex-literal = "0.3" +libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"] } +log = "0.4.14" +num-traits = "0.2" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0.64" +time = "0.2" + +# Bridge dependencies + +bp-currency-exchange = { path = "../../primitives/currency-exchange" } +bp-eth-poa = { path = "../../primitives/ethereum-poa" } +exchange-relay = { path = "../exchange" } +headers-relay = { path = "../headers" } +messages-relay = { path = "../messages" } +relay-ethereum-client = { path = "../client-ethereum" } +relay-rialto-client = { path = "../client-rialto" } +relay-substrate-client = { path = "../client-substrate" } +relay-utils = { path = "../utils" } +rialto-runtime = { path = "../../bin/rialto/runtime" } + +# Substrate Dependencies + +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment = { git = 
"https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/relays/bin-ethereum/README.md b/polkadot/relays/bin-ethereum/README.md new file mode 100644 index 00000000000..9fe2f623fd0 --- /dev/null +++ b/polkadot/relays/bin-ethereum/README.md @@ -0,0 +1,7 @@ +# PoA <> Substrate Bridge + +**DISCLAIMER:** *we recommend not using the bridge in "production" (to bridge significant amounts) just yet. +it's missing a code audit and should still be considered alpha. we can't rule out that there are bugs that might result in loss of the bridged amounts. +we'll update this disclaimer once that changes* + +These docs are very incomplete yet. Describe high-level goals here in the (near) future. 
diff --git a/polkadot/relays/bin-ethereum/res/substrate-bridge-abi.json b/polkadot/relays/bin-ethereum/res/substrate-bridge-abi.json new file mode 100644 index 00000000000..b7d7b4b9152 --- /dev/null +++ b/polkadot/relays/bin-ethereum/res/substrate-bridge-abi.json @@ -0,0 +1,167 @@ +[ + { + "inputs": [ + { + "internalType": "bytes", + "name": "rawInitialHeader", + "type": "bytes" + }, + { + "internalType": "uint64", + "name": "initialValidatorsSetId", + "type": "uint64" + }, + { + "internalType": "bytes", + "name": "initialValidatorsSet", + "type": "bytes" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "stateMutability": "nonpayable", + "type": "fallback" + }, + { + "inputs": [], + "name": "bestKnownHeader", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "finalityTargetNumber", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "finalityTargetHash", + "type": "bytes32" + }, + { + "internalType": "bytes", + "name": "rawFinalityProof", + "type": "bytes" + } + ], + "name": "importFinalityProof", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes", + "name": "rawHeader1", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "rawHeader2", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "rawHeader3", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "rawHeader4", + "type": "bytes" + } + ], + "name": "importHeaders", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "incompleteHeaders", + "outputs": [ + { + "internalType": "uint256[]", + "name": "", + "type": "uint256[]" + }, + { + "internalType": "bytes32[]", + "name": "", + 
"type": "bytes32[]" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes", + "name": "rawHeader1", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "rawHeader2", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "rawHeader3", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "rawHeader4", + "type": "bytes" + } + ], + "name": "isIncompleteHeaders", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "headerHash", + "type": "bytes32" + } + ], + "name": "isKnownHeader", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + } +] diff --git a/polkadot/relays/bin-ethereum/res/substrate-bridge-bytecode.hex b/polkadot/relays/bin-ethereum/res/substrate-bridge-bytecode.hex new file mode 100644 index 00000000000..6dd6a33046f --- /dev/null +++ b/polkadot/relays/bin-ethereum/res/substrate-bridge-bytecode.hex @@ -0,0 +1 @@ 
+60806040523480156200001157600080fd5b5060405162001af838038062001af8833981810160405260608110156200003757600080fd5b81019080805160405193929190846401000000008211156200005857600080fd5b9083019060208201858111156200006e57600080fd5b82516401000000008111828201881017156200008957600080fd5b82525081516020918201929091019080838360005b83811015620000b85781810151838201526020016200009e565b50505050905090810190601f168015620000e65780820380516001836020036101000a031916815260200191505b506040818152602083015192018051929491939192846401000000008211156200010f57600080fd5b9083019060208201858111156200012557600080fd5b82516401000000008111828201881017156200014057600080fd5b82525081516020918201929091019080838360005b838110156200016f57818101518382015260200162000155565b50505050905090810190601f1680156200019d5780820380516001836020036101000a031916815260200191505b50604052505050620001ae620003d5565b620001c2846001600160e01b03620002dc16565b805160008181556002918255604080840180516001908155825160e08101845281815260208088015181830190815293518286019081526080808a0151606085019081526001600160401b038e169185019190915260a0840188905260c084018890528951885260078352959096208251815460ff191690151517815593519284019290925593519482019490945590518051949550919390926200026f9260038501929101906200040a565b506080820151600482810180546001600160401b03199081166001600160401b039485161790915560a0850151600585015560c09094015160069093019290925560038054909316908616179091558251620002d1919060208501906200040a565b5050505050620004af565b620002e6620003d5565b60008060008060008651602088016040516020810160208101602081016020810160a08588886010600019fa6200031c57600080fd5b84519b5083519a50825199508151985080519750505050505050506060816001600160401b03811180156200035057600080fd5b506040519080825280601f01601f1916602001820160405280156200037c576020820181803683370190505b5090508115620003a85787516020890160208301848184846011600019fa620003a457600080fd5b5050505b6040805160a081018252968752602087019590955293850192909252606084015250608082015292915050565b6040518060a001604052806000801916815
2602001600080191681526020016000815260200160008152602001606081525090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106200044d57805160ff19168380011785556200047d565b828001600101855582156200047d579182015b828111156200047d57825182559160200191906001019062000460565b506200048b9291506200048f565b5090565b620004ac91905b808211156200048b576000815560010162000496565b90565b61163980620004bf6000396000f3fe608060405234801561001057600080fd5b50600436106100625760003560e01c8063374c2c26146100675780636a742c0914610108578063871ebe181461033d578063d96a2deb1461036e578063e8ffbe841461038f578063fae71ae8146105d4575b600080fd5b61006f610684565b604051808060200180602001838103835285818151815260200191508051906020019060200280838360005b838110156100b357818101518382015260200161009b565b50505050905001838103825284818151815260200191508051906020019060200280838360005b838110156100f25781810151838201526020016100da565b5050505090500194505050505060405180910390f35b61033b6004803603608081101561011e57600080fd5b810190602081018135600160201b81111561013857600080fd5b82018360208201111561014a57600080fd5b803590602001918460018302840111600160201b8311171561016b57600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b8111156101bd57600080fd5b8201836020820111156101cf57600080fd5b803590602001918460018302840111600160201b831117156101f057600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b81111561024257600080fd5b82018360208201111561025457600080fd5b803590602001918460018302840111600160201b8311171561027557600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b8111156102c757600080fd5b8201836020820111156102d957600080fd5b803590602001918460018302840111600160201b831117156102fa57600080fd5b91908080601f0160208091040260200160405190810
16040528093929190818152602001838380828437600092019190915250929550610789945050505050565b005b61035a6004803603602081101561035357600080fd5b50356107e5565b604080519115158252519081900360200190f35b6103766107fd565b6040805192835260208301919091528051918290030190f35b6105c2600480360360808110156103a557600080fd5b810190602081018135600160201b8111156103bf57600080fd5b8201836020820111156103d157600080fd5b803590602001918460018302840111600160201b831117156103f257600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b81111561044457600080fd5b82018360208201111561045657600080fd5b803590602001918460018302840111600160201b8311171561047757600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b8111156104c957600080fd5b8201836020820111156104db57600080fd5b803590602001918460018302840111600160201b831117156104fc57600080fd5b91908080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152509295949360208101935035915050600160201b81111561054e57600080fd5b82018360208201111561056057600080fd5b803590602001918460018302840111600160201b8311171561058157600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610815945050505050565b60408051918252519081900360200190f35b61033b600480360360608110156105ea57600080fd5b813591602081013591810190606081016040820135600160201b81111561061057600080fd5b82018360208201111561062257600080fd5b803590602001918460018302840111600160201b8311171561064357600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610b28945050505050565b6005546060908190818167ffffffffffffffff811180156106a457600080fd5b506040519080825280602002602001820160405280156106ce578160200160208202803683370190505b50905060005b828110156107295760076000600583815481106106ed57fe5b906000526020600020015
481526020019081526020016000206002015482828151811061071657fe5b60209081029190910101526001016106d4565b508060058080548060200260200160405190810160405280929190818152602001828054801561077857602002820191906000526020600020905b815481526020019060010190808311610764575b505050505090509350935050509091565b61079284610d8d565b61079b576107df565b8251156107b4576107ab83610d8d565b6107b4576107df565b8151156107cd576107c482610d8d565b6107cd576107df565b8051156107df576107dd81610d8d565b505b50505050565b60008181526007602052604090205460ff165b919050565b60008054808252600760205260409091206002015491565b600061081f611454565b61082886610f0e565b9050610832611489565b602082810151600090815260078252604090819020815160e081018352815460ff1615158152600180830154828601526002808401548386015260038401805486516101009482161594909402600019011691909104601f81018790048702830187019095528482529194929360608601939192918301828280156108f85780601f106108cd576101008083540402835291602001916108f8565b820191906000526020600020905b8154815290600101906020018083116108db57829003601f168201915b5050509183525050600482015467ffffffffffffffff1660208201526005820154604082015260069091015460609091015290506000806109398484611001565b945050505091506000600681111561094d57fe5b82600681111561095957fe5b146109ab576040805162461bcd60e51b815260206004820152601860248201527f43616e277420696d706f727420616e7920686561646572730000000000000000604482015290519081900360640190fd5b83604001518114156109c4576001945050505050610b20565b87516109d7576000945050505050610b20565b6109df611489565b6109e98585611171565b90506109f3611454565b6109fc8a610f0e565b90506000610a0a8284611001565b9450505050508160400151811415610a2c576002975050505050505050610b20565b8951610a42576000975050505050505050610b20565b610a4a611489565b610a548388611171565b9050610a5e611454565b610a678c610f0e565b90506000610a758284611001565b9450505050508160400151811415610a9a5760039a5050505050505050505050610b20565b8b51610ab35760009a5050505050505050505050610b20565b610abb611489565b610ac5838b611171565b9050610acf611454565b610ad88e610f0e565b90506000610ae68
284611001565b9450505050508160400151811415610b0e5760049d5050505050505050505050505050610b20565b60009d50505050505050505050505050505b949350505050565b6000828152600760205260409020600201548314610b775760405162461bcd60e51b815260040180806020018281038252602f8152602001806115d5602f913960400191505060405180910390fd5b60028054600354600480546040805160206101006001851615026000190190931696909604601f81018390048302870183019091528086529394600094610c28948a948a9467ffffffffffffffff90921693929091830182828015610c1d5780601f10610bf257610100808354040283529160200191610c1d565b820191906000526020600020905b815481529060010190602001808311610c0057829003601f168201915b5050505050876111d0565b600081815260076020526040902060028281558101546001559091505b828214610d8557506000818152600760209081526040808320600181015460069093529220549092908015610d07576005546000199182019181018214610cd357600060056001830381548110610c9857fe5b906000526020600020015490508060058481548110610cb357fe5b600091825260208083209091019290925591825260069052604090208290555b6005805480610cde57fe5b600082815260208082208301600019908101839055909201909255848252600690526040812055505b826006015483600201541415610d7e57600583015460009081526007602052604090206003805467ffffffffffffffff198116600167ffffffffffffffff92831681019092161782559082018054610d759260049291600261010092821615929092026000190116046114c4565b50505050610d85565b5050610c45565b505050505050565b600080610d98611454565b6000806000610da687611312565b9398509196509450925090506000856006811115610dc057fe5b14610dd3576000955050505050506107f8565b604084015181148015610e27576005805486516001820180845560009384527f036b6384b5eca791c62761152d0c79bb0604c104a5fb6f4eb0703f3154bb3db0909201558651825260066020526040909120555b6040805160e0810182526001808252602088810151818401908152898501518486019081526080808c01516060870190815267ffffffffffffffff8c169187019190915260a086018a905260c086018990528b51600090815260078552969096208551815460ff1916901515178155915193820193909355915160028301559251805192939192610ebe9260038501920190611549565b5060808201516004820
1805467ffffffffffffffff191667ffffffffffffffff90921691909117905560a0820151600582015560c090910151600690910155935160005550509015949350505050565b610f16611454565b60008060008060008651602088016040516020810160208101602081016020810160a08588886010600019fa610f4b57600080fd5b84519b5083519a508251995081519850805197505050505050505060608167ffffffffffffffff81118015610f7f57600080fd5b506040519080825280601f01601f191660200182016040528015610faa576020820181803683370190505b5090508115610fd45787516020890160208301848184846011600019fa610fd057600080fd5b5050505b6040805160a081018252968752602087019590955293850192909252606084015250608082015292915050565b600061100b611454565b83516000908152600760205260408120548190819060ff161561103d5750600193508592506000915081905080611167565b60015487604001511161105f5750600293508592506000915081905080611167565b8551158061107857506001876040015103866040015114155b156110925750600393508592506000915081905080611167565b60c0860151158015906110ac575085604001518660c00151145b156110d3578660200151600254146110d35750600493508592506000915081905080611167565b60808087015160a088015160c0890151928a01515191929091156111585767ffffffffffffffff838116141561111d57506005965088955060009450849350839250611167915050565b8960400151811061114257506006965088955060009450849350839250611167915050565b50508751606089015160408a0151600190930192015b60009750899650919450925090505b9295509295909350565b611179611489565b506040805160e08101825260018082528451602083015293820151909301908301526060818101519083015260808082015167ffffffffffffffff169083015260a0808201519083015260c0908101519082015290565b600060608686868686604051602001808681526020018581526020018467ffffffffffffffff1667ffffffffffffffff1681526020018060200180602001838103835285818151815260200191508051906020019080838360005b8381101561124357818101518382015260200161122b565b50505050905090810190601f1680156112705780820380516001836020036101000a031916815260200191505b50838103825284518152845160209182019186019080838360005b838110156112a357818101518382015260200161128b565b50505050905090810190601f1
680156112d05780820380516001836020036101000a031916815260200191505b50975050505050505050604051602081830303815290604052905080516020820160008083836012600019fa61130557600080fd5b5095979650505050505050565b600061131c611454565b6000806000611329611454565b61133287610f0e565b905061133c611489565b602082810151600090815260078252604090819020815160e081018352815460ff1615158152600180830154828601526002808401548386015260038401805486516101009482161594909402600019011691909104601f81018790048702830187019095528482529194929360608601939192918301828280156114025780601f106113d757610100808354040283529160200191611402565b820191906000526020600020905b8154815290600101906020018083116113e557829003601f168201915b5050509183525050600482015467ffffffffffffffff1660208201526005820154604082015260069091015460609091015290506114408282611001565b939c929b5090995097509095509350505050565b6040518060a0016040528060008019168152602001600080191681526020016000815260200160008152602001606081525090565b6040805160e0810182526000808252602082018190529181018290526060808201526080810182905260a0810182905260c081019190915290565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106114fd5780548555611539565b8280016001018555821561153957600052602060002091601f016020900482015b8281111561153957825482559160010191906001019061151e565b506115459291506115b7565b5090565b828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061158a57805160ff1916838001178555611539565b82800160010185558215611539579182015b8281111561153957825182559160200191906001019061159c565b6115d191905b8082111561154557600081556001016115bd565b9056fe4d697373696e672066696e616c69747920746172676574206865616465722066726f6d207468652073746f72616765a2646970667358221220edcaec08f93f74ce5be00b81da5d6b2276138571a33f1cfdca50e5047f854e6e64736f6c63430006060033 \ No newline at end of file diff --git a/polkadot/relays/bin-ethereum/res/substrate-bridge-metadata.txt b/polkadot/relays/bin-ethereum/res/substrate-bridge-metadata.txt new file mode 100644 
index 00000000000..13b7daa9a8b --- /dev/null +++ b/polkadot/relays/bin-ethereum/res/substrate-bridge-metadata.txt @@ -0,0 +1,5 @@ +Last Change Date: 2020-07-30 +Solc version: 0.6.6+commit.6c089d02.Linux.g++ +Source hash (keccak256): 0xea5d6d744f69157adc2857166792aca139c0b5b186ba89c1011358fbcad90d7e +Source gist: https://github.com/svyatonik/substrate-bridge-sol/blob/6456d3e016c95cd5e6d5e817c23e9e69e739aa78/substrate-bridge.sol +Compiler flags used (command to produce the file): `docker run -i ethereum/solc:0.6.6 --optimize --bin - < substrate-bridge.sol` \ No newline at end of file diff --git a/polkadot/relays/bin-ethereum/src/cli.yml b/polkadot/relays/bin-ethereum/src/cli.yml new file mode 100644 index 00000000000..78971787c0e --- /dev/null +++ b/polkadot/relays/bin-ethereum/src/cli.yml @@ -0,0 +1,166 @@ +name: ethsub-bridge +version: "0.1.0" +author: Parity Technologies +about: Parity Ethereum (PoA) <-> Substrate bridge +subcommands: + - eth-to-sub: + about: Synchronize headers from Ethereum node to Substrate node. + args: + - eth-host: ð-host + long: eth-host + value_name: ETH_HOST + help: Connect to Ethereum node websocket server at given host. + takes_value: true + - eth-port: ð-port + long: eth-port + value_name: ETH_PORT + help: Connect to Ethereum node websocket server at given port. + takes_value: true + - sub-host: &sub-host + long: sub-host + value_name: SUB_HOST + help: Connect to Substrate node websocket server at given host. + takes_value: true + - sub-port: &sub-port + long: sub-port + value_name: SUB_PORT + help: Connect to Substrate node websocket server at given port. + takes_value: true + - sub-tx-mode: + long: sub-tx-mode + value_name: MODE + help: Submit headers using signed (default) or unsigned transactions. Third mode - backup - submits signed transactions only when we believe that sync has stalled. 
+ takes_value: true + possible_values: + - signed + - unsigned + - backup + - sub-signer: &sub-signer + long: sub-signer + value_name: SUB_SIGNER + help: The SURI of secret key to use when transactions are submitted to the Substrate node. + - sub-signer-password: &sub-signer-password + long: sub-signer-password + value_name: SUB_SIGNER_PASSWORD + help: The password for the SURI of secret key to use when transactions are submitted to the Substrate node. + - sub-pallet-instance: &sub-pallet-instance + long: instance + short: i + value_name: PALLET_INSTANCE + help: The instance of the bridge pallet the relay should follow. + takes_value: true + case_insensitive: true + possible_values: + - Rialto + - Kovan + default_value: Rialto + - no-prometheus: &no-prometheus + long: no-prometheus + help: Do not expose a Prometheus metric endpoint. + - prometheus-host: &prometheus-host + long: prometheus-host + value_name: PROMETHEUS_HOST + help: Expose Prometheus endpoint at given interface. + - prometheus-port: &prometheus-port + long: prometheus-port + value_name: PROMETHEUS_PORT + help: Expose Prometheus endpoint at given port. + - sub-to-eth: + about: Synchronize headers from Substrate node to Ethereum node. + args: + - eth-host: *eth-host + - eth-port: *eth-port + - eth-contract: + long: eth-contract + value_name: ETH_CONTRACT + help: Address of deployed bridge contract. + takes_value: true + - eth-chain-id: ð-chain-id + long: eth-chain-id + value_name: ETH_CHAIN_ID + help: Chain ID to use for signing. + - eth-signer: ð-signer + long: eth-signer + value_name: ETH_SIGNER + help: Hex-encoded secret to use when transactions are submitted to the Ethereum node. + - sub-host: *sub-host + - sub-port: *sub-port + - no-prometheus: *no-prometheus + - prometheus-host: *prometheus-host + - prometheus-port: *prometheus-port + - eth-deploy-contract: + about: Deploy Bridge contract on Ethereum node. 
+ args: + - eth-host: *eth-host + - eth-port: *eth-port + - eth-signer: *eth-signer + - eth-chain-id: *eth-chain-id + - eth-contract-code: + long: eth-contract-code + value_name: ETH_CONTRACT_CODE + help: Bytecode of bridge contract. + takes_value: true + - sub-host: *sub-host + - sub-port: *sub-port + - sub-authorities-set-id: + long: sub-authorities-set-id + value_name: SUB_AUTHORITIES_SET_ID + help: ID of initial GRANDPA authorities set. + takes_value: true + - sub-authorities-set: + long: sub-authorities-set + value_name: SUB_AUTHORITIES_SET + help: Encoded initial GRANDPA authorities set. + takes_value: true + - sub-initial-header: + long: sub-initial-header + value_name: SUB_INITIAL_HEADER + help: Encoded initial Substrate header. + takes_value: true + - eth-submit-exchange-tx: + about: Submit lock funds transaction to Ethereum node. + args: + - eth-host: *eth-host + - eth-port: *eth-port + - eth-nonce: + long: eth-nonce + value_name: ETH_NONCE + help: Nonce that have to be used when building transaction. If not specified, read from PoA node. + takes_value: true + - eth-signer: *eth-signer + - eth-chain-id: *eth-chain-id + - eth-amount: + long: eth-amount + value_name: ETH_AMOUNT + help: Amount of ETH to lock (in wei). + takes_value: true + - sub-recipient: + long: sub-recipient + value_name: SUB_RECIPIENT + help: Hex-encoded Public key of funds recipient in Substrate chain. + takes_value: true + - eth-exchange-sub: + about: Submit proof of PoA lock funds transaction to Substrate node. + args: + - eth-host: *eth-host + - eth-port: *eth-port + - eth-start-with-block: + long: eth-start-with-block + value_name: ETH_START_WITH_BLOCK + help: Auto-relay transactions starting with given block number. If not specified, starts with best finalized Ethereum block (known to Substrate node) transactions. + takes_value: true + conflicts_with: + - eth-tx-hash + - eth-tx-hash: + long: eth-tx-hash + value_name: ETH_TX_HASH + help: Hash of the lock funds transaction. 
+ takes_value: true + - sub-host: *sub-host + - sub-port: *sub-port + - sub-signer: *sub-signer + - sub-signer-password: *sub-signer-password + - sub-pallet-instance: *sub-pallet-instance + - no-prometheus: *no-prometheus + - prometheus-host: *prometheus-host + - prometheus-port: *prometheus-port diff --git a/polkadot/relays/bin-ethereum/src/ethereum_client.rs b/polkadot/relays/bin-ethereum/src/ethereum_client.rs new file mode 100644 index 00000000000..71a3f38859b --- /dev/null +++ b/polkadot/relays/bin-ethereum/src/ethereum_client.rs @@ -0,0 +1,653 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +use crate::rpc_errors::RpcError; +use crate::substrate_sync_loop::QueuedRialtoHeader; + +use async_trait::async_trait; +use bp_eth_poa::signatures::secret_to_address; +use codec::{Decode, Encode}; +use ethabi::FunctionOutputDecoder; +use headers_relay::sync_types::SubmittedHeaders; +use relay_ethereum_client::{ + sign_and_submit_transaction, + types::{Address, CallRequest, HeaderId as EthereumHeaderId, Receipt, H256, U256}, + Client as EthereumClient, Error as EthereumNodeError, SigningParams as EthereumSigningParams, +}; +use relay_rialto_client::HeaderId as RialtoHeaderId; +use relay_utils::{HeaderId, MaybeConnectionError}; +use sp_runtime::EncodedJustification; +use std::collections::HashSet; + +// to encode/decode contract calls +ethabi_contract::use_contract!(bridge_contract, "res/substrate-bridge-abi.json"); + +type RpcResult = std::result::Result; + +/// A trait which contains methods that work by using multiple low-level RPCs, or more complicated +/// interactions involving, for example, an Ethereum contract. +#[async_trait] +pub trait EthereumHighLevelRpc { + /// Returns best Substrate block that PoA chain knows of. + async fn best_substrate_block(&self, contract_address: Address) -> RpcResult; + + /// Returns true if Substrate header is known to Ethereum node. + async fn substrate_header_known( + &self, + contract_address: Address, + id: RialtoHeaderId, + ) -> RpcResult<(RialtoHeaderId, bool)>; + + /// Submits Substrate headers to Ethereum contract. + async fn submit_substrate_headers( + &self, + params: EthereumSigningParams, + contract_address: Address, + headers: Vec, + ) -> SubmittedHeaders; + + /// Returns ids of incomplete Substrate headers. + async fn incomplete_substrate_headers(&self, contract_address: Address) -> RpcResult>; + + /// Complete Substrate header. 
+ async fn complete_substrate_header( + &self, + params: EthereumSigningParams, + contract_address: Address, + id: RialtoHeaderId, + justification: EncodedJustification, + ) -> RpcResult; + + /// Submit ethereum transaction. + async fn submit_ethereum_transaction( + &self, + params: &EthereumSigningParams, + contract_address: Option
, + nonce: Option, + double_gas: bool, + encoded_call: Vec, + ) -> RpcResult<()>; + + /// Retrieve transactions receipts for given block. + async fn transaction_receipts( + &self, + id: EthereumHeaderId, + transactions: Vec, + ) -> RpcResult<(EthereumHeaderId, Vec)>; +} + +#[async_trait] +impl EthereumHighLevelRpc for EthereumClient { + async fn best_substrate_block(&self, contract_address: Address) -> RpcResult { + let (encoded_call, call_decoder) = bridge_contract::functions::best_known_header::call(); + let call_request = CallRequest { + to: Some(contract_address), + data: Some(encoded_call.into()), + ..Default::default() + }; + + let call_result = self.eth_call(call_request).await?; + let (number, raw_hash) = call_decoder.decode(&call_result.0)?; + let hash = rialto_runtime::Hash::decode(&mut &raw_hash[..])?; + + if number != number.low_u32().into() { + return Err(RpcError::Ethereum(EthereumNodeError::InvalidSubstrateBlockNumber)); + } + + Ok(HeaderId(number.low_u32(), hash)) + } + + async fn substrate_header_known( + &self, + contract_address: Address, + id: RialtoHeaderId, + ) -> RpcResult<(RialtoHeaderId, bool)> { + let (encoded_call, call_decoder) = bridge_contract::functions::is_known_header::call(id.1); + let call_request = CallRequest { + to: Some(contract_address), + data: Some(encoded_call.into()), + ..Default::default() + }; + + let call_result = self.eth_call(call_request).await?; + let is_known_block = call_decoder.decode(&call_result.0)?; + + Ok((id, is_known_block)) + } + + async fn submit_substrate_headers( + &self, + params: EthereumSigningParams, + contract_address: Address, + headers: Vec, + ) -> SubmittedHeaders { + // read nonce of signer + let address: Address = secret_to_address(¶ms.signer); + let nonce = match self.account_nonce(address).await { + Ok(nonce) => nonce, + Err(error) => { + return SubmittedHeaders { + submitted: Vec::new(), + incomplete: Vec::new(), + rejected: headers.iter().rev().map(|header| header.id()).collect(), + 
fatal_error: Some(error.into()), + } + } + }; + + // submit headers. Note that we're cloning self here. It is ok, because + // cloning `jsonrpsee::Client` only clones reference to background threads + submit_substrate_headers( + EthereumHeadersSubmitter { + client: self.clone(), + params, + contract_address, + nonce, + }, + headers, + ) + .await + } + + async fn incomplete_substrate_headers(&self, contract_address: Address) -> RpcResult> { + let (encoded_call, call_decoder) = bridge_contract::functions::incomplete_headers::call(); + let call_request = CallRequest { + to: Some(contract_address), + data: Some(encoded_call.into()), + ..Default::default() + }; + + let call_result = self.eth_call(call_request).await?; + + // Q: Is is correct to call these "incomplete_ids"? + let (incomplete_headers_numbers, incomplete_headers_hashes) = call_decoder.decode(&call_result.0)?; + let incomplete_ids = incomplete_headers_numbers + .into_iter() + .zip(incomplete_headers_hashes) + .filter_map(|(number, hash)| { + if number != number.low_u32().into() { + return None; + } + + Some(HeaderId(number.low_u32(), hash)) + }) + .collect(); + + Ok(incomplete_ids) + } + + async fn complete_substrate_header( + &self, + params: EthereumSigningParams, + contract_address: Address, + id: RialtoHeaderId, + justification: EncodedJustification, + ) -> RpcResult { + let _ = self + .submit_ethereum_transaction( + ¶ms, + Some(contract_address), + None, + false, + bridge_contract::functions::import_finality_proof::encode_input(id.0, id.1, justification), + ) + .await?; + + Ok(id) + } + + async fn submit_ethereum_transaction( + &self, + params: &EthereumSigningParams, + contract_address: Option
, + nonce: Option, + double_gas: bool, + encoded_call: Vec, + ) -> RpcResult<()> { + sign_and_submit_transaction(self, params, contract_address, nonce, double_gas, encoded_call) + .await + .map_err(Into::into) + } + + async fn transaction_receipts( + &self, + id: EthereumHeaderId, + transactions: Vec, + ) -> RpcResult<(EthereumHeaderId, Vec)> { + let mut transaction_receipts = Vec::with_capacity(transactions.len()); + for transaction in transactions { + let transaction_receipt = self.transaction_receipt(transaction).await?; + transaction_receipts.push(transaction_receipt); + } + Ok((id, transaction_receipts)) + } +} + +/// Max number of headers which can be sent to Solidity contract. +pub const HEADERS_BATCH: usize = 4; + +/// Substrate headers to send to the Ethereum light client. +/// +/// The Solidity contract can only accept a fixed number of headers in one go. +/// This struct is meant to encapsulate this limitation. +#[derive(Debug)] +#[cfg_attr(test, derive(Clone))] +pub struct HeadersBatch { + pub header1: QueuedRialtoHeader, + pub header2: Option, + pub header3: Option, + pub header4: Option, +} + +impl HeadersBatch { + /// Create new headers from given header & ids collections. + /// + /// This method will pop `HEADERS_BATCH` items from both collections + /// and construct `Headers` object and a vector of `RialtoHeaderId`s. 
+ pub fn pop_from( + headers: &mut Vec, + ids: &mut Vec, + ) -> Result<(Self, Vec), ()> { + if headers.len() != ids.len() { + log::error!(target: "bridge", "Collection size mismatch ({} vs {})", headers.len(), ids.len()); + return Err(()); + } + + let header1 = headers.pop().ok_or(())?; + let header2 = headers.pop(); + let header3 = headers.pop(); + let header4 = headers.pop(); + + let mut submitting_ids = Vec::with_capacity(HEADERS_BATCH); + for _ in 0..HEADERS_BATCH { + submitting_ids.extend(ids.pop().iter()); + } + + Ok(( + Self { + header1, + header2, + header3, + header4, + }, + submitting_ids, + )) + } + + /// Returns unified array of headers. + /// + /// The first element is always `Some`. + fn headers(&self) -> [Option<&QueuedRialtoHeader>; HEADERS_BATCH] { + [ + Some(&self.header1), + self.header2.as_ref(), + self.header3.as_ref(), + self.header4.as_ref(), + ] + } + + /// Encodes all headers. If header is not present an empty vector will be returned. + pub fn encode(&self) -> [Vec; HEADERS_BATCH] { + let encode = |h: &QueuedRialtoHeader| h.header().encode(); + let headers = self.headers(); + [ + headers[0].map(encode).unwrap_or_default(), + headers[1].map(encode).unwrap_or_default(), + headers[2].map(encode).unwrap_or_default(), + headers[3].map(encode).unwrap_or_default(), + ] + } + /// Returns number of contained headers. + pub fn len(&self) -> usize { + let is_set = |h: &Option<&QueuedRialtoHeader>| if h.is_some() { 1 } else { 0 }; + self.headers().iter().map(is_set).sum() + } + + /// Remove headers starting from `idx` (0-based) from this collection. + /// + /// The collection will be left with `[0, idx)` headers. + /// Returns `Err` when `idx == 0`, since `Headers` must contain at least one header, + /// or when `idx > HEADERS_BATCH`. 
+ pub fn split_off(&mut self, idx: usize) -> Result<(), ()> { + if idx == 0 || idx > HEADERS_BATCH { + return Err(()); + } + let mut vals: [_; HEADERS_BATCH] = [&mut None, &mut self.header2, &mut self.header3, &mut self.header4]; + for val in vals.iter_mut().skip(idx) { + **val = None; + } + Ok(()) + } +} + +/// Substrate headers submitter API. +#[async_trait] +trait HeadersSubmitter { + /// Returns Ok(0) if all given not-yet-imported headers are complete. + /// Returns Ok(index != 0) where index is 1-based index of first header that is incomplete. + /// + /// Returns Err(()) if contract has rejected headers. This means that the contract is + /// unable to import first header (e.g. it may already be imported). + async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult; + + /// Submit given headers to Ethereum node. + async fn submit_headers(&mut self, headers: HeadersBatch) -> RpcResult<()>; +} + +/// Implementation of Substrate headers submitter that sends headers to running Ethereum node. 
+struct EthereumHeadersSubmitter { + client: EthereumClient, + params: EthereumSigningParams, + contract_address: Address, + nonce: U256, +} + +#[async_trait] +impl HeadersSubmitter for EthereumHeadersSubmitter { + async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult { + let [h1, h2, h3, h4] = headers.encode(); + let (encoded_call, call_decoder) = bridge_contract::functions::is_incomplete_headers::call(h1, h2, h3, h4); + let call_request = CallRequest { + to: Some(self.contract_address), + data: Some(encoded_call.into()), + ..Default::default() + }; + + let call_result = self.client.eth_call(call_request).await?; + let incomplete_index: U256 = call_decoder.decode(&call_result.0)?; + if incomplete_index > HEADERS_BATCH.into() { + return Err(RpcError::Ethereum(EthereumNodeError::InvalidIncompleteIndex)); + } + + Ok(incomplete_index.low_u32() as _) + } + + async fn submit_headers(&mut self, headers: HeadersBatch) -> RpcResult<()> { + let [h1, h2, h3, h4] = headers.encode(); + let result = self + .client + .submit_ethereum_transaction( + &self.params, + Some(self.contract_address), + Some(self.nonce), + false, + bridge_contract::functions::import_headers::encode_input(h1, h2, h3, h4), + ) + .await; + + if result.is_ok() { + self.nonce += U256::one(); + } + + result + } +} + +/// Submit multiple Substrate headers. 
+async fn submit_substrate_headers( + mut header_submitter: impl HeadersSubmitter, + mut headers: Vec, +) -> SubmittedHeaders { + let mut submitted_headers = SubmittedHeaders::default(); + + let mut ids = headers.iter().map(|header| header.id()).rev().collect::>(); + headers.reverse(); + + while !headers.is_empty() { + let (headers, submitting_ids) = + HeadersBatch::pop_from(&mut headers, &mut ids).expect("Headers and ids are not empty; qed"); + + submitted_headers.fatal_error = + submit_substrate_headers_batch(&mut header_submitter, &mut submitted_headers, submitting_ids, headers) + .await; + + if submitted_headers.fatal_error.is_some() { + ids.reverse(); + submitted_headers.rejected.extend(ids); + break; + } + } + + submitted_headers +} + +/// Submit 4 Substrate headers in single PoA transaction. +async fn submit_substrate_headers_batch( + header_submitter: &mut impl HeadersSubmitter, + submitted_headers: &mut SubmittedHeaders, + mut ids: Vec, + mut headers: HeadersBatch, +) -> Option { + debug_assert_eq!(ids.len(), headers.len(),); + + // if parent of first header is either incomplete, or rejected, we assume that contract + // will reject this header as well + let parent_id = headers.header1.parent_id(); + if submitted_headers.rejected.contains(&parent_id) || submitted_headers.incomplete.contains(&parent_id) { + submitted_headers.rejected.extend(ids); + return None; + } + + // check if headers are incomplete + let incomplete_header_index = match header_submitter.is_headers_incomplete(&headers).await { + // All headers valid + Ok(0) => None, + Ok(incomplete_header_index) => Some(incomplete_header_index), + Err(error) => { + // contract has rejected all headers => we do not want to submit it + submitted_headers.rejected.extend(ids); + if error.is_connection_error() { + return Some(error); + } else { + return None; + } + } + }; + + // Modify `ids` and `headers` to only contain values that are going to be accepted. 
+ let rejected = if let Some(idx) = incomplete_header_index { + let len = std::cmp::min(idx, ids.len()); + headers + .split_off(len) + .expect("len > 0, the case where all headers are valid is converted to None; qed"); + ids.split_off(len) + } else { + Vec::new() + }; + let submitted = ids; + let submit_result = header_submitter.submit_headers(headers).await; + match submit_result { + Ok(_) => { + if incomplete_header_index.is_some() { + submitted_headers.incomplete.extend(submitted.iter().last().cloned()); + } + submitted_headers.submitted.extend(submitted); + submitted_headers.rejected.extend(rejected); + None + } + Err(error) => { + submitted_headers.rejected.extend(submitted); + submitted_headers.rejected.extend(rejected); + Some(error) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_runtime::traits::Header; + + struct TestHeadersSubmitter { + incomplete: Vec, + failed: Vec, + } + + #[async_trait] + impl HeadersSubmitter for TestHeadersSubmitter { + async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult { + if self.incomplete.iter().any(|i| i.0 == headers.header1.id().0) { + Ok(1) + } else { + Ok(0) + } + } + + async fn submit_headers(&mut self, headers: HeadersBatch) -> RpcResult<()> { + if self.failed.iter().any(|i| i.0 == headers.header1.id().0) { + Err(RpcError::Ethereum(EthereumNodeError::InvalidSubstrateBlockNumber)) + } else { + Ok(()) + } + } + } + + fn header(number: rialto_runtime::BlockNumber) -> QueuedRialtoHeader { + QueuedRialtoHeader::new( + rialto_runtime::Header::new( + number, + Default::default(), + Default::default(), + if number == 0 { + Default::default() + } else { + header(number - 1).id().1 + }, + Default::default(), + ) + .into(), + ) + } + + #[test] + fn descendants_of_incomplete_headers_are_not_submitted() { + let submitted_headers = async_std::task::block_on(submit_substrate_headers( + TestHeadersSubmitter { + incomplete: vec![header(5).id()], + failed: vec![], + }, + vec![header(5), header(6)], 
+ )); + assert_eq!(submitted_headers.submitted, vec![header(5).id()]); + assert_eq!(submitted_headers.incomplete, vec![header(5).id()]); + assert_eq!(submitted_headers.rejected, vec![header(6).id()]); + assert!(submitted_headers.fatal_error.is_none()); + } + + #[test] + fn headers_after_fatal_error_are_not_submitted() { + let submitted_headers = async_std::task::block_on(submit_substrate_headers( + TestHeadersSubmitter { + incomplete: vec![], + failed: vec![header(9).id()], + }, + vec![ + header(5), + header(6), + header(7), + header(8), + header(9), + header(10), + header(11), + ], + )); + assert_eq!( + submitted_headers.submitted, + vec![header(5).id(), header(6).id(), header(7).id(), header(8).id()] + ); + assert_eq!(submitted_headers.incomplete, vec![]); + assert_eq!( + submitted_headers.rejected, + vec![header(9).id(), header(10).id(), header(11).id(),] + ); + assert!(submitted_headers.fatal_error.is_some()); + } + + fn headers_batch() -> HeadersBatch { + let mut init_headers = vec![header(1), header(2), header(3), header(4), header(5)]; + init_headers.reverse(); + let mut init_ids = init_headers.iter().map(|h| h.id()).collect(); + let (headers, ids) = HeadersBatch::pop_from(&mut init_headers, &mut init_ids).unwrap(); + assert_eq!(init_headers, vec![header(5)]); + assert_eq!(init_ids, vec![header(5).id()]); + assert_eq!( + ids, + vec![header(1).id(), header(2).id(), header(3).id(), header(4).id()] + ); + headers + } + + #[test] + fn headers_batch_len() { + let headers = headers_batch(); + assert_eq!(headers.len(), 4); + } + + #[test] + fn headers_batch_encode() { + let headers = headers_batch(); + assert_eq!( + headers.encode(), + [ + header(1).header().encode(), + header(2).header().encode(), + header(3).header().encode(), + header(4).header().encode(), + ] + ); + } + + #[test] + fn headers_batch_split_off() { + // given + let mut headers = headers_batch(); + + // when + assert!(headers.split_off(0).is_err()); + assert_eq!(headers.header1, header(1)); + 
assert!(headers.header2.is_some()); + assert!(headers.header3.is_some()); + assert!(headers.header4.is_some()); + + // when + let mut h = headers.clone(); + h.split_off(1).unwrap(); + assert!(h.header2.is_none()); + assert!(h.header3.is_none()); + assert!(h.header4.is_none()); + + // when + let mut h = headers.clone(); + h.split_off(2).unwrap(); + assert!(h.header2.is_some()); + assert!(h.header3.is_none()); + assert!(h.header4.is_none()); + + // when + let mut h = headers.clone(); + h.split_off(3).unwrap(); + assert!(h.header2.is_some()); + assert!(h.header3.is_some()); + assert!(h.header4.is_none()); + + // when + let mut h = headers; + h.split_off(4).unwrap(); + assert!(h.header2.is_some()); + assert!(h.header3.is_some()); + assert!(h.header4.is_some()); + } +} diff --git a/polkadot/relays/bin-ethereum/src/ethereum_deploy_contract.rs b/polkadot/relays/bin-ethereum/src/ethereum_deploy_contract.rs new file mode 100644 index 00000000000..84c12be7a70 --- /dev/null +++ b/polkadot/relays/bin-ethereum/src/ethereum_deploy_contract.rs @@ -0,0 +1,154 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +use crate::ethereum_client::{bridge_contract, EthereumHighLevelRpc}; +use crate::rpc_errors::RpcError; + +use codec::{Decode, Encode}; +use num_traits::Zero; +use relay_ethereum_client::{ + Client as EthereumClient, ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams, +}; +use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto}; +use relay_substrate_client::{ + Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams, OpaqueGrandpaAuthoritiesSet, +}; +use relay_utils::HeaderId; + +/// Ethereum synchronization parameters. +#[derive(Debug)] +pub struct EthereumDeployContractParams { + /// Ethereum connection params. + pub eth_params: EthereumConnectionParams, + /// Ethereum signing params. + pub eth_sign: EthereumSigningParams, + /// Ethereum contract bytecode. + pub eth_contract_code: Vec, + /// Substrate connection params. + pub sub_params: SubstrateConnectionParams, + /// Initial authorities set id. + pub sub_initial_authorities_set_id: Option, + /// Initial authorities set. + pub sub_initial_authorities_set: Option>, + /// Initial header. + pub sub_initial_header: Option>, +} + +/// Deploy Bridge contract on Ethereum chain. 
+pub async fn run(params: EthereumDeployContractParams) { + let EthereumDeployContractParams { + eth_params, + eth_sign, + sub_params, + sub_initial_authorities_set_id, + sub_initial_authorities_set, + sub_initial_header, + eth_contract_code, + } = params; + + let result = async move { + let eth_client = EthereumClient::new(eth_params).await.map_err(RpcError::Ethereum)?; + let sub_client = SubstrateClient::::new(sub_params).await.map_err(RpcError::Substrate)?; + + let (initial_header_id, initial_header) = prepare_initial_header(&sub_client, sub_initial_header).await?; + let initial_set_id = sub_initial_authorities_set_id.unwrap_or(0); + let initial_set = prepare_initial_authorities_set( + &sub_client, + initial_header_id.1, + sub_initial_authorities_set, + ).await?; + + log::info!( + target: "bridge", + "Deploying Ethereum contract.\r\n\tInitial header: {:?}\r\n\tInitial header id: {:?}\r\n\tInitial header encoded: {}\r\n\tInitial authorities set ID: {}\r\n\tInitial authorities set: {}", + initial_header, + initial_header_id, + hex::encode(&initial_header), + initial_set_id, + hex::encode(&initial_set), + ); + + deploy_bridge_contract( + ð_client, + ð_sign, + eth_contract_code, + initial_header, + initial_set_id, + initial_set, + ).await + }.await; + + if let Err(error) = result { + log::error!(target: "bridge", "{}", error); + } +} + +/// Prepare initial header. 
+async fn prepare_initial_header( + sub_client: &SubstrateClient, + sub_initial_header: Option>, +) -> Result<(RialtoHeaderId, Vec), String> { + match sub_initial_header { + Some(raw_initial_header) => match rialto_runtime::Header::decode(&mut &raw_initial_header[..]) { + Ok(initial_header) => Ok(( + HeaderId(initial_header.number, initial_header.hash()), + raw_initial_header, + )), + Err(error) => Err(format!("Error decoding initial header: {}", error)), + }, + None => { + let initial_header = sub_client.header_by_number(Zero::zero()).await; + initial_header + .map(|header| (HeaderId(Zero::zero(), header.hash()), header.encode())) + .map_err(|error| format!("Error reading Substrate genesis header: {:?}", error)) + } + } +} + +/// Prepare initial GRANDPA authorities set. +async fn prepare_initial_authorities_set( + sub_client: &SubstrateClient, + sub_initial_header_hash: rialto_runtime::Hash, + sub_initial_authorities_set: Option>, +) -> Result { + let initial_authorities_set = match sub_initial_authorities_set { + Some(initial_authorities_set) => Ok(initial_authorities_set), + None => sub_client.grandpa_authorities_set(sub_initial_header_hash).await, + }; + + initial_authorities_set.map_err(|error| format!("Error reading GRANDPA authorities set: {:?}", error)) +} + +/// Deploy bridge contract to Ethereum chain. 
+async fn deploy_bridge_contract( + eth_client: &EthereumClient, + params: &EthereumSigningParams, + contract_code: Vec, + initial_header: Vec, + initial_set_id: u64, + initial_authorities: Vec, +) -> Result<(), String> { + eth_client + .submit_ethereum_transaction( + params, + None, + None, + false, + bridge_contract::constructor(contract_code, initial_header, initial_set_id, initial_authorities), + ) + .await + .map_err(|error| format!("Error deploying contract: {:?}", error)) +} diff --git a/polkadot/relays/bin-ethereum/src/ethereum_exchange.rs b/polkadot/relays/bin-ethereum/src/ethereum_exchange.rs new file mode 100644 index 00000000000..18470512b5d --- /dev/null +++ b/polkadot/relays/bin-ethereum/src/ethereum_exchange.rs @@ -0,0 +1,403 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Relaying proofs of PoA -> Substrate exchange transactions. 
+ +use crate::instances::BridgeInstance; +use crate::rialto_client::{SubmitEthereumExchangeTransactionProof, SubstrateHighLevelRpc}; +use crate::rpc_errors::RpcError; +use crate::substrate_types::into_substrate_ethereum_receipt; + +use async_trait::async_trait; +use bp_currency_exchange::MaybeLockFundsTransaction; +use exchange_relay::exchange::{ + relay_single_transaction_proof, SourceBlock, SourceClient, SourceTransaction, TargetClient, + TransactionProofPipeline, +}; +use exchange_relay::exchange_loop::{run as run_loop, InMemoryStorage}; +use relay_ethereum_client::{ + types::{ + HeaderId as EthereumHeaderId, HeaderWithTransactions as EthereumHeaderWithTransactions, + Transaction as EthereumTransaction, TransactionHash as EthereumTransactionHash, H256, HEADER_ID_PROOF, + }, + Client as EthereumClient, ConnectionParams as EthereumConnectionParams, +}; +use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; +use relay_substrate_client::{ + Chain as SubstrateChain, Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams, +}; +use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient, HeaderId}; +use rialto_runtime::exchange::EthereumTransactionInclusionProof; +use std::{sync::Arc, time::Duration}; + +/// Interval at which we ask Ethereum node for updates. +const ETHEREUM_TICK_INTERVAL: Duration = Duration::from_secs(10); + +/// Exchange relay mode. +#[derive(Debug)] +pub enum ExchangeRelayMode { + /// Relay single transaction and quit. + Single(EthereumTransactionHash), + /// Auto-relay transactions starting with given block. + Auto(Option), +} + +/// PoA exchange transaction relay params. +pub struct EthereumExchangeParams { + /// Ethereum connection params. + pub eth_params: EthereumConnectionParams, + /// Substrate connection params. + pub sub_params: SubstrateConnectionParams, + /// Substrate signing params. + pub sub_sign: RialtoSigningParams, + /// Relay working mode. 
+ pub mode: ExchangeRelayMode, + /// Metrics parameters. + pub metrics_params: MetricsParams, + /// Instance of the bridge pallet being synchronized. + pub instance: Arc, +} + +impl std::fmt::Debug for EthereumExchangeParams { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + f.debug_struct("EthereumExchangeParams") + .field("eth_params", &self.eth_params) + .field("sub_params", &self.sub_params) + .field("sub_sign", &sp_core::Pair::public(&self.sub_sign)) + .field("mode", &self.mode) + .field("metrics_params", &self.metrics_params) + .field("instance", &self.instance) + .finish() + } +} + +/// Ethereum to Substrate exchange pipeline. +struct EthereumToSubstrateExchange; + +impl TransactionProofPipeline for EthereumToSubstrateExchange { + const SOURCE_NAME: &'static str = "Ethereum"; + const TARGET_NAME: &'static str = "Substrate"; + + type Block = EthereumSourceBlock; + type TransactionProof = EthereumTransactionInclusionProof; +} + +/// Ethereum source block. +struct EthereumSourceBlock(EthereumHeaderWithTransactions); + +impl SourceBlock for EthereumSourceBlock { + type Hash = H256; + type Number = u64; + type Transaction = EthereumSourceTransaction; + + fn id(&self) -> EthereumHeaderId { + HeaderId( + self.0.number.expect(HEADER_ID_PROOF).as_u64(), + self.0.hash.expect(HEADER_ID_PROOF), + ) + } + + fn transactions(&self) -> Vec { + self.0 + .transactions + .iter() + .cloned() + .map(EthereumSourceTransaction) + .collect() + } +} + +/// Ethereum source transaction. +struct EthereumSourceTransaction(EthereumTransaction); + +impl SourceTransaction for EthereumSourceTransaction { + type Hash = EthereumTransactionHash; + + fn hash(&self) -> Self::Hash { + self.0.hash + } +} + +/// Ethereum node as transactions proof source. 
+#[derive(Clone)] +struct EthereumTransactionsSource { + client: EthereumClient, +} + +#[async_trait] +impl RelayClient for EthereumTransactionsSource { + type Error = RpcError; + + async fn reconnect(&mut self) -> Result<(), RpcError> { + self.client.reconnect().await.map_err(Into::into) + } +} + +#[async_trait] +impl SourceClient for EthereumTransactionsSource { + async fn tick(&self) { + async_std::task::sleep(ETHEREUM_TICK_INTERVAL).await; + } + + async fn block_by_hash(&self, hash: H256) -> Result { + self.client + .header_by_hash_with_transactions(hash) + .await + .map(EthereumSourceBlock) + .map_err(Into::into) + } + + async fn block_by_number(&self, number: u64) -> Result { + self.client + .header_by_number_with_transactions(number) + .await + .map(EthereumSourceBlock) + .map_err(Into::into) + } + + async fn transaction_block( + &self, + hash: &EthereumTransactionHash, + ) -> Result, RpcError> { + let eth_tx = match self.client.transaction_by_hash(*hash).await? { + Some(eth_tx) => eth_tx, + None => return Ok(None), + }; + + // we need transaction to be mined => check if it is included in the block + let (eth_header_id, eth_tx_index) = match (eth_tx.block_number, eth_tx.block_hash, eth_tx.transaction_index) { + (Some(block_number), Some(block_hash), Some(transaction_index)) => ( + HeaderId(block_number.as_u64(), block_hash), + transaction_index.as_u64() as _, + ), + _ => return Ok(None), + }; + + Ok(Some((eth_header_id, eth_tx_index))) + } + + async fn transaction_proof( + &self, + block: &EthereumSourceBlock, + tx_index: usize, + ) -> Result { + const TRANSACTION_HAS_RAW_FIELD_PROOF: &str = "RPC level checks that transactions from Ethereum\ + node are having `raw` field; qed"; + const BLOCK_HAS_HASH_FIELD_PROOF: &str = "RPC level checks that block has `hash` field; qed"; + + let mut transaction_proof = Vec::with_capacity(block.0.transactions.len()); + for tx in &block.0.transactions { + let raw_tx_receipt = self + .client + .transaction_receipt(tx.hash) + 
.await + .map(|receipt| into_substrate_ethereum_receipt(&receipt)) + .map(|receipt| receipt.rlp())?; + let raw_tx = tx.raw.clone().expect(TRANSACTION_HAS_RAW_FIELD_PROOF).0; + transaction_proof.push((raw_tx, raw_tx_receipt)); + } + + Ok(EthereumTransactionInclusionProof { + block: block.0.hash.expect(BLOCK_HAS_HASH_FIELD_PROOF), + index: tx_index as _, + proof: transaction_proof, + }) + } +} + +/// Substrate node as transactions proof target. +#[derive(Clone)] +struct SubstrateTransactionsTarget { + client: SubstrateClient, + sign_params: RialtoSigningParams, + bridge_instance: Arc, +} + +#[async_trait] +impl RelayClient for SubstrateTransactionsTarget { + type Error = RpcError; + + async fn reconnect(&mut self) -> Result<(), RpcError> { + Ok(self.client.reconnect().await?) + } +} + +#[async_trait] +impl TargetClient for SubstrateTransactionsTarget { + async fn tick(&self) { + async_std::task::sleep(Rialto::AVERAGE_BLOCK_INTERVAL).await; + } + + async fn is_header_known(&self, id: &EthereumHeaderId) -> Result { + self.client.ethereum_header_known(*id).await + } + + async fn is_header_finalized(&self, id: &EthereumHeaderId) -> Result { + // we check if header is finalized by simple comparison of the header number and + // number of best finalized PoA header known to Substrate node. 
+ // + // this may lead to failure in tx proof import if PoA reorganization has happened + // after we have checked that our tx has been included into given block + // + // the fix is easy, but since this code is mostly developed for demonstration purposes, + // I'm leaving this KISS-based design here + let best_finalized_ethereum_block = self.client.best_ethereum_finalized_block().await?; + Ok(id.0 <= best_finalized_ethereum_block.0) + } + + async fn best_finalized_header_id(&self) -> Result { + // we can't continue to relay exchange proofs if Substrate node is out of sync, because + // it may have already received (some of) proofs that we're going to relay + self.client.ensure_synced().await?; + + self.client.best_ethereum_finalized_block().await + } + + async fn filter_transaction_proof(&self, proof: &EthereumTransactionInclusionProof) -> Result { + // let's try to parse transaction locally + let (raw_tx, raw_tx_receipt) = &proof.proof[proof.index as usize]; + let parse_result = rialto_runtime::exchange::EthTransaction::parse(raw_tx); + if parse_result.is_err() { + return Ok(false); + } + + // now let's check if transaction is successful + match bp_eth_poa::Receipt::is_successful_raw_receipt(raw_tx_receipt) { + Ok(true) => (), + _ => return Ok(false), + } + + // seems that transaction is relayable - let's check if runtime is able to import it + // (we can't if e.g. header is pruned or there's some issue with tx data) + self.client.verify_exchange_transaction_proof(proof.clone()).await + } + + async fn submit_transaction_proof(&self, proof: EthereumTransactionInclusionProof) -> Result<(), RpcError> { + let (sign_params, bridge_instance) = (self.sign_params.clone(), self.bridge_instance.clone()); + self.client + .submit_exchange_transaction_proof(sign_params, bridge_instance, proof) + .await + } +} + +/// Relay exchange transaction proof(s) to Substrate node. 
+pub async fn run(params: EthereumExchangeParams) { + match params.mode { + ExchangeRelayMode::Single(eth_tx_hash) => { + let result = run_single_transaction_relay(params, eth_tx_hash).await; + match result { + Ok(_) => log::info!( + target: "bridge", + "Ethereum transaction {} proof has been successfully submitted to Substrate node", + eth_tx_hash, + ), + Err(err) => log::error!( + target: "bridge", + "Error submitting Ethereum transaction {} proof to Substrate node: {}", + eth_tx_hash, + err, + ), + } + } + ExchangeRelayMode::Auto(eth_start_with_block_number) => { + let result = run_auto_transactions_relay_loop(params, eth_start_with_block_number).await; + if let Err(err) = result { + log::error!( + target: "bridge", + "Error auto-relaying Ethereum transactions proofs to Substrate node: {}", + err, + ); + } + } + } +} + +/// Run single transaction proof relay and stop. +async fn run_single_transaction_relay(params: EthereumExchangeParams, eth_tx_hash: H256) -> Result<(), String> { + let EthereumExchangeParams { + eth_params, + sub_params, + sub_sign, + instance, + .. + } = params; + + let eth_client = EthereumClient::new(eth_params).await.map_err(RpcError::Ethereum)?; + let sub_client = SubstrateClient::::new(sub_params) + .await + .map_err(RpcError::Substrate)?; + + let source = EthereumTransactionsSource { client: eth_client }; + let target = SubstrateTransactionsTarget { + client: sub_client, + sign_params: sub_sign, + bridge_instance: instance, + }; + + relay_single_transaction_proof(&source, &target, eth_tx_hash).await +} + +async fn run_auto_transactions_relay_loop( + params: EthereumExchangeParams, + eth_start_with_block_number: Option, +) -> Result<(), String> { + let EthereumExchangeParams { + eth_params, + sub_params, + sub_sign, + metrics_params, + instance, + .. 
+ } = params; + + let eth_client = EthereumClient::new(eth_params) + .await + .map_err(|err| format!("Error starting Ethereum client: {:?}", err))?; + let sub_client = SubstrateClient::::new(sub_params) + .await + .map_err(|err| format!("Error starting Substrate client: {:?}", err))?; + + let eth_start_with_block_number = match eth_start_with_block_number { + Some(eth_start_with_block_number) => eth_start_with_block_number, + None => { + sub_client + .best_ethereum_finalized_block() + .await + .map_err(|err| { + format!( + "Error retrieving best finalized Ethereum block from Substrate node: {:?}", + err + ) + })? + .0 + } + }; + + run_loop( + InMemoryStorage::new(eth_start_with_block_number), + EthereumTransactionsSource { client: eth_client }, + SubstrateTransactionsTarget { + client: sub_client, + sign_params: sub_sign, + bridge_instance: instance, + }, + metrics_params, + futures::future::pending(), + ) + .await?; + + Ok(()) +} diff --git a/polkadot/relays/bin-ethereum/src/ethereum_exchange_submit.rs b/polkadot/relays/bin-ethereum/src/ethereum_exchange_submit.rs new file mode 100644 index 00000000000..09871a0fc78 --- /dev/null +++ b/polkadot/relays/bin-ethereum/src/ethereum_exchange_submit.rs @@ -0,0 +1,114 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! 
Submitting Ethereum -> Substrate exchange transactions. + +use bp_eth_poa::{ + signatures::{secret_to_address, SignTransaction}, + UnsignedTransaction, +}; +use relay_ethereum_client::{ + types::{CallRequest, U256}, + Client as EthereumClient, ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams, +}; +use rialto_runtime::exchange::LOCK_FUNDS_ADDRESS; + +/// Ethereum exchange transaction params. +#[derive(Debug)] +pub struct EthereumExchangeSubmitParams { + /// Ethereum connection params. + pub eth_params: EthereumConnectionParams, + /// Ethereum signing params. + pub eth_sign: EthereumSigningParams, + /// Ethereum signer nonce. + pub eth_nonce: Option, + /// Amount of Ethereum tokens to lock. + pub eth_amount: U256, + /// Funds recipient on Substrate side. + pub sub_recipient: [u8; 32], +} + +/// Submit single Ethereum -> Substrate exchange transaction. +pub async fn run(params: EthereumExchangeSubmitParams) { + let EthereumExchangeSubmitParams { + eth_params, + eth_sign, + eth_nonce, + eth_amount, + sub_recipient, + } = params; + + let result: Result<_, String> = async move { + let eth_client = EthereumClient::new(eth_params) + .await + .map_err(|err| format!("error connecting to Ethereum node: {:?}", err))?; + + let eth_signer_address = secret_to_address(ð_sign.signer); + let sub_recipient_encoded = sub_recipient; + let nonce = match eth_nonce { + Some(eth_nonce) => eth_nonce, + None => eth_client + .account_nonce(eth_signer_address) + .await + .map_err(|err| format!("error fetching acount nonce: {:?}", err))?, + }; + let gas = eth_client + .estimate_gas(CallRequest { + from: Some(eth_signer_address), + to: Some(LOCK_FUNDS_ADDRESS.into()), + value: Some(eth_amount), + data: Some(sub_recipient_encoded.to_vec().into()), + ..Default::default() + }) + .await + .map_err(|err| format!("error estimating gas requirements: {:?}", err))?; + let eth_tx_unsigned = UnsignedTransaction { + nonce, + gas_price: eth_sign.gas_price, + gas, + to: 
Some(LOCK_FUNDS_ADDRESS.into()), + value: eth_amount, + payload: sub_recipient_encoded.to_vec(), + }; + let eth_tx_signed = eth_tx_unsigned + .clone() + .sign_by(ð_sign.signer, Some(eth_sign.chain_id)); + eth_client + .submit_transaction(eth_tx_signed) + .await + .map_err(|err| format!("error submitting transaction: {:?}", err))?; + + Ok(eth_tx_unsigned) + } + .await; + + match result { + Ok(eth_tx_unsigned) => { + log::info!( + target: "bridge", + "Exchange transaction has been submitted to Ethereum node: {:?}", + eth_tx_unsigned, + ); + } + Err(err) => { + log::error!( + target: "bridge", + "Error submitting exchange transaction to Ethereum node: {}", + err, + ); + } + } +} diff --git a/polkadot/relays/bin-ethereum/src/ethereum_sync_loop.rs b/polkadot/relays/bin-ethereum/src/ethereum_sync_loop.rs new file mode 100644 index 00000000000..3dcd27e18f6 --- /dev/null +++ b/polkadot/relays/bin-ethereum/src/ethereum_sync_loop.rs @@ -0,0 +1,298 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Ethereum PoA -> Rialto-Substrate synchronization. 
+ +use crate::ethereum_client::EthereumHighLevelRpc; +use crate::instances::BridgeInstance; +use crate::rialto_client::{SubmitEthereumHeaders, SubstrateHighLevelRpc}; +use crate::rpc_errors::RpcError; +use crate::substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts}; + +use async_trait::async_trait; +use codec::Encode; +use headers_relay::{ + sync::{HeadersSyncParams, TargetTransactionMode}, + sync_loop::{SourceClient, TargetClient}, + sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders}, +}; +use relay_ethereum_client::{ + types::{HeaderHash, HeaderId as EthereumHeaderId, Receipt, SyncHeader as Header}, + Client as EthereumClient, ConnectionParams as EthereumConnectionParams, +}; +use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; +use relay_substrate_client::{ + Chain as SubstrateChain, Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams, +}; +use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient}; + +use std::fmt::Debug; +use std::{collections::HashSet, sync::Arc, time::Duration}; + +pub mod consts { + use super::*; + + /// Interval at which we check new Ethereum headers when we are synced/almost synced. + pub const ETHEREUM_TICK_INTERVAL: Duration = Duration::from_secs(10); + /// Max number of headers in single submit transaction. + pub const MAX_HEADERS_IN_SINGLE_SUBMIT: usize = 32; + /// Max total size of headers in single submit transaction. This only affects signed + /// submissions, when several headers are submitted at once. 4096 is the maximal **expected** + /// size of the Ethereum header + transactions receipts (if they're required). + pub const MAX_HEADERS_SIZE_IN_SINGLE_SUBMIT: usize = MAX_HEADERS_IN_SINGLE_SUBMIT * 4096; + /// Max Ethereum headers we want to have in all 'before-submitted' states. + pub const MAX_FUTURE_HEADERS_TO_DOWNLOAD: usize = 128; + /// Max Ethereum headers count we want to have in 'submitted' state. 
+ pub const MAX_SUBMITTED_HEADERS: usize = 128; + /// Max depth of in-memory headers in all states. Past this depth they will be forgotten (pruned). + pub const PRUNE_DEPTH: u32 = 4096; +} + +/// Ethereum synchronization parameters. +pub struct EthereumSyncParams { + /// Ethereum connection params. + pub eth_params: EthereumConnectionParams, + /// Substrate connection params. + pub sub_params: SubstrateConnectionParams, + /// Substrate signing params. + pub sub_sign: RialtoSigningParams, + /// Synchronization parameters. + pub sync_params: HeadersSyncParams, + /// Metrics parameters. + pub metrics_params: MetricsParams, + /// Instance of the bridge pallet being synchronized. + pub instance: Arc, +} + +impl Debug for EthereumSyncParams { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + f.debug_struct("EthereumSyncParams") + .field("eth_params", &self.eth_params) + .field("sub_params", &self.sub_params) + .field("sub_sign", &sp_core::Pair::public(&self.sub_sign)) + .field("sync_params", &self.sync_params) + .field("metrics_params", &self.metrics_params) + .field("instance", &self.instance) + .finish() + } +} + +/// Ethereum synchronization pipeline. +#[derive(Clone, Copy, Debug)] +#[cfg_attr(test, derive(PartialEq))] +pub struct EthereumHeadersSyncPipeline; + +impl HeadersSyncPipeline for EthereumHeadersSyncPipeline { + const SOURCE_NAME: &'static str = "Ethereum"; + const TARGET_NAME: &'static str = "Substrate"; + + type Hash = HeaderHash; + type Number = u64; + type Header = Header; + type Extra = Vec; + type Completion = (); + + fn estimate_size(source: &QueuedHeader) -> usize { + into_substrate_ethereum_header(source.header()).encode().len() + + into_substrate_ethereum_receipts(source.extra()) + .map(|extra| extra.encode().len()) + .unwrap_or(0) + } +} + +/// Queued ethereum header ID. +pub type QueuedEthereumHeader = QueuedHeader; + +/// Ethereum client as headers source. 
+#[derive(Clone)] +struct EthereumHeadersSource { + /// Ethereum node client. + client: EthereumClient, +} + +impl EthereumHeadersSource { + fn new(client: EthereumClient) -> Self { + Self { client } + } +} + +#[async_trait] +impl RelayClient for EthereumHeadersSource { + type Error = RpcError; + + async fn reconnect(&mut self) -> Result<(), RpcError> { + self.client.reconnect().await.map_err(Into::into) + } +} + +#[async_trait] +impl SourceClient for EthereumHeadersSource { + async fn best_block_number(&self) -> Result { + // we **CAN** continue to relay headers if Ethereum node is out of sync, because + // Substrate node may be missing headers that are already available at the Ethereum + + self.client.best_block_number().await.map_err(Into::into) + } + + async fn header_by_hash(&self, hash: HeaderHash) -> Result { + self.client + .header_by_hash(hash) + .await + .map(Into::into) + .map_err(Into::into) + } + + async fn header_by_number(&self, number: u64) -> Result { + self.client + .header_by_number(number) + .await + .map(Into::into) + .map_err(Into::into) + } + + async fn header_completion(&self, id: EthereumHeaderId) -> Result<(EthereumHeaderId, Option<()>), RpcError> { + Ok((id, None)) + } + + async fn header_extra( + &self, + id: EthereumHeaderId, + header: QueuedEthereumHeader, + ) -> Result<(EthereumHeaderId, Vec), RpcError> { + self.client + .transaction_receipts(id, header.header().transactions.clone()) + .await + } +} + +#[derive(Clone)] +struct SubstrateHeadersTarget { + /// Substrate node client. + client: SubstrateClient, + /// Whether we want to submit signed (true), or unsigned (false) transactions. + sign_transactions: bool, + /// Substrate signing params. + sign_params: RialtoSigningParams, + /// Bridge instance used in Ethereum to Substrate sync. 
+ bridge_instance: Arc, +} + +impl SubstrateHeadersTarget { + fn new( + client: SubstrateClient, + sign_transactions: bool, + sign_params: RialtoSigningParams, + bridge_instance: Arc, + ) -> Self { + Self { + client, + sign_transactions, + sign_params, + bridge_instance, + } + } +} + +#[async_trait] +impl RelayClient for SubstrateHeadersTarget { + type Error = RpcError; + + async fn reconnect(&mut self) -> Result<(), RpcError> { + Ok(self.client.reconnect().await?) + } +} + +#[async_trait] +impl TargetClient for SubstrateHeadersTarget { + async fn best_header_id(&self) -> Result { + // we can't continue to relay headers if Substrate node is out of sync, because + // it may have already received (some of) headers that we're going to relay + self.client.ensure_synced().await?; + + self.client.best_ethereum_block().await + } + + async fn is_known_header(&self, id: EthereumHeaderId) -> Result<(EthereumHeaderId, bool), RpcError> { + Ok((id, self.client.ethereum_header_known(id).await?)) + } + + async fn submit_headers(&self, headers: Vec) -> SubmittedHeaders { + let (sign_params, bridge_instance, sign_transactions) = ( + self.sign_params.clone(), + self.bridge_instance.clone(), + self.sign_transactions, + ); + self.client + .submit_ethereum_headers(sign_params, bridge_instance, headers, sign_transactions) + .await + } + + async fn incomplete_headers_ids(&self) -> Result, RpcError> { + Ok(HashSet::new()) + } + + #[allow(clippy::unit_arg)] + async fn complete_header(&self, id: EthereumHeaderId, _completion: ()) -> Result { + Ok(id) + } + + async fn requires_extra(&self, header: QueuedEthereumHeader) -> Result<(EthereumHeaderId, bool), RpcError> { + // we can minimize number of receipts_check calls by checking header + // logs bloom here, but it may give us false positives (when authorities + // source is contract, we never need any logs) + let id = header.header().id(); + let sub_eth_header = into_substrate_ethereum_header(header.header()); + Ok((id, 
self.client.ethereum_receipts_required(sub_eth_header).await?)) + } +} + +/// Run Ethereum headers synchronization. +pub async fn run(params: EthereumSyncParams) -> Result<(), RpcError> { + let EthereumSyncParams { + eth_params, + sub_params, + sub_sign, + sync_params, + metrics_params, + instance, + } = params; + + let eth_client = EthereumClient::new(eth_params).await?; + let sub_client = SubstrateClient::::new(sub_params).await?; + + let sign_sub_transactions = match sync_params.target_tx_mode { + TargetTransactionMode::Signed | TargetTransactionMode::Backup => true, + TargetTransactionMode::Unsigned => false, + }; + + let source = EthereumHeadersSource::new(eth_client); + let target = SubstrateHeadersTarget::new(sub_client, sign_sub_transactions, sub_sign, instance); + + headers_relay::sync_loop::run( + source, + consts::ETHEREUM_TICK_INTERVAL, + target, + Rialto::AVERAGE_BLOCK_INTERVAL, + (), + sync_params, + metrics_params, + futures::future::pending(), + ) + .await + .map_err(RpcError::SyncLoop)?; + + Ok(()) +} diff --git a/polkadot/relays/bin-ethereum/src/instances.rs b/polkadot/relays/bin-ethereum/src/instances.rs new file mode 100644 index 00000000000..0d2a508f455 --- /dev/null +++ b/polkadot/relays/bin-ethereum/src/instances.rs @@ -0,0 +1,115 @@ +// Copyright 2020-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! The PoA Bridge Pallet provides a way to include multiple instances of itself in a runtime. When +//! synchronizing a Substrate chain which can include multiple instances of the bridge pallet we +//! must somehow decide which of the instances to sync. +//! +//! Note that each instance of the bridge pallet is coupled with an instance of the currency exchange +//! pallet. We must also have a way to create `Call`s for the correct currency exchange instance. +//! +//! This module helps by preparing the correct `Call`s for each of the different pallet instances. + +use crate::ethereum_sync_loop::QueuedEthereumHeader; +use crate::substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts}; + +use rialto_runtime::exchange::EthereumTransactionInclusionProof as Proof; +use rialto_runtime::Call; + +/// Interface for `Calls` which are needed to correctly sync the bridge. +/// +/// Each instance of the bridge and currency exchange pallets in the bridge runtime requires similar +/// but slightly different `Call` in order to be synchronized. +pub trait BridgeInstance: Send + Sync + std::fmt::Debug { + /// Used to build a `Call` for importing signed headers to a Substrate runtime. + fn build_signed_header_call(&self, headers: Vec) -> Call; + /// Used to build a `Call` for importing an unsigned header to a Substrate runtime. + fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call; + /// Used to build a `Call` for importing peer transactions to a Substrate runtime. + fn build_currency_exchange_call(&self, proof: Proof) -> Call; +} + +/// Corresponds to the Rialto instance used in the bridge runtime. 
+#[derive(Default, Clone, Debug)] +pub struct RialtoPoA; + +impl BridgeInstance for RialtoPoA { + fn build_signed_header_call(&self, headers: Vec) -> Call { + let pallet_call = rialto_runtime::BridgeEthPoACall::import_signed_headers( + headers + .into_iter() + .map(|header| { + ( + into_substrate_ethereum_header(&header.header()), + into_substrate_ethereum_receipts(header.extra()), + ) + }) + .collect(), + ); + + rialto_runtime::Call::BridgeRialtoPoA(pallet_call) + } + + fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call { + let pallet_call = rialto_runtime::BridgeEthPoACall::import_unsigned_header( + into_substrate_ethereum_header(&header.header()), + into_substrate_ethereum_receipts(header.extra()), + ); + + rialto_runtime::Call::BridgeRialtoPoA(pallet_call) + } + + fn build_currency_exchange_call(&self, proof: Proof) -> Call { + let pallet_call = rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction(proof); + rialto_runtime::Call::BridgeRialtoCurrencyExchange(pallet_call) + } +} + +/// Corresponds to the Kovan instance used in the bridge runtime. 
+#[derive(Default, Clone, Debug)] +pub struct Kovan; + +impl BridgeInstance for Kovan { + fn build_signed_header_call(&self, headers: Vec) -> Call { + let pallet_call = rialto_runtime::BridgeEthPoACall::import_signed_headers( + headers + .into_iter() + .map(|header| { + ( + into_substrate_ethereum_header(header.header()), + into_substrate_ethereum_receipts(header.extra()), + ) + }) + .collect(), + ); + + rialto_runtime::Call::BridgeKovan(pallet_call) + } + + fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call { + let pallet_call = rialto_runtime::BridgeEthPoACall::import_unsigned_header( + into_substrate_ethereum_header(header.header()), + into_substrate_ethereum_receipts(header.extra()), + ); + + rialto_runtime::Call::BridgeKovan(pallet_call) + } + + fn build_currency_exchange_call(&self, proof: Proof) -> Call { + let pallet_call = rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction(proof); + rialto_runtime::Call::BridgeKovanCurrencyExchange(pallet_call) + } +} diff --git a/polkadot/relays/bin-ethereum/src/main.rs b/polkadot/relays/bin-ethereum/src/main.rs new file mode 100644 index 00000000000..234e1237fcf --- /dev/null +++ b/polkadot/relays/bin-ethereum/src/main.rs @@ -0,0 +1,413 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +#![recursion_limit = "1024"] + +mod ethereum_client; +mod ethereum_deploy_contract; +mod ethereum_exchange; +mod ethereum_exchange_submit; +mod ethereum_sync_loop; +mod instances; +mod rialto_client; +mod rpc_errors; +mod substrate_sync_loop; +mod substrate_types; + +use ethereum_deploy_contract::EthereumDeployContractParams; +use ethereum_exchange::EthereumExchangeParams; +use ethereum_exchange_submit::EthereumExchangeSubmitParams; +use ethereum_sync_loop::EthereumSyncParams; +use headers_relay::sync::TargetTransactionMode; +use hex_literal::hex; +use instances::{BridgeInstance, Kovan, RialtoPoA}; +use relay_utils::{ + initialize::initialize_relay, + metrics::{MetricsAddress, MetricsParams}, +}; +use secp256k1::SecretKey; +use sp_core::crypto::Pair; +use substrate_sync_loop::SubstrateSyncParams; + +use headers_relay::sync::HeadersSyncParams; +use relay_ethereum_client::{ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams}; +use relay_rialto_client::SigningParams as RialtoSigningParams; +use relay_substrate_client::ConnectionParams as SubstrateConnectionParams; +use std::sync::Arc; + +fn main() { + initialize_relay(); + + let yaml = clap::load_yaml!("cli.yml"); + let matches = clap::App::from_yaml(yaml).get_matches(); + async_std::task::block_on(run_command(&matches)); +} + +async fn run_command(matches: &clap::ArgMatches<'_>) { + match matches.subcommand() { + ("eth-to-sub", Some(eth_to_sub_matches)) => { + log::info!(target: "bridge", "Starting ETH âž¡ SUB relay."); + if ethereum_sync_loop::run(match ethereum_sync_params(ð_to_sub_matches) { + Ok(ethereum_sync_params) => ethereum_sync_params, + Err(err) => { + log::error!(target: "bridge", "Error parsing parameters: {}", err); + return; + } + }) + .await + .is_err() + { + log::error!(target: "bridge", "Unable to get Substrate genesis block for Ethereum sync."); + }; + } + ("sub-to-eth", Some(sub_to_eth_matches)) => { + log::info!(target: "bridge", "Starting SUB âž¡ ETH relay."); 
+ if substrate_sync_loop::run(match substrate_sync_params(&sub_to_eth_matches) { + Ok(substrate_sync_params) => substrate_sync_params, + Err(err) => { + log::error!(target: "bridge", "Error parsing parameters: {}", err); + return; + } + }) + .await + .is_err() + { + log::error!(target: "bridge", "Unable to get Substrate genesis block for Substrate sync."); + }; + } + ("eth-deploy-contract", Some(eth_deploy_matches)) => { + log::info!(target: "bridge", "Deploying ETH contracts."); + ethereum_deploy_contract::run(match ethereum_deploy_contract_params(ð_deploy_matches) { + Ok(ethereum_deploy_params) => ethereum_deploy_params, + Err(err) => { + log::error!(target: "bridge", "Error during contract deployment: {}", err); + return; + } + }) + .await; + } + ("eth-submit-exchange-tx", Some(eth_exchange_submit_matches)) => { + log::info!(target: "bridge", "Submitting ETH âž¡ SUB exchange transaction."); + ethereum_exchange_submit::run(match ethereum_exchange_submit_params(ð_exchange_submit_matches) { + Ok(eth_exchange_submit_params) => eth_exchange_submit_params, + Err(err) => { + log::error!(target: "bridge", "Error submitting Eethereum exchange transaction: {}", err); + return; + } + }) + .await; + } + ("eth-exchange-sub", Some(eth_exchange_matches)) => { + log::info!(target: "bridge", "Starting ETH âž¡ SUB exchange transactions relay."); + ethereum_exchange::run(match ethereum_exchange_params(ð_exchange_matches) { + Ok(eth_exchange_params) => eth_exchange_params, + Err(err) => { + log::error!(target: "bridge", "Error relaying Ethereum transactions proofs: {}", err); + return; + } + }) + .await; + } + ("", _) => { + log::error!(target: "bridge", "No subcommand specified"); + } + _ => unreachable!("all possible subcommands are checked above; qed"), + } +} + +fn ethereum_connection_params(matches: &clap::ArgMatches) -> Result { + let mut params = EthereumConnectionParams::default(); + if let Some(eth_host) = matches.value_of("eth-host") { + params.host = eth_host.into(); + } 
+ if let Some(eth_port) = matches.value_of("eth-port") { + params.port = eth_port + .parse() + .map_err(|e| format!("Failed to parse eth-port: {}", e))?; + } + Ok(params) +} + +fn ethereum_signing_params(matches: &clap::ArgMatches) -> Result { + let mut params = EthereumSigningParams::default(); + if let Some(eth_signer) = matches.value_of("eth-signer") { + params.signer = + SecretKey::parse_slice(&hex::decode(eth_signer).map_err(|e| format!("Failed to parse eth-signer: {}", e))?) + .map_err(|e| format!("Invalid eth-signer: {}", e))?; + } + if let Some(eth_chain_id) = matches.value_of("eth-chain-id") { + params.chain_id = eth_chain_id + .parse::() + .map_err(|e| format!("Failed to parse eth-chain-id: {}", e))?; + } + Ok(params) +} + +fn substrate_connection_params(matches: &clap::ArgMatches) -> Result { + let mut params = SubstrateConnectionParams::default(); + if let Some(sub_host) = matches.value_of("sub-host") { + params.host = sub_host.into(); + } + if let Some(sub_port) = matches.value_of("sub-port") { + params.port = sub_port + .parse() + .map_err(|e| format!("Failed to parse sub-port: {}", e))?; + } + Ok(params) +} + +fn rialto_signing_params(matches: &clap::ArgMatches) -> Result { + let mut params = sp_keyring::AccountKeyring::Alice.pair(); + + if let Some(sub_signer) = matches.value_of("sub-signer") { + let sub_signer_password = matches.value_of("sub-signer-password"); + params = sp_core::sr25519::Pair::from_string(sub_signer, sub_signer_password) + .map_err(|e| format!("Failed to parse sub-signer: {:?}", e))?; + } + Ok(params) +} + +fn ethereum_sync_params(matches: &clap::ArgMatches) -> Result { + use crate::ethereum_sync_loop::consts::*; + + let mut sync_params = HeadersSyncParams { + max_future_headers_to_download: MAX_FUTURE_HEADERS_TO_DOWNLOAD, + max_headers_in_submitted_status: MAX_SUBMITTED_HEADERS, + max_headers_in_single_submit: MAX_HEADERS_IN_SINGLE_SUBMIT, + max_headers_size_in_single_submit: MAX_HEADERS_SIZE_IN_SINGLE_SUBMIT, + prune_depth: 
PRUNE_DEPTH, + target_tx_mode: TargetTransactionMode::Signed, + }; + + match matches.value_of("sub-tx-mode") { + Some("signed") => sync_params.target_tx_mode = TargetTransactionMode::Signed, + Some("unsigned") => { + sync_params.target_tx_mode = TargetTransactionMode::Unsigned; + + // tx pool won't accept too much unsigned transactions + sync_params.max_headers_in_submitted_status = 10; + } + Some("backup") => sync_params.target_tx_mode = TargetTransactionMode::Backup, + Some(mode) => return Err(format!("Invalid sub-tx-mode: {}", mode)), + None => sync_params.target_tx_mode = TargetTransactionMode::Signed, + } + + let params = EthereumSyncParams { + eth_params: ethereum_connection_params(matches)?, + sub_params: substrate_connection_params(matches)?, + sub_sign: rialto_signing_params(matches)?, + metrics_params: metrics_params(matches)?, + instance: instance_params(matches)?, + sync_params, + }; + + log::debug!(target: "bridge", "Ethereum sync params: {:?}", params); + + Ok(params) +} + +fn substrate_sync_params(matches: &clap::ArgMatches) -> Result { + use crate::substrate_sync_loop::consts::*; + + let eth_contract_address: relay_ethereum_client::types::Address = + if let Some(eth_contract) = matches.value_of("eth-contract") { + eth_contract.parse().map_err(|e| format!("{}", e))? 
+ } else { + "731a10897d267e19b34503ad902d0a29173ba4b1" + .parse() + .expect("address is hardcoded, thus valid; qed") + }; + + let params = SubstrateSyncParams { + sub_params: substrate_connection_params(matches)?, + eth_params: ethereum_connection_params(matches)?, + eth_sign: ethereum_signing_params(matches)?, + metrics_params: metrics_params(matches)?, + sync_params: HeadersSyncParams { + max_future_headers_to_download: MAX_FUTURE_HEADERS_TO_DOWNLOAD, + max_headers_in_submitted_status: MAX_SUBMITTED_HEADERS, + max_headers_in_single_submit: MAX_SUBMITTED_HEADERS, + max_headers_size_in_single_submit: std::usize::MAX, + prune_depth: PRUNE_DEPTH, + target_tx_mode: TargetTransactionMode::Signed, + }, + eth_contract_address, + }; + + log::debug!(target: "bridge", "Substrate sync params: {:?}", params); + + Ok(params) +} + +fn ethereum_deploy_contract_params(matches: &clap::ArgMatches) -> Result { + let eth_contract_code = parse_hex_argument(matches, "eth-contract-code")?.unwrap_or_else(|| { + hex::decode(include_str!("../res/substrate-bridge-bytecode.hex")).expect("code is hardcoded, thus valid; qed") + }); + let sub_initial_authorities_set_id = matches + .value_of("sub-authorities-set-id") + .map(|set| { + set.parse() + .map_err(|e| format!("Failed to parse sub-authorities-set-id: {}", e)) + }) + .transpose()?; + let sub_initial_authorities_set = parse_hex_argument(matches, "sub-authorities-set")?; + let sub_initial_header = parse_hex_argument(matches, "sub-initial-header")?; + + let params = EthereumDeployContractParams { + eth_params: ethereum_connection_params(matches)?, + eth_sign: ethereum_signing_params(matches)?, + sub_params: substrate_connection_params(matches)?, + sub_initial_authorities_set_id, + sub_initial_authorities_set, + sub_initial_header, + eth_contract_code, + }; + + log::debug!(target: "bridge", "Deploy params: {:?}", params); + + Ok(params) +} + +fn ethereum_exchange_submit_params(matches: &clap::ArgMatches) -> Result { + let eth_nonce = matches 
+ .value_of("eth-nonce") + .map(|eth_nonce| { + relay_ethereum_client::types::U256::from_dec_str(ð_nonce) + .map_err(|e| format!("Failed to parse eth-nonce: {}", e)) + }) + .transpose()?; + + let eth_amount = matches + .value_of("eth-amount") + .map(|eth_amount| { + eth_amount + .parse() + .map_err(|e| format!("Failed to parse eth-amount: {}", e)) + }) + .transpose()? + .unwrap_or_else(|| { + // This is in Wei, represents 1 ETH + 1_000_000_000_000_000_000_u64.into() + }); + + // This is the well-known Substrate account of Ferdie + let default_recepient = hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c"); + + let sub_recipient = if let Some(sub_recipient) = matches.value_of("sub-recipient") { + hex::decode(&sub_recipient) + .map_err(|err| err.to_string()) + .and_then(|vsub_recipient| { + let expected_len = default_recepient.len(); + if expected_len != vsub_recipient.len() { + Err(format!("invalid length. Expected {} bytes", expected_len)) + } else { + let mut sub_recipient = default_recepient; + sub_recipient.copy_from_slice(&vsub_recipient[..expected_len]); + Ok(sub_recipient) + } + }) + .map_err(|e| format!("Failed to parse sub-recipient: {}", e))? 
+ } else { + default_recepient + }; + + let params = EthereumExchangeSubmitParams { + eth_params: ethereum_connection_params(matches)?, + eth_sign: ethereum_signing_params(matches)?, + eth_nonce, + eth_amount, + sub_recipient, + }; + + log::debug!(target: "bridge", "Submit Ethereum exchange tx params: {:?}", params); + + Ok(params) +} + +fn ethereum_exchange_params(matches: &clap::ArgMatches) -> Result { + let mode = match matches.value_of("eth-tx-hash") { + Some(eth_tx_hash) => ethereum_exchange::ExchangeRelayMode::Single( + eth_tx_hash + .parse() + .map_err(|e| format!("Failed to parse eth-tx-hash: {}", e))?, + ), + None => ethereum_exchange::ExchangeRelayMode::Auto( + matches + .value_of("eth-start-with-block") + .map(|eth_start_with_block| { + eth_start_with_block + .parse() + .map_err(|e| format!("Failed to parse eth-start-with-block: {}", e)) + }) + .transpose()?, + ), + }; + + let params = EthereumExchangeParams { + eth_params: ethereum_connection_params(matches)?, + sub_params: substrate_connection_params(matches)?, + sub_sign: rialto_signing_params(matches)?, + metrics_params: metrics_params(matches)?, + instance: instance_params(matches)?, + mode, + }; + + log::debug!(target: "bridge", "Ethereum exchange params: {:?}", params); + + Ok(params) +} + +fn metrics_params(matches: &clap::ArgMatches) -> Result { + if matches.is_present("no-prometheus") { + return Ok(None.into()); + } + + let mut metrics_params = MetricsAddress::default(); + + if let Some(prometheus_host) = matches.value_of("prometheus-host") { + metrics_params.host = prometheus_host.into(); + } + if let Some(prometheus_port) = matches.value_of("prometheus-port") { + metrics_params.port = prometheus_port + .parse() + .map_err(|e| format!("Failed to parse prometheus-port: {}", e))?; + } + + Ok(Some(metrics_params).into()) +} + +fn instance_params(matches: &clap::ArgMatches) -> Result, String> { + let instance = if let Some(instance) = matches.value_of("sub-pallet-instance") { + match 
instance.to_lowercase().as_str() { + "rialto" => Arc::new(RialtoPoA) as Arc, + "kovan" => Arc::new(Kovan), + _ => return Err("Unsupported bridge pallet instance".to_string()), + } + } else { + unreachable!("CLI config enforces a default instance, can never be None") + }; + + Ok(instance) +} + +fn parse_hex_argument(matches: &clap::ArgMatches, arg: &str) -> Result>, String> { + match matches.value_of(arg) { + Some(value) => Ok(Some( + hex::decode(value).map_err(|e| format!("Failed to parse {}: {}", arg, e))?, + )), + None => Ok(None), + } +} diff --git a/polkadot/relays/bin-ethereum/src/rialto_client.rs b/polkadot/relays/bin-ethereum/src/rialto_client.rs new file mode 100644 index 00000000000..d9c0f265cbb --- /dev/null +++ b/polkadot/relays/bin-ethereum/src/rialto_client.rs @@ -0,0 +1,279 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +use crate::ethereum_sync_loop::QueuedEthereumHeader; +use crate::instances::BridgeInstance; +use crate::rpc_errors::RpcError; + +use async_trait::async_trait; +use bp_eth_poa::AuraHeader as SubstrateEthereumHeader; +use codec::{Decode, Encode}; +use headers_relay::sync_types::SubmittedHeaders; +use relay_ethereum_client::types::HeaderId as EthereumHeaderId; +use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; +use relay_substrate_client::{Client as SubstrateClient, TransactionSignScheme}; +use relay_utils::HeaderId; +use sp_core::{crypto::Pair, Bytes}; +use std::{collections::VecDeque, sync::Arc}; + +const ETH_API_IMPORT_REQUIRES_RECEIPTS: &str = "RialtoPoAHeaderApi_is_import_requires_receipts"; +const ETH_API_IS_KNOWN_BLOCK: &str = "RialtoPoAHeaderApi_is_known_block"; +const ETH_API_BEST_BLOCK: &str = "RialtoPoAHeaderApi_best_block"; +const ETH_API_BEST_FINALIZED_BLOCK: &str = "RialtoPoAHeaderApi_finalized_block"; +const EXCH_API_FILTER_TRANSACTION_PROOF: &str = "RialtoCurrencyExchangeApi_filter_transaction_proof"; + +type RpcResult = std::result::Result; + +/// A trait which contains methods that work by using multiple low-level RPCs, or more complicated +/// interactions involving, for example, an Ethereum bridge module. +#[async_trait] +pub trait SubstrateHighLevelRpc { + /// Returns best Ethereum block that Substrate runtime knows of. + async fn best_ethereum_block(&self) -> RpcResult; + /// Returns best finalized Ethereum block that Substrate runtime knows of. + async fn best_ethereum_finalized_block(&self) -> RpcResult; + /// Returns whether or not transactions receipts are required for Ethereum header submission. + async fn ethereum_receipts_required(&self, header: SubstrateEthereumHeader) -> RpcResult; + /// Returns whether or not the given Ethereum header is known to the Substrate runtime. 
+ async fn ethereum_header_known(&self, header_id: EthereumHeaderId) -> RpcResult; +} + +#[async_trait] +impl SubstrateHighLevelRpc for SubstrateClient { + async fn best_ethereum_block(&self) -> RpcResult { + let call = ETH_API_BEST_BLOCK.to_string(); + let data = Bytes(Vec::new()); + + let encoded_response = self.state_call(call, data, None).await?; + let decoded_response: (u64, bp_eth_poa::H256) = Decode::decode(&mut &encoded_response.0[..])?; + + let best_header_id = HeaderId(decoded_response.0, decoded_response.1); + Ok(best_header_id) + } + + async fn best_ethereum_finalized_block(&self) -> RpcResult { + let call = ETH_API_BEST_FINALIZED_BLOCK.to_string(); + let data = Bytes(Vec::new()); + + let encoded_response = self.state_call(call, data, None).await?; + let decoded_response: (u64, bp_eth_poa::H256) = Decode::decode(&mut &encoded_response.0[..])?; + + let best_header_id = HeaderId(decoded_response.0, decoded_response.1); + Ok(best_header_id) + } + + async fn ethereum_receipts_required(&self, header: SubstrateEthereumHeader) -> RpcResult { + let call = ETH_API_IMPORT_REQUIRES_RECEIPTS.to_string(); + let data = Bytes(header.encode()); + + let encoded_response = self.state_call(call, data, None).await?; + let receipts_required: bool = Decode::decode(&mut &encoded_response.0[..])?; + + Ok(receipts_required) + } + + // The Substrate module could prune old headers. So this function could return false even + // if header is synced. And we'll mark corresponding Ethereum header as Orphan. + // + // But when we read the best header from Substrate next time, we will know that + // there's a better header. This Orphan will either be marked as synced, or + // eventually pruned. 
+ async fn ethereum_header_known(&self, header_id: EthereumHeaderId) -> RpcResult { + let call = ETH_API_IS_KNOWN_BLOCK.to_string(); + let data = Bytes(header_id.1.encode()); + + let encoded_response = self.state_call(call, data, None).await?; + let is_known_block: bool = Decode::decode(&mut &encoded_response.0[..])?; + + Ok(is_known_block) + } +} + +/// A trait for RPC calls which are used to submit Ethereum headers to a Substrate +/// runtime. These are typically calls which use a combination of other low-level RPC +/// calls. +#[async_trait] +pub trait SubmitEthereumHeaders { + /// Submits Ethereum header to Substrate runtime. + async fn submit_ethereum_headers( + &self, + params: RialtoSigningParams, + instance: Arc, + headers: Vec, + sign_transactions: bool, + ) -> SubmittedHeaders; + + /// Submits signed Ethereum header to Substrate runtime. + async fn submit_signed_ethereum_headers( + &self, + params: RialtoSigningParams, + instance: Arc, + headers: Vec, + ) -> SubmittedHeaders; + + /// Submits unsigned Ethereum header to Substrate runtime. 
+ async fn submit_unsigned_ethereum_headers( + &self, + instance: Arc, + headers: Vec, + ) -> SubmittedHeaders; +} + +#[async_trait] +impl SubmitEthereumHeaders for SubstrateClient { + async fn submit_ethereum_headers( + &self, + params: RialtoSigningParams, + instance: Arc, + headers: Vec, + sign_transactions: bool, + ) -> SubmittedHeaders { + if sign_transactions { + self.submit_signed_ethereum_headers(params, instance, headers).await + } else { + self.submit_unsigned_ethereum_headers(instance, headers).await + } + } + + async fn submit_signed_ethereum_headers( + &self, + params: RialtoSigningParams, + instance: Arc, + headers: Vec, + ) -> SubmittedHeaders { + let ids = headers.iter().map(|header| header.id()).collect(); + let submission_result = async { + self.submit_signed_extrinsic((*params.public().as_array_ref()).into(), |transaction_nonce| { + Bytes( + Rialto::sign_transaction( + *self.genesis_hash(), + ¶ms, + transaction_nonce, + instance.build_signed_header_call(headers), + ) + .encode(), + ) + }) + .await?; + Ok(()) + } + .await; + + match submission_result { + Ok(_) => SubmittedHeaders { + submitted: ids, + incomplete: Vec::new(), + rejected: Vec::new(), + fatal_error: None, + }, + Err(error) => SubmittedHeaders { + submitted: Vec::new(), + incomplete: Vec::new(), + rejected: ids, + fatal_error: Some(error), + }, + } + } + + async fn submit_unsigned_ethereum_headers( + &self, + instance: Arc, + headers: Vec, + ) -> SubmittedHeaders { + let mut ids = headers.iter().map(|header| header.id()).collect::>(); + let mut submitted_headers = SubmittedHeaders::default(); + + for header in headers { + let id = ids.pop_front().expect("both collections have same size; qed"); + + let call = instance.build_unsigned_header_call(header); + let transaction = create_unsigned_submit_transaction(call); + + match self.submit_unsigned_extrinsic(Bytes(transaction.encode())).await { + Ok(_) => submitted_headers.submitted.push(id), + Err(error) => { + 
submitted_headers.rejected.push(id); + submitted_headers.rejected.extend(ids); + submitted_headers.fatal_error = Some(error.into()); + break; + } + } + } + + submitted_headers + } +} + +/// A trait for RPC calls which are used to submit proof of Ethereum exchange transaction to a +/// Substrate runtime. These are typically calls which use a combination of other low-level RPC +/// calls. +#[async_trait] +pub trait SubmitEthereumExchangeTransactionProof { + /// Pre-verify Ethereum exchange transaction proof. + async fn verify_exchange_transaction_proof( + &self, + proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, + ) -> RpcResult; + /// Submits Ethereum exchange transaction proof to Substrate runtime. + async fn submit_exchange_transaction_proof( + &self, + params: RialtoSigningParams, + instance: Arc, + proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, + ) -> RpcResult<()>; +} + +#[async_trait] +impl SubmitEthereumExchangeTransactionProof for SubstrateClient { + async fn verify_exchange_transaction_proof( + &self, + proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, + ) -> RpcResult { + let call = EXCH_API_FILTER_TRANSACTION_PROOF.to_string(); + let data = Bytes(proof.encode()); + + let encoded_response = self.state_call(call, data, None).await?; + let is_allowed: bool = Decode::decode(&mut &encoded_response.0[..])?; + + Ok(is_allowed) + } + + async fn submit_exchange_transaction_proof( + &self, + params: RialtoSigningParams, + instance: Arc, + proof: rialto_runtime::exchange::EthereumTransactionInclusionProof, + ) -> RpcResult<()> { + self.submit_signed_extrinsic((*params.public().as_array_ref()).into(), |transaction_nonce| { + Bytes( + Rialto::sign_transaction( + *self.genesis_hash(), + ¶ms, + transaction_nonce, + instance.build_currency_exchange_call(proof), + ) + .encode(), + ) + }) + .await?; + Ok(()) + } +} + +/// Create unsigned Substrate transaction for submitting Ethereum header. 
+fn create_unsigned_submit_transaction(call: rialto_runtime::Call) -> rialto_runtime::UncheckedExtrinsic { + rialto_runtime::UncheckedExtrinsic::new_unsigned(call) +} diff --git a/polkadot/relays/bin-ethereum/src/rpc_errors.rs b/polkadot/relays/bin-ethereum/src/rpc_errors.rs new file mode 100644 index 00000000000..27b233135f3 --- /dev/null +++ b/polkadot/relays/bin-ethereum/src/rpc_errors.rs @@ -0,0 +1,85 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use relay_ethereum_client::Error as EthereumNodeError; +use relay_substrate_client::Error as SubstrateNodeError; +use relay_utils::MaybeConnectionError; + +/// Contains common errors that can occur when +/// interacting with a Substrate or Ethereum node +/// through RPC. +#[derive(Debug)] +pub enum RpcError { + /// The arguments to the RPC method failed to serialize. + Serialization(serde_json::Error), + /// An error occured when interacting with an Ethereum node. + Ethereum(EthereumNodeError), + /// An error occured when interacting with a Substrate node. + Substrate(SubstrateNodeError), + /// Error running relay loop. 
+ SyncLoop(String), +} + +impl From for String { + fn from(err: RpcError) -> Self { + match err { + RpcError::Serialization(e) => e.to_string(), + RpcError::Ethereum(e) => e.to_string(), + RpcError::Substrate(e) => e.to_string(), + RpcError::SyncLoop(e) => e, + } + } +} + +impl From for RpcError { + fn from(err: serde_json::Error) -> Self { + Self::Serialization(err) + } +} + +impl From for RpcError { + fn from(err: EthereumNodeError) -> Self { + Self::Ethereum(err) + } +} + +impl From for RpcError { + fn from(err: SubstrateNodeError) -> Self { + Self::Substrate(err) + } +} + +impl From for RpcError { + fn from(err: ethabi::Error) -> Self { + Self::Ethereum(EthereumNodeError::ResponseParseFailed(format!("{}", err))) + } +} + +impl MaybeConnectionError for RpcError { + fn is_connection_error(&self) -> bool { + match self { + RpcError::Ethereum(ref error) => error.is_connection_error(), + RpcError::Substrate(ref error) => error.is_connection_error(), + _ => false, + } + } +} + +impl From for RpcError { + fn from(err: codec::Error) -> Self { + Self::Substrate(SubstrateNodeError::ResponseParseFailed(err)) + } +} diff --git a/polkadot/relays/bin-ethereum/src/substrate_sync_loop.rs b/polkadot/relays/bin-ethereum/src/substrate_sync_loop.rs new file mode 100644 index 00000000000..4e7e433d826 --- /dev/null +++ b/polkadot/relays/bin-ethereum/src/substrate_sync_loop.rs @@ -0,0 +1,200 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Rialto-Substrate -> Ethereum PoA synchronization. + +use crate::ethereum_client::EthereumHighLevelRpc; +use crate::rpc_errors::RpcError; + +use async_trait::async_trait; +use codec::Encode; +use headers_relay::{ + sync::HeadersSyncParams, + sync_loop::TargetClient, + sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders}, +}; +use relay_ethereum_client::{ + types::Address, Client as EthereumClient, ConnectionParams as EthereumConnectionParams, + SigningParams as EthereumSigningParams, +}; +use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SyncHeader as RialtoSyncHeader}; +use relay_substrate_client::{ + headers_source::HeadersSource, Chain as SubstrateChain, Client as SubstrateClient, + ConnectionParams as SubstrateConnectionParams, +}; +use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient}; +use sp_runtime::EncodedJustification; + +use std::fmt::Debug; +use std::{collections::HashSet, time::Duration}; + +pub mod consts { + use super::*; + + /// Interval at which we check new Ethereum blocks. + pub const ETHEREUM_TICK_INTERVAL: Duration = Duration::from_secs(5); + /// Max Ethereum headers we want to have in all 'before-submitted' states. + pub const MAX_FUTURE_HEADERS_TO_DOWNLOAD: usize = 8; + /// Max Ethereum headers count we want to have in 'submitted' state. + pub const MAX_SUBMITTED_HEADERS: usize = 4; + /// Max depth of in-memory headers in all states. Past this depth they will be forgotten (pruned). + pub const PRUNE_DEPTH: u32 = 256; +} + +/// Substrate synchronization parameters. +#[derive(Debug)] +pub struct SubstrateSyncParams { + /// Substrate connection params. + pub sub_params: SubstrateConnectionParams, + /// Ethereum connection params. 
+ pub eth_params: EthereumConnectionParams, + /// Ethereum signing params. + pub eth_sign: EthereumSigningParams, + /// Ethereum bridge contract address. + pub eth_contract_address: Address, + /// Synchronization parameters. + pub sync_params: HeadersSyncParams, + /// Metrics parameters. + pub metrics_params: MetricsParams, +} + +/// Substrate synchronization pipeline. +#[derive(Clone, Copy, Debug)] +#[cfg_attr(test, derive(PartialEq))] +pub struct SubstrateHeadersSyncPipeline; + +impl HeadersSyncPipeline for SubstrateHeadersSyncPipeline { + const SOURCE_NAME: &'static str = "Substrate"; + const TARGET_NAME: &'static str = "Ethereum"; + + type Hash = rialto_runtime::Hash; + type Number = rialto_runtime::BlockNumber; + type Header = RialtoSyncHeader; + type Extra = (); + type Completion = EncodedJustification; + + fn estimate_size(source: &QueuedHeader) -> usize { + source.header().encode().len() + } +} + +/// Queued substrate header ID. +pub type QueuedRialtoHeader = QueuedHeader; + +/// Rialto node as headers source. +type SubstrateHeadersSource = HeadersSource; + +/// Ethereum client as Substrate headers target. +#[derive(Clone)] +struct EthereumHeadersTarget { + /// Ethereum node client. + client: EthereumClient, + /// Bridge contract address. + contract: Address, + /// Ethereum signing params. 
+ sign_params: EthereumSigningParams, +} + +impl EthereumHeadersTarget { + fn new(client: EthereumClient, contract: Address, sign_params: EthereumSigningParams) -> Self { + Self { + client, + contract, + sign_params, + } + } +} + +#[async_trait] +impl RelayClient for EthereumHeadersTarget { + type Error = RpcError; + + async fn reconnect(&mut self) -> Result<(), RpcError> { + self.client.reconnect().await.map_err(Into::into) + } +} + +#[async_trait] +impl TargetClient for EthereumHeadersTarget { + async fn best_header_id(&self) -> Result { + // we can't continue to relay headers if Ethereum node is out of sync, because + // it may have already received (some of) headers that we're going to relay + self.client.ensure_synced().await?; + + self.client.best_substrate_block(self.contract).await + } + + async fn is_known_header(&self, id: RialtoHeaderId) -> Result<(RialtoHeaderId, bool), RpcError> { + self.client.substrate_header_known(self.contract, id).await + } + + async fn submit_headers(&self, headers: Vec) -> SubmittedHeaders { + self.client + .submit_substrate_headers(self.sign_params.clone(), self.contract, headers) + .await + } + + async fn incomplete_headers_ids(&self) -> Result, RpcError> { + self.client.incomplete_substrate_headers(self.contract).await + } + + async fn complete_header( + &self, + id: RialtoHeaderId, + completion: EncodedJustification, + ) -> Result { + self.client + .complete_substrate_header(self.sign_params.clone(), self.contract, id, completion) + .await + } + + async fn requires_extra(&self, header: QueuedRialtoHeader) -> Result<(RialtoHeaderId, bool), RpcError> { + Ok((header.header().id(), false)) + } +} + +/// Run Substrate headers synchronization. 
+pub async fn run(params: SubstrateSyncParams) -> Result<(), RpcError> { + let SubstrateSyncParams { + sub_params, + eth_params, + eth_sign, + eth_contract_address, + sync_params, + metrics_params, + } = params; + + let eth_client = EthereumClient::new(eth_params).await?; + let sub_client = SubstrateClient::::new(sub_params).await?; + + let target = EthereumHeadersTarget::new(eth_client, eth_contract_address, eth_sign); + let source = SubstrateHeadersSource::new(sub_client); + + headers_relay::sync_loop::run( + source, + Rialto::AVERAGE_BLOCK_INTERVAL, + target, + consts::ETHEREUM_TICK_INTERVAL, + (), + sync_params, + metrics_params, + futures::future::pending(), + ) + .await + .map_err(RpcError::SyncLoop)?; + + Ok(()) +} diff --git a/polkadot/relays/bin-ethereum/src/substrate_types.rs b/polkadot/relays/bin-ethereum/src/substrate_types.rs new file mode 100644 index 00000000000..af68d7e0285 --- /dev/null +++ b/polkadot/relays/bin-ethereum/src/substrate_types.rs @@ -0,0 +1,76 @@ +// Copyright 2020-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Converting between Ethereum headers and bridge module types. 
+ +use bp_eth_poa::{ + AuraHeader as SubstrateEthereumHeader, LogEntry as SubstrateEthereumLogEntry, Receipt as SubstrateEthereumReceipt, + TransactionOutcome as SubstrateEthereumTransactionOutcome, +}; +use relay_ethereum_client::types::{ + Header as EthereumHeader, Receipt as EthereumReceipt, HEADER_ID_PROOF as ETHEREUM_HEADER_ID_PROOF, +}; + +/// Convert Ethereum header into Ethereum header for Substrate. +pub fn into_substrate_ethereum_header(header: &EthereumHeader) -> SubstrateEthereumHeader { + SubstrateEthereumHeader { + parent_hash: header.parent_hash, + timestamp: header.timestamp.as_u64(), + number: header.number.expect(ETHEREUM_HEADER_ID_PROOF).as_u64(), + author: header.author, + transactions_root: header.transactions_root, + uncles_hash: header.uncles_hash, + extra_data: header.extra_data.0.clone(), + state_root: header.state_root, + receipts_root: header.receipts_root, + log_bloom: header.logs_bloom.unwrap_or_default().data().into(), + gas_used: header.gas_used, + gas_limit: header.gas_limit, + difficulty: header.difficulty, + seal: header.seal_fields.iter().map(|s| s.0.clone()).collect(), + } +} + +/// Convert Ethereum transactions receipts into Ethereum transactions receipts for Substrate. +pub fn into_substrate_ethereum_receipts( + receipts: &Option>, +) -> Option> { + receipts + .as_ref() + .map(|receipts| receipts.iter().map(into_substrate_ethereum_receipt).collect()) +} + +/// Convert Ethereum transactions receipt into Ethereum transactions receipt for Substrate. 
+pub fn into_substrate_ethereum_receipt(receipt: &EthereumReceipt) -> SubstrateEthereumReceipt { + SubstrateEthereumReceipt { + gas_used: receipt.cumulative_gas_used, + log_bloom: receipt.logs_bloom.data().into(), + logs: receipt + .logs + .iter() + .map(|log_entry| SubstrateEthereumLogEntry { + address: log_entry.address, + topics: log_entry.topics.clone(), + data: log_entry.data.0.clone(), + }) + .collect(), + outcome: match (receipt.status, receipt.root) { + (Some(status), None) => SubstrateEthereumTransactionOutcome::StatusCode(status.as_u64() as u8), + (None, Some(root)) => SubstrateEthereumTransactionOutcome::StateRoot(root), + _ => SubstrateEthereumTransactionOutcome::Unknown, + }, + } +} diff --git a/polkadot/relays/bin-substrate/Cargo.toml b/polkadot/relays/bin-substrate/Cargo.toml new file mode 100644 index 00000000000..d203201e60a --- /dev/null +++ b/polkadot/relays/bin-substrate/Cargo.toml @@ -0,0 +1,61 @@ +[package] +name = "substrate-relay" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +anyhow = "1.0" +async-std = "1.9.0" +async-trait = "0.1.42" +codec = { package = "parity-scale-codec", version = "2.0.0" } +futures = "0.3.12" +hex = "0.4" +log = "0.4.14" +num-format = "0.4" +num-traits = "0.2" +paste = "1.0" +structopt = "0.3" + +# Bridge dependencies + +bp-header-chain = { path = "../../primitives/header-chain" } +bp-kusama = { path = "../../primitives/chain-kusama" } +bp-messages = { path = "../../primitives/messages" } +bp-millau = { path = "../../primitives/chain-millau" } +bp-polkadot = { path = "../../primitives/chain-polkadot" } +bp-rialto = { path = "../../primitives/chain-rialto" } +bp-rococo = { path = "../../primitives/chain-rococo" } +bp-runtime = { path = "../../primitives/runtime" } +bp-westend = { path = "../../primitives/chain-westend" } +bridge-runtime-common = { path = "../../bin/runtime-common" } +finality-grandpa = { version = 
"0.14.0" } +finality-relay = { path = "../finality" } +headers-relay = { path = "../headers" } +messages-relay = { path = "../messages" } +millau-runtime = { path = "../../bin/millau/runtime" } +pallet-bridge-dispatch = { path = "../../modules/dispatch" } +pallet-bridge-messages = { path = "../../modules/messages" } +relay-kusama-client = { path = "../client-kusama" } +relay-millau-client = { path = "../client-millau" } +relay-polkadot-client = { path = "../client-polkadot" } +relay-rialto-client = { path = "../client-rialto" } +relay-rococo-client = { path = "../client-rococo" } +relay-substrate-client = { path = "../client-substrate" } +relay-utils = { path = "../utils" } +relay-westend-client = { path = "../client-westend" } +rialto-runtime = { path = "../../bin/rialto/runtime" } + +# Substrate Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" } + +[dev-dependencies] +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +hex-literal = "0.3" diff --git a/polkadot/relays/bin-substrate/src/chains/millau.rs b/polkadot/relays/bin-substrate/src/chains/millau.rs new file mode 100644 index 00000000000..ac5e611fdbc --- /dev/null +++ b/polkadot/relays/bin-substrate/src/chains/millau.rs @@ -0,0 +1,101 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Millau chain specification for CLI. + +use crate::cli::{ + bridge, + encode_call::{self, Call, CliEncodeCall}, + encode_message, send_message, CliChain, +}; +use codec::Decode; +use frame_support::weights::{GetDispatchInfo, Weight}; +use pallet_bridge_dispatch::{CallOrigin, MessagePayload}; +use relay_millau_client::Millau; +use sp_version::RuntimeVersion; + +impl CliEncodeCall for Millau { + fn max_extrinsic_size() -> u32 { + bp_millau::max_extrinsic_size() + } + + fn encode_call(call: &Call) -> anyhow::Result { + Ok(match call { + Call::Raw { data } => Decode::decode(&mut &*data.0)?, + Call::Remark { remark_payload, .. 
} => millau_runtime::Call::System(millau_runtime::SystemCall::remark( + remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), + )), + Call::Transfer { recipient, amount } => millau_runtime::Call::Balances( + millau_runtime::BalancesCall::transfer(recipient.raw_id(), amount.cast()), + ), + Call::BridgeSendMessage { + lane, + payload, + fee, + bridge_instance_index, + } => match *bridge_instance_index { + bridge::MILLAU_TO_RIALTO_INDEX => { + let payload = Decode::decode(&mut &*payload.0)?; + millau_runtime::Call::BridgeRialtoMessages(millau_runtime::MessagesCall::send_message( + lane.0, + payload, + fee.cast(), + )) + } + _ => anyhow::bail!( + "Unsupported target bridge pallet with instance index: {}", + bridge_instance_index + ), + }, + }) + } +} + +impl CliChain for Millau { + const RUNTIME_VERSION: RuntimeVersion = millau_runtime::VERSION; + + type KeyPair = sp_core::sr25519::Pair; + type MessagePayload = MessagePayload>; + + fn ss58_format() -> u16 { + millau_runtime::SS58Prefix::get() as u16 + } + + fn max_extrinsic_weight() -> Weight { + bp_millau::max_extrinsic_weight() + } + + // TODO [#854|#843] support multiple bridges? 
+ fn encode_message(message: encode_message::MessagePayload) -> Result { + match message { + encode_message::MessagePayload::Raw { data } => MessagePayload::decode(&mut &*data.0) + .map_err(|e| format!("Failed to decode Millau's MessagePayload: {:?}", e)), + encode_message::MessagePayload::Call { mut call, mut sender } => { + type Source = Millau; + type Target = relay_rialto_client::Rialto; + + sender.enforce_chain::(); + let spec_version = Target::RUNTIME_VERSION.spec_version; + let origin = CallOrigin::SourceAccount(sender.raw_id()); + encode_call::preprocess_call::(&mut call, bridge::MILLAU_TO_RIALTO_INDEX); + let call = Target::encode_call(&call).map_err(|e| e.to_string())?; + let weight = call.get_dispatch_info().weight; + + Ok(send_message::message_payload(spec_version, weight, origin, &call)) + } + } + } +} diff --git a/polkadot/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs b/polkadot/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs new file mode 100644 index 00000000000..58f0620b076 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs @@ -0,0 +1,53 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Millau-to-Rialto headers sync entrypoint. 
+ +use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; + +use bp_header_chain::justification::GrandpaJustification; +use codec::Encode; +use relay_millau_client::{Millau, SyncHeader as MillauSyncHeader}; +use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams}; +use relay_substrate_client::{Chain, TransactionSignScheme}; +use sp_core::{Bytes, Pair}; + +/// Millau-to-Rialto finality sync pipeline. +pub(crate) type MillauFinalityToRialto = SubstrateFinalityToSubstrate; + +impl SubstrateFinalitySyncPipeline for MillauFinalityToRialto { + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD; + + type TargetChain = Rialto; + + fn transactions_author(&self) -> bp_rialto::AccountId { + (*self.target_sign.public().as_array_ref()).into() + } + + fn make_submit_finality_proof_transaction( + &self, + transaction_nonce: ::Index, + header: MillauSyncHeader, + proof: GrandpaJustification, + ) -> Bytes { + let call = rialto_runtime::BridgeGrandpaMillauCall::submit_finality_proof(header.into_inner(), proof).into(); + + let genesis_hash = *self.target_client.genesis_hash(); + let transaction = Rialto::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + + Bytes(transaction.encode()) + } +} diff --git a/polkadot/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs b/polkadot/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs new file mode 100644 index 00000000000..d96fa7b7972 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs @@ -0,0 +1,245 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Millau-to-Rialto messages sync entrypoint. + +use crate::messages_lane::{ + select_delivery_transaction_limits, MessagesRelayParams, SubstrateMessageLane, SubstrateMessageLaneToSubstrate, +}; +use crate::messages_source::SubstrateMessagesSource; +use crate::messages_target::SubstrateMessagesTarget; + +use bp_messages::MessageNonce; +use bp_runtime::{MILLAU_BRIDGE_INSTANCE, RIALTO_BRIDGE_INSTANCE}; +use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; +use codec::Encode; +use frame_support::dispatch::GetDispatchInfo; +use messages_relay::message_lane::MessageLane; +use relay_millau_client::{HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams}; +use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams}; +use relay_substrate_client::{ + metrics::{FloatStorageValueMetric, StorageProofOverheadMetric}, + Chain, TransactionSignScheme, +}; +use sp_core::{Bytes, Pair}; +use std::{ops::RangeInclusive, time::Duration}; + +/// Millau-to-Rialto message lane. 
+pub type MillauMessagesToRialto = + SubstrateMessageLaneToSubstrate; + +impl SubstrateMessageLane for MillauMessagesToRialto { + const OUTBOUND_LANE_MESSAGES_DISPATCH_WEIGHT_METHOD: &'static str = + bp_rialto::TO_RIALTO_MESSAGES_DISPATCH_WEIGHT_METHOD; + const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str = + bp_rialto::TO_RIALTO_LATEST_GENERATED_NONCE_METHOD; + const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rialto::TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD; + + const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_millau::FROM_MILLAU_LATEST_RECEIVED_NONCE_METHOD; + const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str = + bp_millau::FROM_MILLAU_LATEST_CONFIRMED_NONCE_METHOD; + const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_millau::FROM_MILLAU_UNREWARDED_RELAYERS_STATE; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD; + const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD; + + type SourceChain = Millau; + type TargetChain = Rialto; + + fn source_transactions_author(&self) -> bp_rialto::AccountId { + (*self.source_sign.public().as_array_ref()).into() + } + + fn make_messages_receiving_proof_transaction( + &self, + transaction_nonce: ::Index, + _generated_at_block: RialtoHeaderId, + proof: ::MessagesReceivingProof, + ) -> Bytes { + let (relayers_state, proof) = proof; + let call: millau_runtime::Call = + millau_runtime::MessagesCall::receive_messages_delivery_proof(proof, relayers_state).into(); + let call_weight = call.get_dispatch_info().weight; + let genesis_hash = *self.source_client.genesis_hash(); + let transaction = Millau::sign_transaction(genesis_hash, &self.source_sign, transaction_nonce, call); + log::trace!( + target: "bridge", + "Prepared Rialto -> Millau confirmation transaction. 
Weight: {}/{}, size: {}/{}", + call_weight, + bp_millau::max_extrinsic_weight(), + transaction.encode().len(), + bp_millau::max_extrinsic_size(), + ); + Bytes(transaction.encode()) + } + + fn target_transactions_author(&self) -> bp_rialto::AccountId { + (*self.target_sign.public().as_array_ref()).into() + } + + fn make_messages_delivery_transaction( + &self, + transaction_nonce: ::Index, + _generated_at_header: MillauHeaderId, + _nonces: RangeInclusive, + proof: ::MessagesProof, + ) -> Bytes { + let (dispatch_weight, proof) = proof; + let FromBridgedChainMessagesProof { + ref nonces_start, + ref nonces_end, + .. + } = proof; + let messages_count = nonces_end - nonces_start + 1; + let call: rialto_runtime::Call = rialto_runtime::MessagesCall::receive_messages_proof( + self.relayer_id_at_source.clone(), + proof, + messages_count as _, + dispatch_weight, + ) + .into(); + let call_weight = call.get_dispatch_info().weight; + let genesis_hash = *self.target_client.genesis_hash(); + let transaction = Rialto::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + log::trace!( + target: "bridge", + "Prepared Millau -> Rialto delivery transaction. Weight: {}/{}, size: {}/{}", + call_weight, + bp_rialto::max_extrinsic_weight(), + transaction.encode().len(), + bp_rialto::max_extrinsic_size(), + ); + Bytes(transaction.encode()) + } +} + +/// Millau node as messages source. +type MillauSourceClient = SubstrateMessagesSource< + Millau, + MillauMessagesToRialto, + millau_runtime::Runtime, + millau_runtime::WithRialtoMessagesInstance, +>; + +/// Rialto node as messages target. +type RialtoTargetClient = SubstrateMessagesTarget< + Rialto, + MillauMessagesToRialto, + rialto_runtime::Runtime, + rialto_runtime::WithMillauMessagesInstance, +>; + +/// Run Millau-to-Rialto messages sync. 
+pub async fn run( + params: MessagesRelayParams, +) -> Result<(), String> { + let stall_timeout = Duration::from_secs(5 * 60); + let relayer_id_at_millau = (*params.source_sign.public().as_array_ref()).into(); + + let lane_id = params.lane_id; + let source_client = params.source_client; + let lane = MillauMessagesToRialto { + source_client: source_client.clone(), + source_sign: params.source_sign, + target_client: params.target_client.clone(), + target_sign: params.target_sign, + relayer_id_at_source: relayer_id_at_millau, + }; + + // 2/3 is reserved for proofs and tx overhead + let max_messages_size_in_single_batch = bp_rialto::max_extrinsic_size() as usize / 3; + // TODO: use Millau weights after https://github.com/paritytech/parity-bridges-common/issues/390 + let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = + select_delivery_transaction_limits::>( + bp_rialto::max_extrinsic_weight(), + bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + ); + + log::info!( + target: "bridge", + "Starting Millau -> Rialto messages relay.\n\t\ + Millau relayer account id: {:?}\n\t\ + Max messages in single transaction: {}\n\t\ + Max messages size in single transaction: {}\n\t\ + Max messages weight in single transaction: {}", + lane.relayer_id_at_source, + max_messages_in_single_batch, + max_messages_size_in_single_batch, + max_messages_weight_in_single_batch, + ); + + messages_relay::message_lane_loop::run( + messages_relay::message_lane_loop::Params { + lane: lane_id, + source_tick: Millau::AVERAGE_BLOCK_INTERVAL, + target_tick: Rialto::AVERAGE_BLOCK_INTERVAL, + reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY, + stall_timeout, + delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams { + max_unrewarded_relayer_entries_at_target: bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + max_unconfirmed_nonces_at_target: bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, + max_messages_in_single_batch, + 
max_messages_weight_in_single_batch, + max_messages_size_in_single_batch, + }, + }, + MillauSourceClient::new( + source_client.clone(), + lane.clone(), + lane_id, + RIALTO_BRIDGE_INSTANCE, + params.target_to_source_headers_relay, + ), + RialtoTargetClient::new( + params.target_client, + lane, + lane_id, + MILLAU_BRIDGE_INSTANCE, + params.source_to_target_headers_relay, + ), + relay_utils::relay_metrics( + Some(messages_relay::message_lane_loop::metrics_prefix::< + MillauMessagesToRialto, + >(&lane_id)), + params.metrics_params, + ) + .standalone_metric(|registry, prefix| { + StorageProofOverheadMetric::new( + registry, + prefix, + source_client.clone(), + "millau_storage_proof_overhead".into(), + "Millau storage proof overhead".into(), + ) + })? + .standalone_metric(|registry, prefix| { + FloatStorageValueMetric::<_, sp_runtime::FixedU128>::new( + registry, + prefix, + source_client, + sp_core::storage::StorageKey( + millau_runtime::rialto_messages::RialtoToMillauConversionRate::key().to_vec(), + ), + Some(millau_runtime::rialto_messages::INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE), + "millau_rialto_to_millau_conversion_rate".into(), + "Rialto to Millau tokens conversion rate (used by Rialto)".into(), + ) + })? + .into_params(), + futures::future::pending(), + ) + .await +} diff --git a/polkadot/relays/bin-substrate/src/chains/mod.rs b/polkadot/relays/bin-substrate/src/chains/mod.rs new file mode 100644 index 00000000000..9e6a7361d5f --- /dev/null +++ b/polkadot/relays/bin-substrate/src/chains/mod.rs @@ -0,0 +1,335 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Chain-specific relayer configuration. + +pub mod millau_headers_to_rialto; +pub mod millau_messages_to_rialto; +pub mod rialto_headers_to_millau; +pub mod rialto_messages_to_millau; +pub mod rococo_headers_to_westend; +pub mod westend_headers_to_millau; +pub mod westend_headers_to_rococo; + +mod millau; +mod rialto; +mod rococo; +mod westend; + +use relay_utils::metrics::{FloatJsonValueMetric, MetricsParams}; + +pub(crate) fn add_polkadot_kusama_price_metrics( + params: MetricsParams, +) -> anyhow::Result { + Ok( + relay_utils::relay_metrics(Some(finality_relay::metrics_prefix::()), params) + // Polkadot/Kusama prices are added as metrics here, because atm we don't have Polkadot <-> Kusama + // relays, but we want to test metrics/dashboards in advance + .standalone_metric(|registry, prefix| { + FloatJsonValueMetric::new( + registry, + prefix, + "https://api.coingecko.com/api/v3/simple/price?ids=Polkadot&vs_currencies=btc".into(), + "$.polkadot.btc".into(), + "polkadot_to_base_conversion_rate".into(), + "Rate used to convert from DOT to some BASE tokens".into(), + ) + }) + .map_err(|e| anyhow::format_err!("{}", e))? + .standalone_metric(|registry, prefix| { + FloatJsonValueMetric::new( + registry, + prefix, + "https://api.coingecko.com/api/v3/simple/price?ids=Kusama&vs_currencies=btc".into(), + "$.kusama.btc".into(), + "kusama_to_base_conversion_rate".into(), + "Rate used to convert from KSM to some BASE tokens".into(), + ) + }) + .map_err(|e| anyhow::format_err!("{}", e))? 
+ .into_params(), + ) +} + +#[cfg(test)] +mod tests { + use crate::cli::{encode_call, send_message}; + use bp_messages::source_chain::TargetHeaderChain; + use codec::Encode; + use frame_support::dispatch::GetDispatchInfo; + use relay_millau_client::Millau; + use relay_rialto_client::Rialto; + use relay_substrate_client::TransactionSignScheme; + use sp_core::Pair; + use sp_runtime::traits::{IdentifyAccount, Verify}; + + #[test] + fn millau_signature_is_valid_on_rialto() { + let millau_sign = relay_millau_client::SigningParams::from_string("//Dave", None).unwrap(); + + let call = rialto_runtime::Call::System(rialto_runtime::SystemCall::remark(vec![])); + + let millau_public: bp_millau::AccountSigner = millau_sign.public().into(); + let millau_account_id: bp_millau::AccountId = millau_public.into_account(); + + let digest = millau_runtime::rialto_account_ownership_digest( + &call, + millau_account_id, + rialto_runtime::VERSION.spec_version, + ); + + let rialto_signer = relay_rialto_client::SigningParams::from_string("//Dave", None).unwrap(); + let signature = rialto_signer.sign(&digest); + + assert!(signature.verify(&digest[..], &rialto_signer.public())); + } + + #[test] + fn rialto_signature_is_valid_on_millau() { + let rialto_sign = relay_rialto_client::SigningParams::from_string("//Dave", None).unwrap(); + + let call = millau_runtime::Call::System(millau_runtime::SystemCall::remark(vec![])); + + let rialto_public: bp_rialto::AccountSigner = rialto_sign.public().into(); + let rialto_account_id: bp_rialto::AccountId = rialto_public.into_account(); + + let digest = rialto_runtime::millau_account_ownership_digest( + &call, + rialto_account_id, + millau_runtime::VERSION.spec_version, + ); + + let millau_signer = relay_millau_client::SigningParams::from_string("//Dave", None).unwrap(); + let signature = millau_signer.sign(&digest); + + assert!(signature.verify(&digest[..], &millau_signer.public())); + } + + #[test] + fn 
maximal_rialto_to_millau_message_arguments_size_is_computed_correctly() { + use rialto_runtime::millau_messages::Millau; + + let maximal_remark_size = encode_call::compute_maximal_message_arguments_size( + bp_rialto::max_extrinsic_size(), + bp_millau::max_extrinsic_size(), + ); + + let call: millau_runtime::Call = millau_runtime::SystemCall::remark(vec![42; maximal_remark_size as _]).into(); + let payload = send_message::message_payload( + Default::default(), + call.get_dispatch_info().weight, + pallet_bridge_dispatch::CallOrigin::SourceRoot, + &call, + ); + assert_eq!(Millau::verify_message(&payload), Ok(())); + + let call: millau_runtime::Call = + millau_runtime::SystemCall::remark(vec![42; (maximal_remark_size + 1) as _]).into(); + let payload = send_message::message_payload( + Default::default(), + call.get_dispatch_info().weight, + pallet_bridge_dispatch::CallOrigin::SourceRoot, + &call, + ); + assert!(Millau::verify_message(&payload).is_err()); + } + + #[test] + fn maximal_size_remark_to_rialto_is_generated_correctly() { + assert!( + bridge_runtime_common::messages::target::maximal_incoming_message_size( + bp_rialto::max_extrinsic_size() + ) > bp_millau::max_extrinsic_size(), + "We can't actually send maximal messages to Rialto from Millau, because Millau extrinsics can't be that large", + ) + } + + #[test] + fn maximal_rialto_to_millau_message_dispatch_weight_is_computed_correctly() { + use rialto_runtime::millau_messages::Millau; + + let maximal_dispatch_weight = + send_message::compute_maximal_message_dispatch_weight(bp_millau::max_extrinsic_weight()); + let call: millau_runtime::Call = rialto_runtime::SystemCall::remark(vec![]).into(); + + let payload = send_message::message_payload( + Default::default(), + maximal_dispatch_weight, + pallet_bridge_dispatch::CallOrigin::SourceRoot, + &call, + ); + assert_eq!(Millau::verify_message(&payload), Ok(())); + + let payload = send_message::message_payload( + Default::default(), + maximal_dispatch_weight + 1, + 
pallet_bridge_dispatch::CallOrigin::SourceRoot, + &call, + ); + assert!(Millau::verify_message(&payload).is_err()); + } + + #[test] + fn maximal_weight_fill_block_to_rialto_is_generated_correctly() { + use millau_runtime::rialto_messages::Rialto; + + let maximal_dispatch_weight = + send_message::compute_maximal_message_dispatch_weight(bp_rialto::max_extrinsic_weight()); + let call: rialto_runtime::Call = millau_runtime::SystemCall::remark(vec![]).into(); + + let payload = send_message::message_payload( + Default::default(), + maximal_dispatch_weight, + pallet_bridge_dispatch::CallOrigin::SourceRoot, + &call, + ); + assert_eq!(Rialto::verify_message(&payload), Ok(())); + + let payload = send_message::message_payload( + Default::default(), + maximal_dispatch_weight + 1, + pallet_bridge_dispatch::CallOrigin::SourceRoot, + &call, + ); + assert!(Rialto::verify_message(&payload).is_err()); + } + + #[test] + fn rialto_tx_extra_bytes_constant_is_correct() { + let rialto_call = rialto_runtime::Call::System(rialto_runtime::SystemCall::remark(vec![])); + let rialto_tx = Rialto::sign_transaction( + Default::default(), + &sp_keyring::AccountKeyring::Alice.pair(), + 0, + rialto_call.clone(), + ); + let extra_bytes_in_transaction = rialto_tx.encode().len() - rialto_call.encode().len(); + assert!( + bp_rialto::TX_EXTRA_BYTES as usize >= extra_bytes_in_transaction, + "Hardcoded number of extra bytes in Rialto transaction {} is lower than actual value: {}", + bp_rialto::TX_EXTRA_BYTES, + extra_bytes_in_transaction, + ); + } + + #[test] + fn millau_tx_extra_bytes_constant_is_correct() { + let millau_call = millau_runtime::Call::System(millau_runtime::SystemCall::remark(vec![])); + let millau_tx = Millau::sign_transaction( + Default::default(), + &sp_keyring::AccountKeyring::Alice.pair(), + 0, + millau_call.clone(), + ); + let extra_bytes_in_transaction = millau_tx.encode().len() - millau_call.encode().len(); + assert!( + bp_millau::TX_EXTRA_BYTES as usize >= 
extra_bytes_in_transaction, + "Hardcoded number of extra bytes in Millau transaction {} is lower than actual value: {}", + bp_millau::TX_EXTRA_BYTES, + extra_bytes_in_transaction, + ); + } +} + +#[cfg(test)] +mod rococo_tests { + use bp_header_chain::justification::GrandpaJustification; + use codec::Encode; + + #[test] + fn scale_compatibility_of_bridges_call() { + // given + let header = sp_runtime::generic::Header { + parent_hash: Default::default(), + number: Default::default(), + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: sp_runtime::generic::Digest { logs: vec![] }, + }; + + let justification = GrandpaJustification { + round: 0, + commit: finality_grandpa::Commit { + target_hash: Default::default(), + target_number: Default::default(), + precommits: vec![], + }, + votes_ancestries: vec![], + }; + + let actual = bp_rococo::BridgeGrandpaWestendCall::submit_finality_proof(header.clone(), justification.clone()); + let expected = millau_runtime::BridgeGrandpaRialtoCall::::submit_finality_proof( + header, + justification, + ); + + // when + let actual_encoded = actual.encode(); + let expected_encoded = expected.encode(); + + // then + assert_eq!( + actual_encoded, expected_encoded, + "\n\nEncoding difference.\nGot {:#?} \nExpected: {:#?}", + actual, expected + ); + } +} + +#[cfg(test)] +mod westend_tests { + use bp_header_chain::justification::GrandpaJustification; + use codec::Encode; + + #[test] + fn scale_compatibility_of_bridges_call() { + // given + let header = sp_runtime::generic::Header { + parent_hash: Default::default(), + number: Default::default(), + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: sp_runtime::generic::Digest { logs: vec![] }, + }; + + let justification = GrandpaJustification { + round: 0, + commit: finality_grandpa::Commit { + target_hash: Default::default(), + target_number: Default::default(), + precommits: vec![], + }, + votes_ancestries: vec![], + }; + + let 
actual = bp_westend::BridgeGrandpaRococoCall::submit_finality_proof(header.clone(), justification.clone()); + let expected = millau_runtime::BridgeGrandpaRialtoCall::::submit_finality_proof( + header, + justification, + ); + + // when + let actual_encoded = actual.encode(); + let expected_encoded = expected.encode(); + + // then + assert_eq!( + actual_encoded, expected_encoded, + "\n\nEncoding difference.\nGot {:#?} \nExpected: {:#?}", + actual, expected + ); + } +} diff --git a/polkadot/relays/bin-substrate/src/chains/rialto.rs b/polkadot/relays/bin-substrate/src/chains/rialto.rs new file mode 100644 index 00000000000..25c1ab04c9f --- /dev/null +++ b/polkadot/relays/bin-substrate/src/chains/rialto.rs @@ -0,0 +1,98 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Rialto chain specification for CLI. 
+ +use crate::cli::{ + bridge, + encode_call::{self, Call, CliEncodeCall}, + encode_message, send_message, CliChain, +}; +use codec::Decode; +use frame_support::weights::{GetDispatchInfo, Weight}; +use pallet_bridge_dispatch::{CallOrigin, MessagePayload}; +use relay_rialto_client::Rialto; +use sp_version::RuntimeVersion; + +impl CliEncodeCall for Rialto { + fn max_extrinsic_size() -> u32 { + bp_rialto::max_extrinsic_size() + } + + fn encode_call(call: &Call) -> anyhow::Result { + Ok(match call { + Call::Raw { data } => Decode::decode(&mut &*data.0)?, + Call::Remark { remark_payload, .. } => rialto_runtime::Call::System(rialto_runtime::SystemCall::remark( + remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), + )), + Call::Transfer { recipient, amount } => { + rialto_runtime::Call::Balances(rialto_runtime::BalancesCall::transfer(recipient.raw_id(), amount.0)) + } + Call::BridgeSendMessage { + lane, + payload, + fee, + bridge_instance_index, + } => match *bridge_instance_index { + bridge::RIALTO_TO_MILLAU_INDEX => { + let payload = Decode::decode(&mut &*payload.0)?; + rialto_runtime::Call::BridgeMillauMessages(rialto_runtime::MessagesCall::send_message( + lane.0, payload, fee.0, + )) + } + _ => anyhow::bail!( + "Unsupported target bridge pallet with instance index: {}", + bridge_instance_index + ), + }, + }) + } +} + +impl CliChain for Rialto { + const RUNTIME_VERSION: RuntimeVersion = rialto_runtime::VERSION; + + type KeyPair = sp_core::sr25519::Pair; + type MessagePayload = MessagePayload>; + + fn ss58_format() -> u16 { + rialto_runtime::SS58Prefix::get() as u16 + } + + fn max_extrinsic_weight() -> Weight { + bp_rialto::max_extrinsic_weight() + } + + fn encode_message(message: encode_message::MessagePayload) -> Result { + match message { + encode_message::MessagePayload::Raw { data } => MessagePayload::decode(&mut &*data.0) + .map_err(|e| format!("Failed to decode Rialto's MessagePayload: {:?}", e)), + encode_message::MessagePayload::Call { mut call, 
mut sender } => { + type Source = Rialto; + type Target = relay_millau_client::Millau; + + sender.enforce_chain::(); + let spec_version = Target::RUNTIME_VERSION.spec_version; + let origin = CallOrigin::SourceAccount(sender.raw_id()); + encode_call::preprocess_call::(&mut call, bridge::RIALTO_TO_MILLAU_INDEX); + let call = Target::encode_call(&call).map_err(|e| e.to_string())?; + let weight = call.get_dispatch_info().weight; + + Ok(send_message::message_payload(spec_version, weight, origin, &call)) + } + } + } +} diff --git a/polkadot/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs b/polkadot/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs new file mode 100644 index 00000000000..39295c89433 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs @@ -0,0 +1,57 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Rialto-to-Millau headers sync entrypoint. 
+ +use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; + +use bp_header_chain::justification::GrandpaJustification; +use codec::Encode; +use relay_millau_client::{Millau, SigningParams as MillauSigningParams}; +use relay_rialto_client::{Rialto, SyncHeader as RialtoSyncHeader}; +use relay_substrate_client::{Chain, TransactionSignScheme}; +use sp_core::{Bytes, Pair}; + +/// Rialto-to-Millau finality sync pipeline. +pub(crate) type RialtoFinalityToMillau = SubstrateFinalityToSubstrate; + +impl SubstrateFinalitySyncPipeline for RialtoFinalityToMillau { + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD; + + type TargetChain = Millau; + + fn transactions_author(&self) -> bp_millau::AccountId { + (*self.target_sign.public().as_array_ref()).into() + } + + fn make_submit_finality_proof_transaction( + &self, + transaction_nonce: ::Index, + header: RialtoSyncHeader, + proof: GrandpaJustification, + ) -> Bytes { + let call = millau_runtime::BridgeGrandpaRialtoCall::< + millau_runtime::Runtime, + millau_runtime::RialtoGrandpaInstance, + >::submit_finality_proof(header.into_inner(), proof) + .into(); + + let genesis_hash = *self.target_client.genesis_hash(); + let transaction = Millau::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + + Bytes(transaction.encode()) + } +} diff --git a/polkadot/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs b/polkadot/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs new file mode 100644 index 00000000000..ec39a4caa3f --- /dev/null +++ b/polkadot/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs @@ -0,0 +1,244 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Rialto-to-Millau messages sync entrypoint. + +use crate::messages_lane::{ + select_delivery_transaction_limits, MessagesRelayParams, SubstrateMessageLane, SubstrateMessageLaneToSubstrate, +}; +use crate::messages_source::SubstrateMessagesSource; +use crate::messages_target::SubstrateMessagesTarget; + +use bp_messages::MessageNonce; +use bp_runtime::{MILLAU_BRIDGE_INSTANCE, RIALTO_BRIDGE_INSTANCE}; +use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; +use codec::Encode; +use frame_support::dispatch::GetDispatchInfo; +use messages_relay::message_lane::MessageLane; +use relay_millau_client::{HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams}; +use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams}; +use relay_substrate_client::{ + metrics::{FloatStorageValueMetric, StorageProofOverheadMetric}, + Chain, TransactionSignScheme, +}; +use sp_core::{Bytes, Pair}; +use std::{ops::RangeInclusive, time::Duration}; + +/// Rialto-to-Millau message lane. 
+pub type RialtoMessagesToMillau = + SubstrateMessageLaneToSubstrate; + +impl SubstrateMessageLane for RialtoMessagesToMillau { + const OUTBOUND_LANE_MESSAGES_DISPATCH_WEIGHT_METHOD: &'static str = + bp_millau::TO_MILLAU_MESSAGES_DISPATCH_WEIGHT_METHOD; + const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str = + bp_millau::TO_MILLAU_LATEST_GENERATED_NONCE_METHOD; + const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_millau::TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD; + + const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rialto::FROM_RIALTO_LATEST_RECEIVED_NONCE_METHOD; + const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str = + bp_rialto::FROM_RIALTO_LATEST_CONFIRMED_NONCE_METHOD; + const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_rialto::FROM_RIALTO_UNREWARDED_RELAYERS_STATE; + + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD; + const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD; + + type SourceChain = Rialto; + type TargetChain = Millau; + + fn source_transactions_author(&self) -> bp_rialto::AccountId { + (*self.source_sign.public().as_array_ref()).into() + } + + fn make_messages_receiving_proof_transaction( + &self, + transaction_nonce: ::Index, + _generated_at_block: MillauHeaderId, + proof: ::MessagesReceivingProof, + ) -> Bytes { + let (relayers_state, proof) = proof; + let call: rialto_runtime::Call = + rialto_runtime::MessagesCall::receive_messages_delivery_proof(proof, relayers_state).into(); + let call_weight = call.get_dispatch_info().weight; + let genesis_hash = *self.source_client.genesis_hash(); + let transaction = Rialto::sign_transaction(genesis_hash, &self.source_sign, transaction_nonce, call); + log::trace!( + target: "bridge", + "Prepared Millau -> Rialto confirmation transaction. 
Weight: {}/{}, size: {}/{}", + call_weight, + bp_rialto::max_extrinsic_weight(), + transaction.encode().len(), + bp_rialto::max_extrinsic_size(), + ); + Bytes(transaction.encode()) + } + + fn target_transactions_author(&self) -> bp_rialto::AccountId { + (*self.target_sign.public().as_array_ref()).into() + } + + fn make_messages_delivery_transaction( + &self, + transaction_nonce: ::Index, + _generated_at_header: RialtoHeaderId, + _nonces: RangeInclusive, + proof: ::MessagesProof, + ) -> Bytes { + let (dispatch_weight, proof) = proof; + let FromBridgedChainMessagesProof { + ref nonces_start, + ref nonces_end, + .. + } = proof; + let messages_count = nonces_end - nonces_start + 1; + let call: millau_runtime::Call = millau_runtime::MessagesCall::receive_messages_proof( + self.relayer_id_at_source.clone(), + proof, + messages_count as _, + dispatch_weight, + ) + .into(); + let call_weight = call.get_dispatch_info().weight; + let genesis_hash = *self.target_client.genesis_hash(); + let transaction = Millau::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + log::trace!( + target: "bridge", + "Prepared Rialto -> Millau delivery transaction. Weight: {}/{}, size: {}/{}", + call_weight, + bp_millau::max_extrinsic_weight(), + transaction.encode().len(), + bp_millau::max_extrinsic_size(), + ); + Bytes(transaction.encode()) + } +} + +/// Rialto node as messages source. +type RialtoSourceClient = SubstrateMessagesSource< + Rialto, + RialtoMessagesToMillau, + rialto_runtime::Runtime, + rialto_runtime::WithMillauMessagesInstance, +>; + +/// Millau node as messages target. +type MillauTargetClient = SubstrateMessagesTarget< + Millau, + RialtoMessagesToMillau, + millau_runtime::Runtime, + millau_runtime::WithRialtoMessagesInstance, +>; + +/// Run Rialto-to-Millau messages sync. 
+pub async fn run( + params: MessagesRelayParams, +) -> Result<(), String> { + let stall_timeout = Duration::from_secs(5 * 60); + let relayer_id_at_rialto = (*params.source_sign.public().as_array_ref()).into(); + + let lane_id = params.lane_id; + let source_client = params.source_client; + let lane = RialtoMessagesToMillau { + source_client: source_client.clone(), + source_sign: params.source_sign, + target_client: params.target_client.clone(), + target_sign: params.target_sign, + relayer_id_at_source: relayer_id_at_rialto, + }; + + // 2/3 is reserved for proofs and tx overhead + let max_messages_size_in_single_batch = bp_millau::max_extrinsic_size() as usize / 3; + let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = + select_delivery_transaction_limits::>( + bp_millau::max_extrinsic_weight(), + bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + ); + + log::info!( + target: "bridge", + "Starting Rialto -> Millau messages relay.\n\t\ + Rialto relayer account id: {:?}\n\t\ + Max messages in single transaction: {}\n\t\ + Max messages size in single transaction: {}\n\t\ + Max messages weight in single transaction: {}", + lane.relayer_id_at_source, + max_messages_in_single_batch, + max_messages_size_in_single_batch, + max_messages_weight_in_single_batch, + ); + + messages_relay::message_lane_loop::run( + messages_relay::message_lane_loop::Params { + lane: lane_id, + source_tick: Rialto::AVERAGE_BLOCK_INTERVAL, + target_tick: Millau::AVERAGE_BLOCK_INTERVAL, + reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY, + stall_timeout, + delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams { + max_unrewarded_relayer_entries_at_target: bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + max_unconfirmed_nonces_at_target: bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE, + max_messages_in_single_batch, + max_messages_weight_in_single_batch, + max_messages_size_in_single_batch, + }, + }, + 
RialtoSourceClient::new( + source_client.clone(), + lane.clone(), + lane_id, + MILLAU_BRIDGE_INSTANCE, + params.target_to_source_headers_relay, + ), + MillauTargetClient::new( + params.target_client, + lane, + lane_id, + RIALTO_BRIDGE_INSTANCE, + params.source_to_target_headers_relay, + ), + relay_utils::relay_metrics( + Some(messages_relay::message_lane_loop::metrics_prefix::< + RialtoMessagesToMillau, + >(&lane_id)), + params.metrics_params, + ) + .standalone_metric(|registry, prefix| { + StorageProofOverheadMetric::new( + registry, + prefix, + source_client.clone(), + "rialto_storage_proof_overhead".into(), + "Rialto storage proof overhead".into(), + ) + })? + .standalone_metric(|registry, prefix| { + FloatStorageValueMetric::<_, sp_runtime::FixedU128>::new( + registry, + prefix, + source_client, + sp_core::storage::StorageKey( + rialto_runtime::millau_messages::MillauToRialtoConversionRate::key().to_vec(), + ), + Some(rialto_runtime::millau_messages::INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE), + "rialto_millau_to_rialto_conversion_rate".into(), + "Millau to Rialto tokens conversion rate (used by Millau)".into(), + ) + })? + .into_params(), + futures::future::pending(), + ) + .await +} diff --git a/polkadot/relays/bin-substrate/src/chains/rococo.rs b/polkadot/relays/bin-substrate/src/chains/rococo.rs new file mode 100644 index 00000000000..0bcf388c346 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/chains/rococo.rs @@ -0,0 +1,39 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::{encode_message, CliChain}; +use frame_support::weights::Weight; +use relay_rococo_client::Rococo; +use sp_version::RuntimeVersion; + +impl CliChain for Rococo { + const RUNTIME_VERSION: RuntimeVersion = bp_rococo::VERSION; + + type KeyPair = sp_core::sr25519::Pair; + type MessagePayload = (); + + fn ss58_format() -> u16 { + 42 + } + + fn max_extrinsic_weight() -> Weight { + 0 + } + + fn encode_message(_message: encode_message::MessagePayload) -> Result { + Err("Sending messages from Rococo is not yet supported.".into()) + } +} diff --git a/polkadot/relays/bin-substrate/src/chains/rococo_headers_to_westend.rs b/polkadot/relays/bin-substrate/src/chains/rococo_headers_to_westend.rs new file mode 100644 index 00000000000..dca91adb3df --- /dev/null +++ b/polkadot/relays/bin-substrate/src/chains/rococo_headers_to_westend.rs @@ -0,0 +1,60 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. 
If not, see . + +//! Rococo-to-Westend headers sync entrypoint. + +use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; + +use bp_header_chain::justification::GrandpaJustification; +use codec::Encode; +use relay_rococo_client::{Rococo, SyncHeader as RococoSyncHeader}; +use relay_substrate_client::{Chain, TransactionSignScheme}; +use relay_utils::metrics::MetricsParams; +use relay_westend_client::{SigningParams as WestendSigningParams, Westend}; +use sp_core::{Bytes, Pair}; + +/// Rococo-to-Westend finality sync pipeline. +pub(crate) type RococoFinalityToWestend = SubstrateFinalityToSubstrate; + +impl SubstrateFinalitySyncPipeline for RococoFinalityToWestend { + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD; + + type TargetChain = Westend; + + fn customize_metrics(params: MetricsParams) -> anyhow::Result { + crate::chains::add_polkadot_kusama_price_metrics::(params) + } + + fn transactions_author(&self) -> bp_westend::AccountId { + (*self.target_sign.public().as_array_ref()).into() + } + + fn make_submit_finality_proof_transaction( + &self, + transaction_nonce: ::Index, + header: RococoSyncHeader, + proof: GrandpaJustification, + ) -> Bytes { + let call = bp_westend::Call::BridgeGrandpaRococo(bp_westend::BridgeGrandpaRococoCall::submit_finality_proof( + header.into_inner(), + proof, + )); + let genesis_hash = *self.target_client.genesis_hash(); + let transaction = Westend::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + + Bytes(transaction.encode()) + } +} diff --git a/polkadot/relays/bin-substrate/src/chains/westend.rs b/polkadot/relays/bin-substrate/src/chains/westend.rs new file mode 100644 index 00000000000..27621472d6d --- /dev/null +++ b/polkadot/relays/bin-substrate/src/chains/westend.rs @@ -0,0 +1,41 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Westend chain specification for CLI. + +use crate::cli::{encode_message, CliChain}; +use frame_support::weights::Weight; +use relay_westend_client::Westend; +use sp_version::RuntimeVersion; + +impl CliChain for Westend { + const RUNTIME_VERSION: RuntimeVersion = bp_westend::VERSION; + + type KeyPair = sp_core::sr25519::Pair; + type MessagePayload = (); + + fn ss58_format() -> u16 { + 42 + } + + fn max_extrinsic_weight() -> Weight { + 0 + } + + fn encode_message(_message: encode_message::MessagePayload) -> Result { + Err("Sending messages from Westend is not yet supported.".into()) + } +} diff --git a/polkadot/relays/bin-substrate/src/chains/westend_headers_to_millau.rs b/polkadot/relays/bin-substrate/src/chains/westend_headers_to_millau.rs new file mode 100644 index 00000000000..1523dc1be58 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/chains/westend_headers_to_millau.rs @@ -0,0 +1,62 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Westend-to-Millau headers sync entrypoint. + +use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; + +use bp_header_chain::justification::GrandpaJustification; +use codec::Encode; +use relay_millau_client::{Millau, SigningParams as MillauSigningParams}; +use relay_substrate_client::{Chain, TransactionSignScheme}; +use relay_utils::metrics::MetricsParams; +use relay_westend_client::{SyncHeader as WestendSyncHeader, Westend}; +use sp_core::{Bytes, Pair}; + +/// Westend-to-Millau finality sync pipeline. +pub(crate) type WestendFinalityToMillau = SubstrateFinalityToSubstrate; + +impl SubstrateFinalitySyncPipeline for WestendFinalityToMillau { + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_westend::BEST_FINALIZED_WESTEND_HEADER_METHOD; + + type TargetChain = Millau; + + fn customize_metrics(params: MetricsParams) -> anyhow::Result { + crate::chains::add_polkadot_kusama_price_metrics::(params) + } + + fn transactions_author(&self) -> bp_millau::AccountId { + (*self.target_sign.public().as_array_ref()).into() + } + + fn make_submit_finality_proof_transaction( + &self, + transaction_nonce: ::Index, + header: WestendSyncHeader, + proof: GrandpaJustification, + ) -> Bytes { + let call = millau_runtime::BridgeGrandpaWestendCall::< + millau_runtime::Runtime, + millau_runtime::WestendGrandpaInstance, + >::submit_finality_proof(header.into_inner(), proof) + .into(); + + let genesis_hash = *self.target_client.genesis_hash(); + let transaction = Millau::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + + 
Bytes(transaction.encode()) + } +} diff --git a/polkadot/relays/bin-substrate/src/chains/westend_headers_to_rococo.rs b/polkadot/relays/bin-substrate/src/chains/westend_headers_to_rococo.rs new file mode 100644 index 00000000000..577a858d922 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/chains/westend_headers_to_rococo.rs @@ -0,0 +1,60 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Westend-to-Rococo headers sync entrypoint. + +use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; + +use bp_header_chain::justification::GrandpaJustification; +use codec::Encode; +use relay_rococo_client::{Rococo, SigningParams as RococoSigningParams}; +use relay_substrate_client::{Chain, TransactionSignScheme}; +use relay_utils::metrics::MetricsParams; +use relay_westend_client::{SyncHeader as WestendSyncHeader, Westend}; +use sp_core::{Bytes, Pair}; + +/// Westend-to-Rococo finality sync pipeline. 
+pub(crate) type WestendFinalityToRococo = SubstrateFinalityToSubstrate; + +impl SubstrateFinalitySyncPipeline for WestendFinalityToRococo { + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_westend::BEST_FINALIZED_WESTEND_HEADER_METHOD; + + type TargetChain = Rococo; + + fn customize_metrics(params: MetricsParams) -> anyhow::Result { + crate::chains::add_polkadot_kusama_price_metrics::(params) + } + + fn transactions_author(&self) -> bp_rococo::AccountId { + (*self.target_sign.public().as_array_ref()).into() + } + + fn make_submit_finality_proof_transaction( + &self, + transaction_nonce: ::Index, + header: WestendSyncHeader, + proof: GrandpaJustification, + ) -> Bytes { + let call = bp_rococo::Call::BridgeGrandpaWestend(bp_rococo::BridgeGrandpaWestendCall::submit_finality_proof( + header.into_inner(), + proof, + )); + let genesis_hash = *self.target_client.genesis_hash(); + let transaction = Rococo::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call); + + Bytes(transaction.encode()) + } +} diff --git a/polkadot/relays/bin-substrate/src/cli/bridge.rs b/polkadot/relays/bin-substrate/src/cli/bridge.rs new file mode 100644 index 00000000000..faf4417d1e9 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/cli/bridge.rs @@ -0,0 +1,96 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use structopt::clap::arg_enum; + +arg_enum! { + #[derive(Debug, PartialEq, Eq)] + /// Supported full bridges (headers + messages). + pub enum FullBridge { + MillauToRialto, + RialtoToMillau, + } +} + +impl FullBridge { + /// Return instance index of the bridge pallet in source runtime. + pub fn bridge_instance_index(&self) -> u8 { + match self { + Self::MillauToRialto => MILLAU_TO_RIALTO_INDEX, + Self::RialtoToMillau => RIALTO_TO_MILLAU_INDEX, + } + } +} + +pub const RIALTO_TO_MILLAU_INDEX: u8 = 0; +pub const MILLAU_TO_RIALTO_INDEX: u8 = 0; + +/// The macro allows executing bridge-specific code without going fully generic. +/// +/// It matches on the [`FullBridge`] enum, sets bridge-specific types or imports and injects +/// the `$generic` code at every variant. +#[macro_export] +macro_rules! select_full_bridge { + ($bridge: expr, $generic: tt) => { + match $bridge { + FullBridge::MillauToRialto => { + type Source = relay_millau_client::Millau; + #[allow(dead_code)] + type Target = relay_rialto_client::Rialto; + + // Derive-account + #[allow(unused_imports)] + use bp_millau::derive_account_from_rialto_id as derive_account; + + // Relay-messages + #[allow(unused_imports)] + use crate::chains::millau_messages_to_rialto::run as relay_messages; + + // Send-message / Estimate-fee + #[allow(unused_imports)] + use bp_rialto::TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD; + // Send-message + #[allow(unused_imports)] + use millau_runtime::rialto_account_ownership_digest as account_ownership_digest; + + $generic + } + FullBridge::RialtoToMillau => { + type Source = relay_rialto_client::Rialto; + #[allow(dead_code)] + type Target = relay_millau_client::Millau; + + // Derive-account + #[allow(unused_imports)] + use bp_rialto::derive_account_from_millau_id as derive_account; + + // Relay-messages + #[allow(unused_imports)] + use 
crate::chains::rialto_messages_to_millau::run as relay_messages; + + // Send-message / Estimate-fee + #[allow(unused_imports)] + use bp_millau::TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD; + + // Send-message + #[allow(unused_imports)] + use rialto_runtime::millau_account_ownership_digest as account_ownership_digest; + + $generic + } + } + }; +} diff --git a/polkadot/relays/bin-substrate/src/cli/derive_account.rs b/polkadot/relays/bin-substrate/src/cli/derive_account.rs new file mode 100644 index 00000000000..92b32b0d479 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/cli/derive_account.rs @@ -0,0 +1,102 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::{bridge::FullBridge, AccountId}; +use crate::select_full_bridge; +use relay_substrate_client::Chain; +use structopt::StructOpt; + +/// Given a source chain `AccountId`, derive the corresponding `AccountId` for the target chain. +/// +/// The (derived) target chain `AccountId` is going to be used as dispatch origin of the call +/// that has been sent over the bridge. +/// This account can also be used to receive target-chain funds (or other form of ownership), +/// since messages sent over the bridge will be able to spend these. 
+#[derive(StructOpt)] +pub struct DeriveAccount { + /// A bridge instance to initalize. + #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + bridge: FullBridge, + /// Source-chain address to derive Target-chain address from. + account: AccountId, +} + +impl DeriveAccount { + /// Parse CLI arguments and derive account. + /// + /// Returns both the Source account in correct SS58 format and the derived account. + fn derive_account(&self) -> (AccountId, AccountId) { + select_full_bridge!(self.bridge, { + let mut account = self.account.clone(); + account.enforce_chain::(); + let acc = bp_runtime::SourceAccount::Account(account.raw_id()); + let id = derive_account(acc); + let derived_account = AccountId::from_raw::(id); + (account, derived_account) + }) + } + + /// Run the command. + pub async fn run(self) -> anyhow::Result<()> { + select_full_bridge!(self.bridge, { + let (account, derived_account) = self.derive_account(); + println!("Source address:\n{} ({})", account, Source::NAME); + println!( + "->Corresponding (derived) address:\n{} ({})", + derived_account, + Target::NAME, + ); + + Ok(()) + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn derive_account_cli(bridge: &str, account: &str) -> (AccountId, AccountId) { + DeriveAccount::from_iter(vec!["derive-account", bridge, account]).derive_account() + } + + #[test] + fn should_derive_accounts_correctly() { + // given + let rialto = "5sauUXUfPjmwxSgmb3tZ5d6yx24eZX4wWJ2JtVUBaQqFbvEU"; + let millau = "752paRyW1EGfq9YLTSSqcSJ5hqnBDidBmaftGhBo8fy6ypW9"; + + // when + let (rialto_parsed, rialto_derived) = derive_account_cli("RialtoToMillau", rialto); + let (millau_parsed, millau_derived) = derive_account_cli("MillauToRialto", millau); + let (millau2_parsed, millau2_derived) = derive_account_cli("MillauToRialto", rialto); + + // then + assert_eq!(format!("{}", rialto_parsed), rialto); + assert_eq!(format!("{}", millau_parsed), millau); + assert_eq!(format!("{}", millau2_parsed), 
millau); + + assert_eq!( + format!("{}", rialto_derived), + "73gLnUwrAdH4vMjbXCiNEpgyz1PLk9JxCaY4cKzvfSZT73KE" + ); + assert_eq!( + format!("{}", millau_derived), + "5rpTJqGv1BPAYy2sXzkPpc3Wx1ZpQtgfuBsrDpNV4HsXAmbi" + ); + assert_eq!(millau_derived, millau2_derived); + } +} diff --git a/polkadot/relays/bin-substrate/src/cli/encode_call.rs b/polkadot/relays/bin-substrate/src/cli/encode_call.rs new file mode 100644 index 00000000000..6e1130cffc1 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/cli/encode_call.rs @@ -0,0 +1,275 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::bridge::FullBridge; +use crate::cli::{AccountId, Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId}; +use crate::select_full_bridge; +use frame_support::dispatch::GetDispatchInfo; +use relay_substrate_client::Chain; +use structopt::StructOpt; + +/// Encode source chain runtime call. +#[derive(StructOpt, Debug)] +pub struct EncodeCall { + /// A bridge instance to encode call for. + #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + bridge: FullBridge, + #[structopt(flatten)] + call: Call, +} + +/// All possible messages that may be delivered to generic Substrate chain. 
+/// +/// Note this enum may be used in the context of both Source (as part of `encode-call`) +/// and Target chain (as part of `encode-message/send-message`). +#[derive(StructOpt, Debug, PartialEq, Eq)] +pub enum Call { + /// Raw bytes for the message + Raw { + /// Raw, SCALE-encoded message + data: HexBytes, + }, + /// Make an on-chain remark (comment). + Remark { + /// Explicit remark payload. + #[structopt(long, conflicts_with("remark-size"))] + remark_payload: Option, + /// Remark size. If not passed, small UTF8-encoded string is generated by relay as remark. + #[structopt(long, conflicts_with("remark-payload"))] + remark_size: Option>, + }, + /// Transfer the specified `amount` of native tokens to a particular `recipient`. + Transfer { + /// Address of an account to receive the transfer. + #[structopt(long)] + recipient: AccountId, + /// Amount of target tokens to send in target chain base currency units. + #[structopt(long)] + amount: Balance, + }, + /// A call to the specific Bridge Messages pallet to queue message to be sent over a bridge. + BridgeSendMessage { + /// An index of the bridge instance which represents the expected target chain. + #[structopt(skip = 255)] + bridge_instance_index: u8, + /// Hex-encoded lane id that should be served by the relay. Defaults to `00000000`. + #[structopt(long, default_value = "00000000")] + lane: HexLaneId, + /// Raw SCALE-encoded Message Payload to submit to the messages pallet. + /// + /// This can be obtained by encoding call for the target chain. + #[structopt(long)] + payload: HexBytes, + /// Declared delivery and dispatch fee in base source-chain currency units. + #[structopt(long)] + fee: Balance, + }, +} + +pub trait CliEncodeCall: Chain { + /// Maximal size (in bytes) of any extrinsic (from the runtime). + fn max_extrinsic_size() -> u32; + + /// Encode a CLI call. 
+ fn encode_call(call: &Call) -> anyhow::Result; +} + +impl EncodeCall { + fn encode(&mut self) -> anyhow::Result { + select_full_bridge!(self.bridge, { + preprocess_call::(&mut self.call, self.bridge.bridge_instance_index()); + let call = Source::encode_call(&self.call)?; + + let encoded = HexBytes::encode(&call); + + log::info!(target: "bridge", "Generated {} call: {:#?}", Source::NAME, call); + log::info!(target: "bridge", "Weight of {} call: {}", Source::NAME, call.get_dispatch_info().weight); + log::info!(target: "bridge", "Encoded {} call: {:?}", Source::NAME, encoded); + + Ok(encoded) + }) + } + + /// Run the command. + pub async fn run(mut self) -> anyhow::Result<()> { + println!("{:?}", self.encode()?); + Ok(()) + } +} + +/// Prepare the call to be passed to [`CliEncodeCall::encode_call`]. +/// +/// This function will fill in all optional and missing pieces and will make sure that +/// values are converted to bridge-specific ones. +/// +/// Most importantly, the method will fill-in [`bridge_instance_index`] parameter for +/// target-chain specific calls. +pub(crate) fn preprocess_call( + call: &mut Call, + bridge_instance: u8, +) { + match *call { + Call::Raw { .. } => {} + Call::Remark { + ref remark_size, + ref mut remark_payload, + } => { + if remark_payload.is_none() { + *remark_payload = Some(HexBytes(generate_remark_payload( + &remark_size, + compute_maximal_message_arguments_size(Source::max_extrinsic_size(), Target::max_extrinsic_size()), + ))); + } + } + Call::Transfer { ref mut recipient, .. } => { + recipient.enforce_chain::(); + } + Call::BridgeSendMessage { + ref mut bridge_instance_index, + .. 
+ } => { + *bridge_instance_index = bridge_instance; + } + }; +} + +fn generate_remark_payload(remark_size: &Option>, maximal_allowed_size: u32) -> Vec { + match remark_size { + Some(ExplicitOrMaximal::Explicit(remark_size)) => vec![0; *remark_size], + Some(ExplicitOrMaximal::Maximal) => vec![0; maximal_allowed_size as _], + None => format!( + "Unix time: {}", + std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .unwrap_or_default() + .as_secs(), + ) + .as_bytes() + .to_vec(), + } +} + +pub(crate) fn compute_maximal_message_arguments_size( + maximal_source_extrinsic_size: u32, + maximal_target_extrinsic_size: u32, +) -> u32 { + // assume that both signed extensions and other arguments fit 1KB + let service_tx_bytes_on_source_chain = 1024; + let maximal_source_extrinsic_size = maximal_source_extrinsic_size - service_tx_bytes_on_source_chain; + let maximal_call_size = + bridge_runtime_common::messages::target::maximal_incoming_message_size(maximal_target_extrinsic_size); + let maximal_call_size = if maximal_call_size > maximal_source_extrinsic_size { + maximal_source_extrinsic_size + } else { + maximal_call_size + }; + + // bytes in Call encoding that are used to encode everything except arguments + let service_bytes = 1 + 1 + 4; + maximal_call_size - service_bytes +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_encode_transfer_call() { + // given + let mut encode_call = EncodeCall::from_iter(vec![ + "encode-call", + "RialtoToMillau", + "transfer", + "--amount", + "12345", + "--recipient", + "5sauUXUfPjmwxSgmb3tZ5d6yx24eZX4wWJ2JtVUBaQqFbvEU", + ]); + + // when + let hex = encode_call.encode().unwrap(); + + // then + assert_eq!( + format!("{:?}", hex), + "0x0c00d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27de5c0" + ); + } + + #[test] + fn should_encode_remark_with_default_payload() { + // given + let mut encode_call = EncodeCall::from_iter(vec!["encode-call", "RialtoToMillau", "remark"]); + + // 
when + let hex = encode_call.encode().unwrap(); + + // then + assert!(format!("{:?}", hex).starts_with("0x070154556e69782074696d653a")); + } + + #[test] + fn should_encode_remark_with_explicit_payload() { + // given + let mut encode_call = EncodeCall::from_iter(vec![ + "encode-call", + "RialtoToMillau", + "remark", + "--remark-payload", + "1234", + ]); + + // when + let hex = encode_call.encode().unwrap(); + + // then + assert_eq!(format!("{:?}", hex), "0x0701081234"); + } + + #[test] + fn should_encode_remark_with_size() { + // given + let mut encode_call = + EncodeCall::from_iter(vec!["encode-call", "RialtoToMillau", "remark", "--remark-size", "12"]); + + // when + let hex = encode_call.encode().unwrap(); + + // then + assert_eq!(format!("{:?}", hex), "0x070130000000000000000000000000"); + } + + #[test] + fn should_disallow_both_payload_and_size() { + // when + let err = EncodeCall::from_iter_safe(vec![ + "encode-call", + "RialtoToMillau", + "remark", + "--remark-payload", + "1234", + "--remark-size", + "12", + ]) + .unwrap_err(); + + // then + assert_eq!(err.kind, structopt::clap::ErrorKind::ArgumentConflict); + + let info = err.info.unwrap(); + assert!(info.contains(&"remark-payload".to_string()) | info.contains(&"remark-size".to_string())) + } +} diff --git a/polkadot/relays/bin-substrate/src/cli/encode_message.rs b/polkadot/relays/bin-substrate/src/cli/encode_message.rs new file mode 100644 index 00000000000..a29aa8597d6 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/cli/encode_message.rs @@ -0,0 +1,106 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::{bridge::FullBridge, AccountId, CliChain, HexBytes}; +use crate::select_full_bridge; +use structopt::StructOpt; + +/// Generic message payload. +#[derive(StructOpt, Debug, PartialEq, Eq)] +pub enum MessagePayload { + /// Raw, SCALE-encoded `MessagePayload`. + Raw { + /// Hex-encoded SCALE data. + data: HexBytes, + }, + /// Construct message to send over the bridge. + Call { + /// Message details. + #[structopt(flatten)] + call: crate::cli::encode_call::Call, + /// SS58 encoded Source account that will send the payload. + #[structopt(long)] + sender: AccountId, + }, +} + +/// A `MessagePayload` to encode. +#[derive(StructOpt)] +pub struct EncodeMessage { + /// A bridge instance to initalize. + #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + bridge: FullBridge, + #[structopt(flatten)] + payload: MessagePayload, +} + +impl EncodeMessage { + /// Run the command. + pub fn encode(self) -> anyhow::Result { + select_full_bridge!(self.bridge, { + let payload = Source::encode_message(self.payload).map_err(|e| anyhow::format_err!("{}", e))?; + Ok(HexBytes::encode(&payload)) + }) + } + + /// Run the command. 
+ pub async fn run(self) -> anyhow::Result<()> { + let payload = self.encode()?; + println!("{:?}", payload); + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_core::crypto::Ss58Codec; + + #[test] + fn should_encode_raw_message() { + // given + let msg = "01000000e88514000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d3c040130000000000000000000000000"; + let encode_message = EncodeMessage::from_iter(vec!["encode-message", "MillauToRialto", "raw", msg]); + + // when + let hex = encode_message.encode().unwrap(); + + // then + assert_eq!(format!("{:?}", hex), format!("0x{}", msg)); + } + + #[test] + fn should_encode_remark_with_size() { + // given + let sender = sp_keyring::AccountKeyring::Alice.to_account_id().to_ss58check(); + let encode_message = EncodeMessage::from_iter(vec![ + "encode-message", + "RialtoToMillau", + "call", + "--sender", + &sender, + "remark", + "--remark-size", + "12", + ]); + + // when + let hex = encode_message.encode().unwrap(); + + // then + assert_eq!(format!("{:?}", hex), "0x01000000e88514000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d3c040130000000000000000000000000"); + } +} diff --git a/polkadot/relays/bin-substrate/src/cli/estimate_fee.rs b/polkadot/relays/bin-substrate/src/cli/estimate_fee.rs new file mode 100644 index 00000000000..4e39ad351ed --- /dev/null +++ b/polkadot/relays/bin-substrate/src/cli/estimate_fee.rs @@ -0,0 +1,128 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::bridge::FullBridge; +use crate::cli::{Balance, CliChain, HexBytes, HexLaneId, SourceConnectionParams}; +use crate::select_full_bridge; +use codec::{Decode, Encode}; +use relay_substrate_client::{Chain, ChainWithBalances}; +use structopt::StructOpt; + +/// Estimate Delivery & Dispatch Fee command. +#[derive(StructOpt, Debug, PartialEq, Eq)] +pub struct EstimateFee { + /// A bridge instance to encode call for. + #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + bridge: FullBridge, + #[structopt(flatten)] + source: SourceConnectionParams, + /// Hex-encoded id of lane that will be delivering the message. + #[structopt(long, default_value = "00000000")] + lane: HexLaneId, + /// Payload to send over the bridge. + #[structopt(flatten)] + payload: crate::cli::encode_message::MessagePayload, +} + +impl EstimateFee { + /// Run the command. 
+ pub async fn run(self) -> anyhow::Result<()> { + let Self { + source, + bridge, + lane, + payload, + } = self; + + select_full_bridge!(bridge, { + let source_client = source.to_client::().await?; + let lane = lane.into(); + let payload = Source::encode_message(payload).map_err(|e| anyhow::format_err!("{:?}", e))?; + + let fee: ::NativeBalance = + estimate_message_delivery_and_dispatch_fee(&source_client, ESTIMATE_MESSAGE_FEE_METHOD, lane, payload) + .await?; + + log::info!(target: "bridge", "Fee: {:?}", Balance(fee as _)); + println!("{}", fee); + Ok(()) + }) + } +} + +pub(crate) async fn estimate_message_delivery_and_dispatch_fee( + client: &relay_substrate_client::Client, + estimate_fee_method: &str, + lane: bp_messages::LaneId, + payload: P, +) -> anyhow::Result { + let encoded_response = client + .state_call(estimate_fee_method.into(), (lane, payload).encode().into(), None) + .await?; + let decoded_response: Option = + Decode::decode(&mut &encoded_response.0[..]).map_err(relay_substrate_client::Error::ResponseParseFailed)?; + let fee = decoded_response + .ok_or_else(|| anyhow::format_err!("Unable to decode fee from: {:?}", HexBytes(encoded_response.to_vec())))?; + Ok(fee) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::cli::encode_call; + use sp_core::crypto::Ss58Codec; + + #[test] + fn should_parse_cli_options() { + // given + let alice = sp_keyring::AccountKeyring::Alice.to_account_id().to_ss58check(); + + // when + let res = EstimateFee::from_iter(vec![ + "estimate_fee", + "RialtoToMillau", + "--source-port", + "1234", + "call", + "--sender", + &alice, + "remark", + "--remark-payload", + "1234", + ]); + + // then + assert_eq!( + res, + EstimateFee { + bridge: FullBridge::RialtoToMillau, + lane: HexLaneId([0, 0, 0, 0]), + source: SourceConnectionParams { + source_host: "127.0.0.1".into(), + source_port: 1234, + source_secure: false, + }, + payload: crate::cli::encode_message::MessagePayload::Call { + sender: alice.parse().unwrap(), + call: 
encode_call::Call::Remark { + remark_payload: Some(HexBytes(vec![0x12, 0x34])), + remark_size: None, + } + } + } + ); + } +} diff --git a/polkadot/relays/bin-substrate/src/cli/init_bridge.rs b/polkadot/relays/bin-substrate/src/cli/init_bridge.rs new file mode 100644 index 00000000000..cdd8ec36916 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/cli/init_bridge.rs @@ -0,0 +1,162 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::{SourceConnectionParams, TargetConnectionParams, TargetSigningParams}; +use bp_header_chain::InitializationData; +use bp_runtime::Chain as ChainBase; +use codec::Encode; +use relay_substrate_client::{Chain, TransactionSignScheme}; +use sp_core::{Bytes, Pair}; +use structopt::{clap::arg_enum, StructOpt}; + +/// Initialize bridge pallet. +#[derive(StructOpt)] +pub struct InitBridge { + /// A bridge instance to initalize. + #[structopt(possible_values = &InitBridgeName::variants(), case_insensitive = true)] + bridge: InitBridgeName, + #[structopt(flatten)] + source: SourceConnectionParams, + #[structopt(flatten)] + target: TargetConnectionParams, + #[structopt(flatten)] + target_sign: TargetSigningParams, +} + +// TODO [#851] Use kebab-case. +arg_enum! { + #[derive(Debug)] + /// Bridge to initialize. 
+ pub enum InitBridgeName { + MillauToRialto, + RialtoToMillau, + WestendToMillau, + WestendToRococo, + RococoToWestend, + } +} + +macro_rules! select_bridge { + ($bridge: expr, $generic: tt) => { + match $bridge { + InitBridgeName::MillauToRialto => { + type Source = relay_millau_client::Millau; + type Target = relay_rialto_client::Rialto; + + fn encode_init_bridge( + init_data: InitializationData<::Header>, + ) -> ::Call { + rialto_runtime::SudoCall::sudo(Box::new( + rialto_runtime::BridgeGrandpaMillauCall::initialize(init_data).into(), + )) + .into() + } + + $generic + } + InitBridgeName::RialtoToMillau => { + type Source = relay_rialto_client::Rialto; + type Target = relay_millau_client::Millau; + + fn encode_init_bridge( + init_data: InitializationData<::Header>, + ) -> ::Call { + let initialize_call = millau_runtime::BridgeGrandpaRialtoCall::< + millau_runtime::Runtime, + millau_runtime::RialtoGrandpaInstance, + >::initialize(init_data); + millau_runtime::SudoCall::sudo(Box::new(initialize_call.into())).into() + } + + $generic + } + InitBridgeName::WestendToMillau => { + type Source = relay_westend_client::Westend; + type Target = relay_millau_client::Millau; + + fn encode_init_bridge( + init_data: InitializationData<::Header>, + ) -> ::Call { + // at Westend -> Millau initialization we're not using sudo, because otherwise our deployments + // may fail, because we need to initialize both Rialto -> Millau and Westend -> Millau bridge. 
+ // => since there's single possible sudo account, one of transaction may fail with duplicate nonce error + millau_runtime::BridgeGrandpaWestendCall::< + millau_runtime::Runtime, + millau_runtime::WestendGrandpaInstance, + >::initialize(init_data) + .into() + } + + $generic + } + InitBridgeName::WestendToRococo => { + type Source = relay_westend_client::Westend; + type Target = relay_rococo_client::Rococo; + + fn encode_init_bridge( + init_data: InitializationData<::Header>, + ) -> ::Call { + bp_rococo::Call::BridgeGrandpaWestend(bp_rococo::BridgeGrandpaWestendCall::initialize(init_data)) + } + + $generic + } + InitBridgeName::RococoToWestend => { + type Source = relay_rococo_client::Rococo; + type Target = relay_westend_client::Westend; + + fn encode_init_bridge( + init_data: InitializationData<::Header>, + ) -> ::Call { + bp_westend::Call::BridgeGrandpaRococo(bp_westend::BridgeGrandpaRococoCall::initialize(init_data)) + } + + $generic + } + } + }; +} + +impl InitBridge { + /// Run the command. + pub async fn run(self) -> anyhow::Result<()> { + select_bridge!(self.bridge, { + let source_client = self.source.to_client::().await?; + let target_client = self.target.to_client::().await?; + let target_sign = self.target_sign.to_keypair::()?; + + crate::headers_initialize::initialize( + source_client, + target_client.clone(), + target_sign.public().into(), + move |transaction_nonce, initialization_data| { + Bytes( + Target::sign_transaction( + *target_client.genesis_hash(), + &target_sign, + transaction_nonce, + encode_init_bridge(initialization_data), + ) + .encode(), + ) + }, + ) + .await; + + Ok(()) + }) + } +} diff --git a/polkadot/relays/bin-substrate/src/cli/mod.rs b/polkadot/relays/bin-substrate/src/cli/mod.rs new file mode 100644 index 00000000000..505ef11ee2a --- /dev/null +++ b/polkadot/relays/bin-substrate/src/cli/mod.rs @@ -0,0 +1,444 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Deal with CLI args of substrate-to-substrate relay. + +use std::convert::TryInto; + +use bp_messages::LaneId; +use codec::{Decode, Encode}; +use frame_support::weights::Weight; +use sp_runtime::app_crypto::Ss58Codec; +use structopt::{clap::arg_enum, StructOpt}; + +pub(crate) mod bridge; +pub(crate) mod encode_call; +pub(crate) mod encode_message; +pub(crate) mod estimate_fee; +pub(crate) mod send_message; + +mod derive_account; +mod init_bridge; +mod relay_headers; +mod relay_headers_and_messages; +mod relay_messages; + +/// Parse relay CLI args. +pub fn parse_args() -> Command { + Command::from_args() +} + +/// Substrate-to-Substrate bridge utilities. +#[derive(StructOpt)] +#[structopt(about = "Substrate-to-Substrate relay")] +pub enum Command { + /// Start headers relay between two chains. + /// + /// The on-chain bridge component should have been already initialized with + /// `init-bridge` sub-command. + RelayHeaders(relay_headers::RelayHeaders), + /// Start messages relay between two chains. + /// + /// Ties up to `Messages` pallets on both chains and starts relaying messages. + /// Requires the header relay to be already running. + RelayMessages(relay_messages::RelayMessages), + /// Start headers and messages relay between two Substrate chains. 
+ /// + /// This high-level relay internally starts four low-level relays: two `RelayHeaders` + /// and two `RelayMessages` relays. Headers are only relayed when they are required by + /// the message relays - i.e. when there are messages or confirmations that needs to be + /// relayed between chains. + RelayHeadersAndMessages(relay_headers_and_messages::RelayHeadersAndMessages), + /// Initialize on-chain bridge pallet with current header data. + /// + /// Sends initialization transaction to bootstrap the bridge with current finalized block data. + InitBridge(init_bridge::InitBridge), + /// Send custom message over the bridge. + /// + /// Allows interacting with the bridge by sending messages over `Messages` component. + /// The message is being sent to the source chain, delivered to the target chain and dispatched + /// there. + SendMessage(send_message::SendMessage), + /// Generate SCALE-encoded `Call` for choosen network. + /// + /// The call can be used either as message payload or can be wrapped into a transaction + /// and executed on the chain directly. + EncodeCall(encode_call::EncodeCall), + /// Generate SCALE-encoded `MessagePayload` object that can be sent over selected bridge. + /// + /// The `MessagePayload` can be then fed to `Messages::send_message` function and sent over + /// the bridge. + EncodeMessage(encode_message::EncodeMessage), + /// Estimate Delivery and Dispatch Fee required for message submission to messages pallet. + EstimateFee(estimate_fee::EstimateFee), + /// Given a source chain `AccountId`, derive the corresponding `AccountId` for the target chain. + DeriveAccount(derive_account::DeriveAccount), +} + +impl Command { + /// Run the command. 
+ pub async fn run(self) -> anyhow::Result<()> { + match self { + Self::RelayHeaders(arg) => arg.run().await?, + Self::RelayMessages(arg) => arg.run().await?, + Self::RelayHeadersAndMessages(arg) => arg.run().await?, + Self::InitBridge(arg) => arg.run().await?, + Self::SendMessage(arg) => arg.run().await?, + Self::EncodeCall(arg) => arg.run().await?, + Self::EncodeMessage(arg) => arg.run().await?, + Self::EstimateFee(arg) => arg.run().await?, + Self::DeriveAccount(arg) => arg.run().await?, + } + Ok(()) + } +} + +arg_enum! { + #[derive(Debug)] + /// The origin to use when dispatching the message on the target chain. + /// + /// - `Target` uses account existing on the target chain (requires target private key). + /// - `Origin` uses account derived from the source-chain account. + pub enum Origins { + Target, + Source, + } +} + +/// Generic balance type. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct Balance(pub u128); + +impl std::fmt::Display for Balance { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + use num_format::{Locale, ToFormattedString}; + write!(fmt, "{}", self.0.to_formatted_string(&Locale::en)) + } +} + +impl std::str::FromStr for Balance { + type Err = ::Err; + + fn from_str(s: &str) -> Result { + Ok(Self(s.parse()?)) + } +} + +impl Balance { + /// Cast balance to `u64` type, panicking if it's too large. + pub fn cast(&self) -> u64 { + self.0.try_into().expect("Balance is too high for this chain.") + } +} + +/// Generic account id with custom parser. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct AccountId { + account: sp_runtime::AccountId32, + ss58_format: sp_core::crypto::Ss58AddressFormat, +} + +impl std::fmt::Display for AccountId { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(fmt, "{}", self.account.to_ss58check_with_version(self.ss58_format)) + } +} + +impl std::str::FromStr for AccountId { + type Err = String; + + fn from_str(s: &str) -> Result { + let (account, ss58_format) = sp_runtime::AccountId32::from_ss58check_with_version(s) + .map_err(|err| format!("Unable to decode SS58 address: {:?}", err))?; + Ok(Self { account, ss58_format }) + } +} + +const SS58_FORMAT_PROOF: &str = "u16 -> Ss58Format is infallible; qed"; + +impl AccountId { + /// Create new SS58-formatted address from raw account id. + pub fn from_raw(account: sp_runtime::AccountId32) -> Self { + Self { + account, + ss58_format: T::ss58_format().try_into().expect(SS58_FORMAT_PROOF), + } + } + + /// Enforces formatting account to be for given [`CliChain`] type. + /// + /// This will change the `ss58format` of the account to match the requested one. + /// Note that a warning will be produced in case the current format does not match + /// the requested one, but the conversion always succeeds. + pub fn enforce_chain(&mut self) { + let original = self.clone(); + self.ss58_format = T::ss58_format().try_into().expect(SS58_FORMAT_PROOF); + log::debug!("{} SS58 format: {} (RAW: {})", self, self.ss58_format, self.account); + if original.ss58_format != self.ss58_format { + log::warn!( + target: "bridge", + "Address {} does not seem to match {}'s SS58 format (got: {}, expected: {}).\nConverted to: {}", + original, + T::NAME, + original.ss58_format, + self.ss58_format, + self, + ) + } + } + + /// Returns the raw (no SS58-prefixed) account id. + pub fn raw_id(&self) -> sp_runtime::AccountId32 { + self.account.clone() + } +} + +/// Bridge-supported network definition. +/// +/// Used to abstract away CLI commands. 
+pub trait CliChain: relay_substrate_client::Chain { + /// Chain's current version of the runtime. + const RUNTIME_VERSION: sp_version::RuntimeVersion; + + /// Crypto keypair type used to send messages. + /// + /// In case of chains supporting multiple cryptos, pick one used by the CLI. + type KeyPair: sp_core::crypto::Pair; + + /// Bridge Message Payload type. + /// + /// TODO [#854] This should be removed in favour of target-specifc types. + type MessagePayload; + + /// Numeric value of SS58 format. + fn ss58_format() -> u16; + + /// Construct message payload to be sent over the bridge. + fn encode_message(message: crate::cli::encode_message::MessagePayload) -> Result; + + /// Maximal extrinsic weight (from the runtime). + fn max_extrinsic_weight() -> Weight; +} + +/// Lane id. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct HexLaneId(pub LaneId); + +impl From for LaneId { + fn from(lane_id: HexLaneId) -> LaneId { + lane_id.0 + } +} + +impl std::str::FromStr for HexLaneId { + type Err = hex::FromHexError; + + fn from_str(s: &str) -> Result { + let mut lane_id = LaneId::default(); + hex::decode_to_slice(s, &mut lane_id)?; + Ok(HexLaneId(lane_id)) + } +} + +/// Nicer formatting for raw bytes vectors. +#[derive(Default, Encode, Decode, PartialEq, Eq)] +pub struct HexBytes(pub Vec); + +impl std::str::FromStr for HexBytes { + type Err = hex::FromHexError; + + fn from_str(s: &str) -> Result { + Ok(Self(hex::decode(s)?)) + } +} + +impl std::fmt::Debug for HexBytes { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(fmt, "0x{}", self) + } +} + +impl std::fmt::Display for HexBytes { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(fmt, "{}", hex::encode(&self.0)) + } +} + +impl HexBytes { + /// Encode given object and wrap into nicely formatted bytes. + pub fn encode(t: &T) -> Self { + Self(t.encode()) + } +} + +/// Prometheus metrics params. 
+#[derive(StructOpt)] +pub struct PrometheusParams { + /// Do not expose a Prometheus metric endpoint. + #[structopt(long)] + pub no_prometheus: bool, + /// Expose Prometheus endpoint at given interface. + #[structopt(long, default_value = "127.0.0.1")] + pub prometheus_host: String, + /// Expose Prometheus endpoint at given port. + #[structopt(long, default_value = "9616")] + pub prometheus_port: u16, +} + +impl From for relay_utils::metrics::MetricsParams { + fn from(cli_params: PrometheusParams) -> relay_utils::metrics::MetricsParams { + if !cli_params.no_prometheus { + Some(relay_utils::metrics::MetricsAddress { + host: cli_params.prometheus_host, + port: cli_params.prometheus_port, + }) + .into() + } else { + None.into() + } + } +} + +/// Either explicit or maximal allowed value. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum ExplicitOrMaximal { + /// User has explicitly specified argument value. + Explicit(V), + /// Maximal allowed value for this argument. + Maximal, +} + +impl std::str::FromStr for ExplicitOrMaximal +where + V::Err: std::fmt::Debug, +{ + type Err = String; + + fn from_str(s: &str) -> Result { + if s.to_lowercase() == "max" { + return Ok(ExplicitOrMaximal::Maximal); + } + + V::from_str(s) + .map(ExplicitOrMaximal::Explicit) + .map_err(|e| format!("Failed to parse '{:?}'. Expected 'max' or explicit value", e)) + } +} + +/// Create chain-specific set of configuration objects: connection parameters, +/// signing parameters and bridge initialisation parameters. +#[macro_export] +macro_rules! declare_chain_options { + ($chain:ident, $chain_prefix:ident) => { + paste::item! 
{ + #[doc = $chain " connection params."] + #[derive(StructOpt, Debug, PartialEq, Eq)] + pub struct [<$chain ConnectionParams>] { + #[doc = "Connect to " $chain " node at given host."] + #[structopt(long, default_value = "127.0.0.1")] + pub [<$chain_prefix _host>]: String, + #[doc = "Connect to " $chain " node websocket server at given port."] + #[structopt(long)] + pub [<$chain_prefix _port>]: u16, + #[doc = "Use secure websocket connection."] + #[structopt(long)] + pub [<$chain_prefix _secure>]: bool, + } + + #[doc = $chain " signing params."] + #[derive(StructOpt, Debug, PartialEq, Eq)] + pub struct [<$chain SigningParams>] { + #[doc = "The SURI of secret key to use when transactions are submitted to the " $chain " node."] + #[structopt(long)] + pub [<$chain_prefix _signer>]: String, + #[doc = "The password for the SURI of secret key to use when transactions are submitted to the " $chain " node."] + #[structopt(long)] + pub [<$chain_prefix _signer_password>]: Option, + } + + impl [<$chain SigningParams>] { + /// Parse signing params into chain-specific KeyPair. + pub fn to_keypair(&self) -> anyhow::Result { + use sp_core::crypto::Pair; + + Chain::KeyPair::from_string( + &self.[<$chain_prefix _signer>], + self.[<$chain_prefix _signer_password>].as_deref() + ).map_err(|e| anyhow::format_err!("{:?}", e)) + } + } + + impl [<$chain ConnectionParams>] { + /// Convert connection params into Substrate client. + pub async fn to_client( + &self, + ) -> anyhow::Result> { + Ok(relay_substrate_client::Client::new(relay_substrate_client::ConnectionParams { + host: self.[<$chain_prefix _host>].clone(), + port: self.[<$chain_prefix _port>], + secure: self.[<$chain_prefix _secure>], + }) + .await? 
+ ) + } + } + } + }; +} + +declare_chain_options!(Source, source); +declare_chain_options!(Target, target); + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use super::*; + + #[test] + fn should_format_addresses_with_ss58_format() { + // given + let rialto1 = "5sauUXUfPjmwxSgmb3tZ5d6yx24eZX4wWJ2JtVUBaQqFbvEU"; + let rialto2 = "5rERgaT1Z8nM3et2epA5i1VtEBfp5wkhwHtVE8HK7BRbjAH2"; + let millau1 = "752paRyW1EGfq9YLTSSqcSJ5hqnBDidBmaftGhBo8fy6ypW9"; + let millau2 = "74GNQjmkcfstRftSQPJgMREchqHM56EvAUXRc266cZ1NYVW5"; + + let expected = vec![rialto1, rialto2, millau1, millau2]; + + // when + let parsed = expected + .iter() + .map(|s| AccountId::from_str(s).unwrap()) + .collect::>(); + + let actual = parsed.iter().map(|a| format!("{}", a)).collect::>(); + + assert_eq!(actual, expected) + } + + #[test] + fn hex_bytes_display_matches_from_str_for_clap() { + // given + let hex = HexBytes(vec![1, 2, 3, 4]); + let display = format!("{}", hex); + + // when + let hex2: HexBytes = display.parse().unwrap(); + + // then + assert_eq!(hex.0, hex2.0); + } +} diff --git a/polkadot/relays/bin-substrate/src/cli/relay_headers.rs b/polkadot/relays/bin-substrate/src/cli/relay_headers.rs new file mode 100644 index 00000000000..346790f2ae7 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/cli/relay_headers.rs @@ -0,0 +1,110 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::{PrometheusParams, SourceConnectionParams, TargetConnectionParams, TargetSigningParams}; +use crate::finality_pipeline::SubstrateFinalitySyncPipeline; +use structopt::{clap::arg_enum, StructOpt}; + +/// Start headers relayer process. +#[derive(StructOpt)] +pub struct RelayHeaders { + /// A bridge instance to relay headers for. + #[structopt(possible_values = &RelayHeadersBridge::variants(), case_insensitive = true)] + bridge: RelayHeadersBridge, + #[structopt(flatten)] + source: SourceConnectionParams, + #[structopt(flatten)] + target: TargetConnectionParams, + #[structopt(flatten)] + target_sign: TargetSigningParams, + #[structopt(flatten)] + prometheus_params: PrometheusParams, +} + +// TODO [#851] Use kebab-case. +arg_enum! { + #[derive(Debug)] + /// Headers relay bridge. + pub enum RelayHeadersBridge { + MillauToRialto, + RialtoToMillau, + WestendToMillau, + WestendToRococo, + RococoToWestend, + } +} + +macro_rules! 
select_bridge { + ($bridge: expr, $generic: tt) => { + match $bridge { + RelayHeadersBridge::MillauToRialto => { + type Source = relay_millau_client::Millau; + type Target = relay_rialto_client::Rialto; + type Finality = crate::chains::millau_headers_to_rialto::MillauFinalityToRialto; + + $generic + } + RelayHeadersBridge::RialtoToMillau => { + type Source = relay_rialto_client::Rialto; + type Target = relay_millau_client::Millau; + type Finality = crate::chains::rialto_headers_to_millau::RialtoFinalityToMillau; + + $generic + } + RelayHeadersBridge::WestendToMillau => { + type Source = relay_westend_client::Westend; + type Target = relay_millau_client::Millau; + type Finality = crate::chains::westend_headers_to_millau::WestendFinalityToMillau; + + $generic + } + RelayHeadersBridge::WestendToRococo => { + type Source = relay_westend_client::Westend; + type Target = relay_rococo_client::Rococo; + type Finality = crate::chains::westend_headers_to_rococo::WestendFinalityToRococo; + + $generic + } + RelayHeadersBridge::RococoToWestend => { + type Source = relay_rococo_client::Rococo; + type Target = relay_westend_client::Westend; + type Finality = crate::chains::rococo_headers_to_westend::RococoFinalityToWestend; + + $generic + } + } + }; +} + +impl RelayHeaders { + /// Run the command. 
+ pub async fn run(self) -> anyhow::Result<()> { + select_bridge!(self.bridge, { + let source_client = self.source.to_client::().await?; + let target_client = self.target.to_client::().await?; + let target_sign = self.target_sign.to_keypair::()?; + let metrics_params = Finality::customize_metrics(self.prometheus_params.into())?; + + crate::finality_pipeline::run( + Finality::new(target_client.clone(), target_sign), + source_client, + target_client, + metrics_params, + ) + .await + }) + } +} diff --git a/polkadot/relays/bin-substrate/src/cli/relay_headers_and_messages.rs b/polkadot/relays/bin-substrate/src/cli/relay_headers_and_messages.rs new file mode 100644 index 00000000000..98ff1268fae --- /dev/null +++ b/polkadot/relays/bin-substrate/src/cli/relay_headers_and_messages.rs @@ -0,0 +1,183 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Complex headers+messages relays support. +//! +//! To add new complex relay between `ChainA` and `ChainB`, you must: +//! +//! 1) ensure that there's a `declare_chain_options!(...)` for both chains; +//! 2) add `declare_bridge_options!(...)` for the bridge; +//! 3) add bridge support to the `select_bridge! { ... }` macro. 
+ +use crate::cli::{CliChain, HexLaneId, PrometheusParams}; +use crate::declare_chain_options; +use crate::messages_lane::MessagesRelayParams; +use crate::on_demand_headers::OnDemandHeadersRelay; + +use futures::{FutureExt, TryFutureExt}; +use relay_utils::metrics::MetricsParams; +use structopt::StructOpt; + +/// Start headers+messages relayer process. +#[derive(StructOpt)] +pub enum RelayHeadersAndMessages { + MillauRialto(MillauRialtoHeadersAndMessages), +} + +/// Parameters that have the same names across all bridges. +#[derive(StructOpt)] +pub struct HeadersAndMessagesSharedParams { + /// Hex-encoded lane id that should be served by the relay. Defaults to `00000000`. + #[structopt(long, default_value = "00000000")] + lane: HexLaneId, + #[structopt(flatten)] + prometheus_params: PrometheusParams, +} + +// The reason behind this macro is that 'normal' relays are using source and target chains terminology, +// which is unusable for both-way relays (if you're relaying headers from Rialto to Millau and from +// Millau to Rialto, then which chain is source?). +macro_rules! declare_bridge_options { + ($chain1:ident, $chain2:ident) => { + paste::item! { + #[doc = $chain1 " and " $chain2 " headers+messages relay params."] + #[derive(StructOpt)] + pub struct [<$chain1 $chain2 HeadersAndMessages>] { + #[structopt(flatten)] + shared: HeadersAndMessagesSharedParams, + #[structopt(flatten)] + left: [<$chain1 ConnectionParams>], + #[structopt(flatten)] + left_sign: [<$chain1 SigningParams>], + #[structopt(flatten)] + right: [<$chain2 ConnectionParams>], + #[structopt(flatten)] + right_sign: [<$chain2 SigningParams>], + } + + #[allow(unreachable_patterns)] + impl From for [<$chain1 $chain2 HeadersAndMessages>] { + fn from(relay_params: RelayHeadersAndMessages) -> [<$chain1 $chain2 HeadersAndMessages>] { + match relay_params { + RelayHeadersAndMessages::[<$chain1 $chain2>](params) => params, + _ => unreachable!(), + } + } + } + } + }; +} + +macro_rules! 
select_bridge { + ($bridge: expr, $generic: tt) => { + match $bridge { + RelayHeadersAndMessages::MillauRialto(_) => { + type Params = MillauRialtoHeadersAndMessages; + + type Left = relay_millau_client::Millau; + type Right = relay_rialto_client::Rialto; + + type LeftToRightFinality = crate::chains::millau_headers_to_rialto::MillauFinalityToRialto; + type RightToLeftFinality = crate::chains::rialto_headers_to_millau::RialtoFinalityToMillau; + + type LeftToRightMessages = crate::chains::millau_messages_to_rialto::MillauMessagesToRialto; + type RightToLeftMessages = crate::chains::rialto_messages_to_millau::RialtoMessagesToMillau; + + use crate::chains::millau_messages_to_rialto::run as left_to_right_messages; + use crate::chains::rialto_messages_to_millau::run as right_to_left_messages; + + $generic + } + } + }; +} + +// All supported chains. +declare_chain_options!(Millau, millau); +declare_chain_options!(Rialto, rialto); +// All supported bridges. +declare_bridge_options!(Millau, Rialto); + +impl RelayHeadersAndMessages { + /// Run the command. 
+ pub async fn run(self) -> anyhow::Result<()> { + select_bridge!(self, { + let params: Params = self.into(); + + let left_client = params.left.to_client::().await?; + let left_sign = params.left_sign.to_keypair::()?; + let right_client = params.right.to_client::().await?; + let right_sign = params.right_sign.to_keypair::()?; + + let lane = params.shared.lane.into(); + + let metrics_params: MetricsParams = params.shared.prometheus_params.into(); + let metrics_params = relay_utils::relay_metrics(None, metrics_params).into_params(); + + let left_to_right_on_demand_headers = OnDemandHeadersRelay::new( + left_client.clone(), + right_client.clone(), + LeftToRightFinality::new(right_client.clone(), right_sign.clone()), + ); + let right_to_left_on_demand_headers = OnDemandHeadersRelay::new( + right_client.clone(), + left_client.clone(), + RightToLeftFinality::new(left_client.clone(), left_sign.clone()), + ); + + let left_to_right_messages = left_to_right_messages(MessagesRelayParams { + source_client: left_client.clone(), + source_sign: left_sign.clone(), + target_client: right_client.clone(), + target_sign: right_sign.clone(), + source_to_target_headers_relay: Some(left_to_right_on_demand_headers.clone()), + target_to_source_headers_relay: Some(right_to_left_on_demand_headers.clone()), + lane_id: lane, + metrics_params: metrics_params + .clone() + .disable() + .metrics_prefix(messages_relay::message_lane_loop::metrics_prefix::(&lane)), + }) + .map_err(|e| anyhow::format_err!("{}", e)) + .boxed(); + let right_to_left_messages = right_to_left_messages(MessagesRelayParams { + source_client: right_client, + source_sign: right_sign, + target_client: left_client.clone(), + target_sign: left_sign.clone(), + source_to_target_headers_relay: Some(right_to_left_on_demand_headers), + target_to_source_headers_relay: Some(left_to_right_on_demand_headers), + lane_id: lane, + metrics_params: metrics_params + .clone() + .disable() + 
.metrics_prefix(messages_relay::message_lane_loop::metrics_prefix::(&lane)), + }) + .map_err(|e| anyhow::format_err!("{}", e)) + .boxed(); + + relay_utils::relay_metrics(None, metrics_params) + .expose() + .await + .map_err(|e| anyhow::format_err!("{}", e))?; + + futures::future::select(left_to_right_messages, right_to_left_messages) + .await + .factor_first() + .0 + }) + } +} diff --git a/polkadot/relays/bin-substrate/src/cli/relay_messages.rs b/polkadot/relays/bin-substrate/src/cli/relay_messages.rs new file mode 100644 index 00000000000..94630886ca3 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/cli/relay_messages.rs @@ -0,0 +1,71 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::bridge::FullBridge; +use crate::cli::{ + HexLaneId, PrometheusParams, SourceConnectionParams, SourceSigningParams, TargetConnectionParams, + TargetSigningParams, +}; +use crate::messages_lane::MessagesRelayParams; +use crate::select_full_bridge; + +use structopt::StructOpt; + +/// Start messages relayer process. +#[derive(StructOpt)] +pub struct RelayMessages { + /// A bridge instance to relay messages for. 
+ #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + bridge: FullBridge, + /// Hex-encoded lane id that should be served by the relay. Defaults to `00000000`. + #[structopt(long, default_value = "00000000")] + lane: HexLaneId, + #[structopt(flatten)] + source: SourceConnectionParams, + #[structopt(flatten)] + source_sign: SourceSigningParams, + #[structopt(flatten)] + target: TargetConnectionParams, + #[structopt(flatten)] + target_sign: TargetSigningParams, + #[structopt(flatten)] + prometheus_params: PrometheusParams, +} + +impl RelayMessages { + /// Run the command. + pub async fn run(self) -> anyhow::Result<()> { + select_full_bridge!(self.bridge, { + let source_client = self.source.to_client::().await?; + let source_sign = self.source_sign.to_keypair::()?; + let target_client = self.target.to_client::().await?; + let target_sign = self.target_sign.to_keypair::()?; + + relay_messages(MessagesRelayParams { + source_client, + source_sign, + target_client, + target_sign, + source_to_target_headers_relay: None, + target_to_source_headers_relay: None, + lane_id: self.lane.into(), + metrics_params: self.prometheus_params.into(), + }) + .await + .map_err(|e| anyhow::format_err!("{}", e)) + }) + } +} diff --git a/polkadot/relays/bin-substrate/src/cli/send_message.rs b/polkadot/relays/bin-substrate/src/cli/send_message.rs new file mode 100644 index 00000000000..64448f0f1db --- /dev/null +++ b/polkadot/relays/bin-substrate/src/cli/send_message.rs @@ -0,0 +1,317 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::cli::bridge::FullBridge; +use crate::cli::encode_call::{self, CliEncodeCall}; +use crate::cli::estimate_fee::estimate_message_delivery_and_dispatch_fee; +use crate::cli::{ + Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId, Origins, SourceConnectionParams, SourceSigningParams, + TargetSigningParams, +}; +use codec::Encode; +use frame_support::{dispatch::GetDispatchInfo, weights::Weight}; +use pallet_bridge_dispatch::{CallOrigin, MessagePayload}; +use relay_substrate_client::{Chain, TransactionSignScheme}; +use sp_core::{Bytes, Pair}; +use sp_runtime::{traits::IdentifyAccount, AccountId32, MultiSignature, MultiSigner}; +use std::fmt::Debug; +use structopt::StructOpt; + +/// Send bridge message. +#[derive(StructOpt)] +pub struct SendMessage { + /// A bridge instance to encode call for. + #[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)] + bridge: FullBridge, + #[structopt(flatten)] + source: SourceConnectionParams, + #[structopt(flatten)] + source_sign: SourceSigningParams, + // TODO [#885] Move TargetSign to origins + #[structopt(flatten)] + target_sign: TargetSigningParams, + /// Hex-encoded lane id. Defaults to `00000000`. + #[structopt(long, default_value = "00000000")] + lane: HexLaneId, + /// Dispatch weight of the message. If not passed, determined automatically. + #[structopt(long)] + dispatch_weight: Option>, + /// Delivery and dispatch fee in source chain base currency units. If not passed, determined automatically. + #[structopt(long)] + fee: Option, + /// Message type. 
+ #[structopt(subcommand)] + message: crate::cli::encode_call::Call, + /// The origin to use when dispatching the message on the target chain. Defaults to + /// `SourceAccount`. + #[structopt(long, possible_values = &Origins::variants(), default_value = "Source")] + origin: Origins, +} + +impl SendMessage { + pub fn encode_payload( + &mut self, + ) -> anyhow::Result>> { + crate::select_full_bridge!(self.bridge, { + let SendMessage { + source_sign, + target_sign, + ref mut message, + dispatch_weight, + origin, + bridge, + .. + } = self; + + let source_sign = source_sign.to_keypair::()?; + let target_sign = target_sign.to_keypair::()?; + + encode_call::preprocess_call::(message, bridge.bridge_instance_index()); + let target_call = Target::encode_call(&message)?; + + let payload = { + let target_call_weight = prepare_call_dispatch_weight( + dispatch_weight, + ExplicitOrMaximal::Explicit(target_call.get_dispatch_info().weight), + compute_maximal_message_dispatch_weight(Target::max_extrinsic_weight()), + ); + let source_sender_public: MultiSigner = source_sign.public().into(); + let source_account_id = source_sender_public.into_account(); + + message_payload( + Target::RUNTIME_VERSION.spec_version, + target_call_weight, + match origin { + Origins::Source => CallOrigin::SourceAccount(source_account_id), + Origins::Target => { + let digest = account_ownership_digest( + &target_call, + source_account_id.clone(), + Target::RUNTIME_VERSION.spec_version, + ); + let target_origin_public = target_sign.public(); + let digest_signature = target_sign.sign(&digest); + CallOrigin::TargetAccount( + source_account_id, + target_origin_public.into(), + digest_signature.into(), + ) + } + }, + &target_call, + ) + }; + Ok(payload) + }) + } + + /// Run the command. 
+ pub async fn run(mut self) -> anyhow::Result<()> { + crate::select_full_bridge!(self.bridge, { + let payload = self.encode_payload()?; + + let source_client = self.source.to_client::().await?; + let source_sign = self.source_sign.to_keypair::()?; + + let lane = self.lane.clone().into(); + let fee = match self.fee { + Some(fee) => fee, + None => Balance( + estimate_message_delivery_and_dispatch_fee::< + ::NativeBalance, + _, + _, + >(&source_client, ESTIMATE_MESSAGE_FEE_METHOD, lane, payload.clone()) + .await? as _, + ), + }; + let dispatch_weight = payload.weight; + let send_message_call = Source::encode_call(&encode_call::Call::BridgeSendMessage { + bridge_instance_index: self.bridge.bridge_instance_index(), + lane: self.lane, + payload: HexBytes::encode(&payload), + fee, + })?; + + source_client + .submit_signed_extrinsic(source_sign.public().into(), |transaction_nonce| { + let signed_source_call = Source::sign_transaction( + *source_client.genesis_hash(), + &source_sign, + transaction_nonce, + send_message_call, + ) + .encode(); + + log::info!( + target: "bridge", + "Sending message to {}. Size: {}. Dispatch weight: {}. 
Fee: {}", + Target::NAME, + signed_source_call.len(), + dispatch_weight, + fee, + ); + log::info!( + target: "bridge", + "Signed {} Call: {:?}", + Source::NAME, + HexBytes::encode(&signed_source_call) + ); + + Bytes(signed_source_call) + }) + .await?; + }); + + Ok(()) + } +} + +fn prepare_call_dispatch_weight( + user_specified_dispatch_weight: &Option>, + weight_from_pre_dispatch_call: ExplicitOrMaximal, + maximal_allowed_weight: Weight, +) -> Weight { + match user_specified_dispatch_weight + .clone() + .unwrap_or(weight_from_pre_dispatch_call) + { + ExplicitOrMaximal::Explicit(weight) => weight, + ExplicitOrMaximal::Maximal => maximal_allowed_weight, + } +} + +pub(crate) fn message_payload( + spec_version: u32, + weight: Weight, + origin: CallOrigin, + call: &impl Encode, +) -> MessagePayload> +where + SAccountId: Encode + Debug, + TPublic: Encode + Debug, + TSignature: Encode + Debug, +{ + // Display nicely formatted call. + let payload = MessagePayload { + spec_version, + weight, + origin, + call: HexBytes::encode(call), + }; + + log::info!(target: "bridge", "Created Message Payload: {:#?}", payload); + log::info!(target: "bridge", "Encoded Message Payload: {:?}", HexBytes::encode(&payload)); + + // re-pack to return `Vec` + let MessagePayload { + spec_version, + weight, + origin, + call, + } = payload; + MessagePayload { + spec_version, + weight, + origin, + call: call.0, + } +} + +pub(crate) fn compute_maximal_message_dispatch_weight(maximal_extrinsic_weight: Weight) -> Weight { + bridge_runtime_common::messages::target::maximal_incoming_message_dispatch_weight(maximal_extrinsic_weight) +} + +#[cfg(test)] +mod tests { + use super::*; + use hex_literal::hex; + + #[test] + fn send_remark_rialto_to_millau() { + // given + let mut send_message = SendMessage::from_iter(vec![ + "send-message", + "RialtoToMillau", + "--source-port", + "1234", + "--source-signer", + "//Alice", + "--target-signer", + "//Bob", + "remark", + "--remark-payload", + "1234", + ]); + + // 
when + let payload = send_message.encode_payload().unwrap(); + + // then + assert_eq!( + payload, + MessagePayload { + spec_version: relay_millau_client::Millau::RUNTIME_VERSION.spec_version, + weight: 1345000, + origin: CallOrigin::SourceAccount(sp_keyring::AccountKeyring::Alice.to_account_id()), + call: hex!("0401081234").to_vec(), + } + ); + } + + #[test] + fn send_remark_millau_to_rialto() { + // given + let mut send_message = SendMessage::from_iter(vec![ + "send-message", + "MillauToRialto", + "--source-port", + "1234", + "--source-signer", + "//Alice", + "--origin", + "Target", + "--target-signer", + "//Bob", + "remark", + "--remark-payload", + "1234", + ]); + + // when + let payload = send_message.encode_payload().unwrap(); + + // then + // Since signatures are randomized we extract it from here and only check the rest. + let signature = match payload.origin { + CallOrigin::TargetAccount(_, _, ref sig) => sig.clone(), + _ => panic!("Unexpected `CallOrigin`: {:?}", payload), + }; + assert_eq!( + payload, + MessagePayload { + spec_version: relay_millau_client::Millau::RUNTIME_VERSION.spec_version, + weight: 1345000, + origin: CallOrigin::TargetAccount( + sp_keyring::AccountKeyring::Alice.to_account_id(), + sp_keyring::AccountKeyring::Bob.into(), + signature, + ), + call: hex!("0701081234").to_vec(), + } + ); + } +} diff --git a/polkadot/relays/bin-substrate/src/finality_pipeline.rs b/polkadot/relays/bin-substrate/src/finality_pipeline.rs new file mode 100644 index 00000000000..bc8461f6a83 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/finality_pipeline.rs @@ -0,0 +1,149 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Substrate-to-Substrate headers sync entrypoint. + +use crate::finality_target::SubstrateFinalityTarget; + +use bp_header_chain::justification::GrandpaJustification; +use finality_relay::{FinalitySyncParams, FinalitySyncPipeline}; +use relay_substrate_client::{finality_source::FinalitySource, BlockNumberOf, Chain, Client, HashOf, SyncHeader}; +use relay_utils::{metrics::MetricsParams, BlockNumberBase}; +use sp_core::Bytes; +use std::{fmt::Debug, marker::PhantomData, time::Duration}; + +/// Default synchronization loop timeout. +const STALL_TIMEOUT: Duration = Duration::from_secs(120); +/// Default limit of recent finality proofs. +/// +/// Finality delay of 4096 blocks is unlikely to happen in practice in +/// Substrate+GRANDPA based chains (good to know). +const RECENT_FINALITY_PROOFS_LIMIT: usize = 4096; + +/// Headers sync pipeline for Substrate <-> Substrate relays. +pub trait SubstrateFinalitySyncPipeline: FinalitySyncPipeline { + /// Name of the runtime method that returns id of best finalized source header at target chain. + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str; + + /// Chain with GRANDPA bridge pallet. + type TargetChain: Chain; + + /// Customize metrics exposed by headers sync loop. + fn customize_metrics(params: MetricsParams) -> anyhow::Result { + Ok(params) + } + + /// Returns id of account that we're using to sign transactions at target chain. + fn transactions_author(&self) -> ::AccountId; + + /// Make submit header transaction. 
+ fn make_submit_finality_proof_transaction( + &self, + transaction_nonce: ::Index, + header: Self::Header, + proof: Self::FinalityProof, + ) -> Bytes; +} + +/// Substrate-to-Substrate finality proof pipeline. +#[derive(Clone)] +pub struct SubstrateFinalityToSubstrate { + /// Client for the target chain. + pub(crate) target_client: Client, + /// Data required to sign target chain transactions. + pub(crate) target_sign: TargetSign, + /// Unused generic arguments dump. + _marker: PhantomData, +} + +impl Debug + for SubstrateFinalityToSubstrate +{ + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + f.debug_struct("SubstrateFinalityToSubstrate") + .field("target_client", &self.target_client) + .finish() + } +} + +impl SubstrateFinalityToSubstrate { + /// Create new Substrate-to-Substrate headers pipeline. + pub fn new(target_client: Client, target_sign: TargetSign) -> Self { + SubstrateFinalityToSubstrate { + target_client, + target_sign, + _marker: Default::default(), + } + } +} + +impl FinalitySyncPipeline + for SubstrateFinalityToSubstrate +where + SourceChain: Clone + Chain + Debug, + BlockNumberOf: BlockNumberBase, + TargetChain: Clone + Chain + Debug, + TargetSign: Clone + Send + Sync, +{ + const SOURCE_NAME: &'static str = SourceChain::NAME; + const TARGET_NAME: &'static str = TargetChain::NAME; + + type Hash = HashOf; + type Number = BlockNumberOf; + type Header = SyncHeader; + type FinalityProof = GrandpaJustification; +} + +/// Run Substrate-to-Substrate finality sync. 
+pub async fn run( + pipeline: P, + source_client: Client, + target_client: Client, + metrics_params: MetricsParams, +) -> anyhow::Result<()> +where + P: SubstrateFinalitySyncPipeline< + Hash = HashOf, + Number = BlockNumberOf, + Header = SyncHeader, + FinalityProof = GrandpaJustification, + TargetChain = TargetChain, + >, + SourceChain: Clone + Chain, + BlockNumberOf: BlockNumberBase, + TargetChain: Clone + Chain, +{ + log::info!( + target: "bridge", + "Starting {} -> {} finality proof relay", + SourceChain::NAME, + TargetChain::NAME, + ); + + finality_relay::run( + FinalitySource::new(source_client), + SubstrateFinalityTarget::new(target_client, pipeline), + FinalitySyncParams { + tick: std::cmp::max(SourceChain::AVERAGE_BLOCK_INTERVAL, TargetChain::AVERAGE_BLOCK_INTERVAL), + recent_finality_proofs_limit: RECENT_FINALITY_PROOFS_LIMIT, + stall_timeout: STALL_TIMEOUT, + }, + metrics_params, + futures::future::pending(), + ) + .await + .map_err(|e| anyhow::format_err!("{}", e)) +} diff --git a/polkadot/relays/bin-substrate/src/finality_target.rs b/polkadot/relays/bin-substrate/src/finality_target.rs new file mode 100644 index 00000000000..ffa10cabacb --- /dev/null +++ b/polkadot/relays/bin-substrate/src/finality_target.rs @@ -0,0 +1,91 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. 
If not, see . + +//! Substrate client as Substrate finality proof target. The chain we connect to should have +//! runtime that implements `FinalityApi` to allow bridging with +//! chain. + +use crate::finality_pipeline::SubstrateFinalitySyncPipeline; + +use async_trait::async_trait; +use codec::Decode; +use finality_relay::TargetClient; +use relay_substrate_client::{Chain, Client, Error as SubstrateError}; +use relay_utils::relay_loop::Client as RelayClient; + +/// Substrate client as Substrate finality target. +pub struct SubstrateFinalityTarget { + client: Client, + pipeline: P, +} + +impl SubstrateFinalityTarget { + /// Create new Substrate headers target. + pub fn new(client: Client, pipeline: P) -> Self { + SubstrateFinalityTarget { client, pipeline } + } +} + +impl Clone for SubstrateFinalityTarget { + fn clone(&self) -> Self { + SubstrateFinalityTarget { + client: self.client.clone(), + pipeline: self.pipeline.clone(), + } + } +} + +#[async_trait] +impl RelayClient for SubstrateFinalityTarget { + type Error = SubstrateError; + + async fn reconnect(&mut self) -> Result<(), SubstrateError> { + self.client.reconnect().await + } +} + +#[async_trait] +impl TargetClient

for SubstrateFinalityTarget +where + C: Chain, + P::Number: Decode, + P::Hash: Decode, + P: SubstrateFinalitySyncPipeline, +{ + async fn best_finalized_source_block_number(&self) -> Result { + // we can't continue to relay finality if target node is out of sync, because + // it may have already received (some of) headers that we're going to relay + self.client.ensure_synced().await?; + + Ok(crate::messages_source::read_client_state::( + &self.client, + P::BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET, + ) + .await? + .best_finalized_peer_at_best_self + .0) + } + + async fn submit_finality_proof(&self, header: P::Header, proof: P::FinalityProof) -> Result<(), SubstrateError> { + self.client + .submit_signed_extrinsic(self.pipeline.transactions_author(), move |transaction_nonce| { + self.pipeline + .make_submit_finality_proof_transaction(transaction_nonce, header, proof) + }) + .await + .map(drop) + } +} diff --git a/polkadot/relays/bin-substrate/src/headers_initialize.rs b/polkadot/relays/bin-substrate/src/headers_initialize.rs new file mode 100644 index 00000000000..c2eab1bd353 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/headers_initialize.rs @@ -0,0 +1,256 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Initialize Substrate -> Substrate headers bridge. +//! 
+//! Initialization is a transaction that calls `initialize()` function of the +//! `pallet-bridge-grandpa` pallet. This transaction brings initial header +//! and authorities set from source to target chain. The headers sync starts +//! with this header. + +use bp_header_chain::InitializationData; +use bp_header_chain::{ + find_grandpa_authorities_scheduled_change, + justification::{verify_justification, GrandpaJustification}, +}; +use codec::Decode; +use finality_grandpa::voter_set::VoterSet; +use num_traits::{One, Zero}; +use relay_substrate_client::{Chain, Client}; +use sp_core::Bytes; +use sp_finality_grandpa::AuthorityList as GrandpaAuthoritiesSet; +use sp_runtime::traits::Header as HeaderT; + +/// Submit headers-bridge initialization transaction. +pub async fn initialize( + source_client: Client, + target_client: Client, + target_transactions_signer: TargetChain::AccountId, + prepare_initialize_transaction: impl FnOnce(TargetChain::Index, InitializationData) -> Bytes, +) { + let result = do_initialize( + source_client, + target_client, + target_transactions_signer, + prepare_initialize_transaction, + ) + .await; + + match result { + Ok(tx_hash) => log::info!( + target: "bridge", + "Successfully submitted {}-headers bridge initialization transaction to {}: {:?}", + SourceChain::NAME, + TargetChain::NAME, + tx_hash, + ), + Err(err) => log::error!( + target: "bridge", + "Failed to submit {}-headers bridge initialization transaction to {}: {:?}", + SourceChain::NAME, + TargetChain::NAME, + err, + ), + } +} + +/// Craft and submit initialization transaction, returning any error that may occur. 
+async fn do_initialize( + source_client: Client, + target_client: Client, + target_transactions_signer: TargetChain::AccountId, + prepare_initialize_transaction: impl FnOnce(TargetChain::Index, InitializationData) -> Bytes, +) -> Result { + let initialization_data = prepare_initialization_data(source_client).await?; + log::info!( + target: "bridge", + "Prepared initialization data for {}-headers bridge at {}: {:?}", + SourceChain::NAME, + TargetChain::NAME, + initialization_data, + ); + + let initialization_tx_hash = target_client + .submit_signed_extrinsic(target_transactions_signer, move |transaction_nonce| { + prepare_initialize_transaction(transaction_nonce, initialization_data) + }) + .await + .map_err(|err| format!("Failed to submit {} transaction: {:?}", TargetChain::NAME, err))?; + Ok(initialization_tx_hash) +} + +/// Prepare initialization data for the GRANDPA verifier pallet. +async fn prepare_initialization_data( + source_client: Client, +) -> Result, String> { + // In ideal world we just need to get best finalized header and then to read GRANDPA authorities + // set (`pallet_grandpa::CurrentSetId` + `GrandpaApi::grandpa_authorities()`) at this header. + // + // But now there are problems with this approach - `CurrentSetId` may return invalid value. So here + // we're waiting for the next justification, read the authorities set and then try to figure out + // the set id with bruteforce. + let mut justifications = source_client + .subscribe_justifications() + .await + .map_err(|err| format!("Failed to subscribe to {} justifications: {:?}", SourceChain::NAME, err))?; + + // Read next justification - the header that it finalizes will be used as initial header. + let justification = justifications.next().await.ok_or_else(|| { + format!( + "Failed to read {} justification from the stream: stream has ended unexpectedly", + SourceChain::NAME, + ) + })?; + + // Read initial header. 
+ let justification: GrandpaJustification = Decode::decode(&mut &justification.0[..]) + .map_err(|err| format!("Failed to decode {} justification: {:?}", SourceChain::NAME, err))?; + + let (initial_header_hash, initial_header_number) = + (justification.commit.target_hash, justification.commit.target_number); + + let initial_header = source_header(&source_client, initial_header_hash).await?; + log::trace!(target: "bridge", "Selected {} initial header: {}/{}", + SourceChain::NAME, + initial_header_number, + initial_header_hash, + ); + + // Read GRANDPA authorities set at initial header. + let initial_authorities_set = source_authorities_set(&source_client, initial_header_hash).await?; + log::trace!(target: "bridge", "Selected {} initial authorities set: {:?}", + SourceChain::NAME, + initial_authorities_set, + ); + + // If initial header changes the GRANDPA authorities set, then we need previous authorities + // to verify justification. + let mut authorities_for_verification = initial_authorities_set.clone(); + let scheduled_change = find_grandpa_authorities_scheduled_change(&initial_header); + assert!( + scheduled_change.as_ref().map(|c| c.delay.is_zero()).unwrap_or(true), + "GRANDPA authorities change at {} scheduled to happen in {:?} blocks. We expect\ + regular hange to have zero delay", + initial_header_hash, + scheduled_change.as_ref().map(|c| c.delay), + ); + let schedules_change = scheduled_change.is_some(); + if schedules_change { + authorities_for_verification = source_authorities_set(&source_client, *initial_header.parent_hash()).await?; + log::trace!( + target: "bridge", + "Selected {} header is scheduling GRANDPA authorities set changes. Using previous set: {:?}", + SourceChain::NAME, + authorities_for_verification, + ); + } + + // Now let's try to guess authorities set id by verifying justification. 
+ let mut initial_authorities_set_id = 0; + let mut min_possible_block_number = SourceChain::BlockNumber::zero(); + let authorities_for_verification = VoterSet::new(authorities_for_verification.clone()).ok_or_else(|| { + format!( + "Read invalid {} authorities set: {:?}", + SourceChain::NAME, + authorities_for_verification, + ) + })?; + loop { + log::trace!( + target: "bridge", "Trying {} GRANDPA authorities set id: {}", + SourceChain::NAME, + initial_authorities_set_id, + ); + + let is_valid_set_id = verify_justification::( + (initial_header_hash, initial_header_number), + initial_authorities_set_id, + &authorities_for_verification, + &justification, + ) + .is_ok(); + + if is_valid_set_id { + break; + } + + initial_authorities_set_id += 1; + min_possible_block_number += One::one(); + if min_possible_block_number > initial_header_number { + // there can't be more authorities set changes than headers => if we have reached `initial_block_number` + // and still have not found correct value of `initial_authorities_set_id`, then something + // else is broken => fail + return Err(format!( + "Failed to guess initial {} GRANDPA authorities set id: checked all\ + possible ids in range [0; {}]", + SourceChain::NAME, + initial_header_number + )); + } + } + + Ok(InitializationData { + header: initial_header, + authority_list: initial_authorities_set, + set_id: if schedules_change { + initial_authorities_set_id + 1 + } else { + initial_authorities_set_id + }, + is_halted: false, + }) +} + +/// Read header by hash from the source client. +async fn source_header( + source_client: &Client, + header_hash: SourceChain::Hash, +) -> Result { + source_client.header_by_hash(header_hash).await.map_err(|err| { + format!( + "Failed to retrive {} header with hash {}: {:?}", + SourceChain::NAME, + header_hash, + err, + ) + }) +} + +/// Read GRANDPA authorities set at given header. 
+async fn source_authorities_set( + source_client: &Client, + header_hash: SourceChain::Hash, +) -> Result { + let raw_authorities_set = source_client + .grandpa_authorities_set(header_hash) + .await + .map_err(|err| { + format!( + "Failed to retrive {} GRANDPA authorities set at header {}: {:?}", + SourceChain::NAME, + header_hash, + err, + ) + })?; + GrandpaAuthoritiesSet::decode(&mut &raw_authorities_set[..]).map_err(|err| { + format!( + "Failed to decode {} GRANDPA authorities set at header {}: {:?}", + SourceChain::NAME, + header_hash, + err, + ) + }) +} diff --git a/polkadot/relays/bin-substrate/src/main.rs b/polkadot/relays/bin-substrate/src/main.rs new file mode 100644 index 00000000000..6bf7561fcdb --- /dev/null +++ b/polkadot/relays/bin-substrate/src/main.rs @@ -0,0 +1,41 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Substrate-to-substrate relay entrypoint. 
+ +#![warn(missing_docs)] + +use relay_utils::initialize::initialize_logger; + +mod chains; +mod cli; +mod finality_pipeline; +mod finality_target; +mod headers_initialize; +mod messages_lane; +mod messages_source; +mod messages_target; +mod on_demand_headers; + +fn main() { + initialize_logger(false); + let command = cli::parse_args(); + let run = command.run(); + let result = async_std::task::block_on(run); + if let Err(error) = result { + log::error!(target: "bridge", "Failed to start relay: {}", error); + } +} diff --git a/polkadot/relays/bin-substrate/src/messages_lane.rs b/polkadot/relays/bin-substrate/src/messages_lane.rs new file mode 100644 index 00000000000..9948b6ec083 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/messages_lane.rs @@ -0,0 +1,209 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
+ +use crate::messages_source::SubstrateMessagesProof; +use crate::messages_target::SubstrateMessagesReceivingProof; +use crate::on_demand_headers::OnDemandHeadersRelay; + +use bp_messages::{LaneId, MessageNonce}; +use frame_support::weights::Weight; +use messages_relay::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}; +use relay_substrate_client::{BlockNumberOf, Chain, Client, HashOf}; +use relay_utils::{metrics::MetricsParams, BlockNumberBase}; +use sp_core::Bytes; +use std::ops::RangeInclusive; + +/// Substrate <-> Substrate messages relay parameters. +pub struct MessagesRelayParams { + /// Messages source client. + pub source_client: Client, + /// Sign parameters for messages source chain. + pub source_sign: SS, + /// Messages target client. + pub target_client: Client, + /// Sign parameters for messages target chain. + pub target_sign: TS, + /// Optional on-demand source to target headers relay. + pub source_to_target_headers_relay: Option>, + /// Optional on-demand target to source headers relay. + pub target_to_source_headers_relay: Option>, + /// Identifier of lane that needs to be served. + pub lane_id: LaneId, + /// Metrics parameters. + pub metrics_params: MetricsParams, +} + +/// Message sync pipeline for Substrate <-> Substrate relays. +pub trait SubstrateMessageLane: MessageLane { + /// Name of the runtime method that returns dispatch weight of outbound messages at the source chain. + const OUTBOUND_LANE_MESSAGES_DISPATCH_WEIGHT_METHOD: &'static str; + /// Name of the runtime method that returns latest generated nonce at the source chain. + const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str; + /// Name of the runtime method that returns latest received (confirmed) nonce at the source chain. + const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str; + + /// Name of the runtime method that returns latest received nonce at the target chain. 
+ const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str; + /// Name of the runtime method that returns latest confirmed (reward-paid) nonce at the target chain. + const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str; + /// Name of the runtime method that returns state of "unrewarded relayers" set at the target chain. + const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str; + + /// Name of the runtime method that returns id of best finalized source header at target chain. + const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str; + /// Name of the runtime method that returns id of best finalized target header at source chain. + const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str; + + /// Source chain. + type SourceChain: Chain; + /// Target chain. + type TargetChain: Chain; + + /// Returns id of account that we're using to sign transactions at target chain (messages proof). + fn target_transactions_author(&self) -> ::AccountId; + + /// Make messages delivery transaction. + fn make_messages_delivery_transaction( + &self, + transaction_nonce: ::Index, + generated_at_header: SourceHeaderIdOf, + nonces: RangeInclusive, + proof: Self::MessagesProof, + ) -> Bytes; + + /// Returns id of account that we're using to sign transactions at source chain (delivery proof). + fn source_transactions_author(&self) -> ::AccountId; + + /// Make messages receiving proof transaction. + fn make_messages_receiving_proof_transaction( + &self, + transaction_nonce: ::Index, + generated_at_header: TargetHeaderIdOf, + proof: Self::MessagesReceivingProof, + ) -> Bytes; +} + +/// Substrate-to-Substrate message lane. +#[derive(Debug)] +pub struct SubstrateMessageLaneToSubstrate { + /// Client for the source Substrate chain. + pub(crate) source_client: Client, + /// Parameters required to sign transactions for source chain. + pub(crate) source_sign: SourceSignParams, + /// Client for the target Substrate chain. 
+ pub(crate) target_client: Client, + /// Parameters required to sign transactions for target chain. + pub(crate) target_sign: TargetSignParams, + /// Account id of relayer at the source chain. + pub(crate) relayer_id_at_source: Source::AccountId, +} + +impl Clone + for SubstrateMessageLaneToSubstrate +{ + fn clone(&self) -> Self { + Self { + source_client: self.source_client.clone(), + source_sign: self.source_sign.clone(), + target_client: self.target_client.clone(), + target_sign: self.target_sign.clone(), + relayer_id_at_source: self.relayer_id_at_source.clone(), + } + } +} + +impl MessageLane + for SubstrateMessageLaneToSubstrate +where + SourceSignParams: Clone + Send + Sync + 'static, + TargetSignParams: Clone + Send + Sync + 'static, + BlockNumberOf: BlockNumberBase, + BlockNumberOf: BlockNumberBase, +{ + const SOURCE_NAME: &'static str = Source::NAME; + const TARGET_NAME: &'static str = Target::NAME; + + type MessagesProof = SubstrateMessagesProof; + type MessagesReceivingProof = SubstrateMessagesReceivingProof; + + type SourceHeaderNumber = BlockNumberOf; + type SourceHeaderHash = HashOf; + + type TargetHeaderNumber = BlockNumberOf; + type TargetHeaderHash = HashOf; +} + +/// Returns maximal number of messages and their maximal cumulative dispatch weight, based +/// on given chain parameters. +pub fn select_delivery_transaction_limits( + max_extrinsic_weight: Weight, + max_unconfirmed_messages_at_inbound_lane: MessageNonce, +) -> (MessageNonce, Weight) { + // We may try to guess accurate value, based on maximal number of messages and per-message + // weight overhead, but the relay loop isn't using this info in a super-accurate way anyway. + // So just a rough guess: let's say 1/3 of max tx weight is for tx itself and the rest is + // for messages dispatch. + + // Another thing to keep in mind is that our runtimes (when this code was written) accept + // messages with dispatch weight <= max_extrinsic_weight/2. 
So we can't reserve less than + // that for dispatch. + + let weight_for_delivery_tx = max_extrinsic_weight / 3; + let weight_for_messages_dispatch = max_extrinsic_weight - weight_for_delivery_tx; + + let delivery_tx_base_weight = + W::receive_messages_proof_overhead() + W::receive_messages_proof_outbound_lane_state_overhead(); + let delivery_tx_weight_rest = weight_for_delivery_tx - delivery_tx_base_weight; + let max_number_of_messages = std::cmp::min( + delivery_tx_weight_rest / W::receive_messages_proof_messages_overhead(1), + max_unconfirmed_messages_at_inbound_lane, + ); + + assert!( + max_number_of_messages > 0, + "Relay should fit at least one message in every delivery transaction", + ); + assert!( + weight_for_messages_dispatch >= max_extrinsic_weight / 2, + "Relay shall be able to deliver messages with dispatch weight = max_extrinsic_weight / 2", + ); + + (max_number_of_messages, weight_for_messages_dispatch) +} + +#[cfg(test)] +mod tests { + use super::*; + + type RialtoToMillauMessagesWeights = pallet_bridge_messages::weights::RialtoWeight; + + #[test] + fn select_delivery_transaction_limits_works() { + let (max_count, max_weight) = select_delivery_transaction_limits::( + bp_millau::max_extrinsic_weight(), + bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE, + ); + assert_eq!( + (max_count, max_weight), + // We don't actually care about these values, so feel free to update them whenever test + // fails. The only thing to do before that is to ensure that new values looks sane: i.e. weight + // reserved for messages dispatch allows dispatch of non-trivial messages. + // + // Any significant change in this values should attract additional attention. 
+ (1020, 216_583_333_334), + ); + } +} diff --git a/polkadot/relays/bin-substrate/src/messages_source.rs b/polkadot/relays/bin-substrate/src/messages_source.rs new file mode 100644 index 00000000000..cf98f3276be --- /dev/null +++ b/polkadot/relays/bin-substrate/src/messages_source.rs @@ -0,0 +1,411 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Substrate client as Substrate messages source. The chain we connect to should have +//! runtime that implements `HeaderApi` to allow bridging with +//! chain. 
+ +use crate::messages_lane::SubstrateMessageLane; +use crate::on_demand_headers::OnDemandHeadersRelay; + +use async_trait::async_trait; +use bp_messages::{LaneId, MessageNonce}; +use bp_runtime::InstanceId; +use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; +use codec::{Decode, Encode}; +use frame_support::{traits::Instance, weights::Weight}; +use messages_relay::{ + message_lane::{SourceHeaderIdOf, TargetHeaderIdOf}, + message_lane_loop::{ + ClientState, MessageProofParameters, MessageWeights, MessageWeightsMap, SourceClient, SourceClientState, + }, +}; +use pallet_bridge_messages::Config as MessagesConfig; +use relay_substrate_client::{Chain, Client, Error as SubstrateError, HashOf, HeaderIdOf}; +use relay_utils::{relay_loop::Client as RelayClient, BlockNumberBase, HeaderId}; +use sp_core::Bytes; +use sp_runtime::{traits::Header as HeaderT, DeserializeOwned}; +use std::{marker::PhantomData, ops::RangeInclusive}; + +/// Intermediate message proof returned by the source Substrate node. Includes everything +/// required to submit to the target node: cumulative dispatch weight of bundled messages and +/// the proof itself. +pub type SubstrateMessagesProof = (Weight, FromBridgedChainMessagesProof>); + +/// Substrate client as Substrate messages source. +pub struct SubstrateMessagesSource { + client: Client, + lane: P, + lane_id: LaneId, + instance: InstanceId, + target_to_source_headers_relay: Option>, + _phantom: PhantomData<(R, I)>, +} + +impl SubstrateMessagesSource { + /// Create new Substrate headers source. 
+ pub fn new( + client: Client, + lane: P, + lane_id: LaneId, + instance: InstanceId, + target_to_source_headers_relay: Option>, + ) -> Self { + SubstrateMessagesSource { + client, + lane, + lane_id, + instance, + target_to_source_headers_relay, + _phantom: Default::default(), + } + } +} + +impl Clone for SubstrateMessagesSource { + fn clone(&self) -> Self { + Self { + client: self.client.clone(), + lane: self.lane.clone(), + lane_id: self.lane_id, + instance: self.instance, + target_to_source_headers_relay: self.target_to_source_headers_relay.clone(), + _phantom: Default::default(), + } + } +} + +#[async_trait] +impl RelayClient for SubstrateMessagesSource +where + C: Chain, + P: SubstrateMessageLane, + R: Send + Sync, + I: Send + Sync + Instance, +{ + type Error = SubstrateError; + + async fn reconnect(&mut self) -> Result<(), SubstrateError> { + self.client.reconnect().await + } +} + +#[async_trait] +impl SourceClient

for SubstrateMessagesSource +where + C: Chain, + C::Header: DeserializeOwned, + C::Index: DeserializeOwned, + C::BlockNumber: BlockNumberBase, + P: SubstrateMessageLane< + MessagesProof = SubstrateMessagesProof, + SourceHeaderNumber = ::Number, + SourceHeaderHash = ::Hash, + SourceChain = C, + >, + P::TargetChain: Chain, + P::TargetHeaderNumber: Decode, + P::TargetHeaderHash: Decode, + R: Send + Sync + MessagesConfig, + I: Send + Sync + Instance, +{ + async fn state(&self) -> Result, SubstrateError> { + // we can't continue to deliver confirmations if source node is out of sync, because + // it may have already received confirmations that we're going to deliver + self.client.ensure_synced().await?; + + read_client_state::<_, P::TargetHeaderHash, P::TargetHeaderNumber>( + &self.client, + P::BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE, + ) + .await + } + + async fn latest_generated_nonce( + &self, + id: SourceHeaderIdOf

, + ) -> Result<(SourceHeaderIdOf

, MessageNonce), SubstrateError> { + let encoded_response = self + .client + .state_call( + P::OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD.into(), + Bytes(self.lane_id.encode()), + Some(id.1), + ) + .await?; + let latest_generated_nonce: MessageNonce = + Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; + Ok((id, latest_generated_nonce)) + } + + async fn latest_confirmed_received_nonce( + &self, + id: SourceHeaderIdOf

, + ) -> Result<(SourceHeaderIdOf

, MessageNonce), SubstrateError> { + let encoded_response = self + .client + .state_call( + P::OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD.into(), + Bytes(self.lane_id.encode()), + Some(id.1), + ) + .await?; + let latest_received_nonce: MessageNonce = + Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; + Ok((id, latest_received_nonce)) + } + + async fn generated_messages_weights( + &self, + id: SourceHeaderIdOf

, + nonces: RangeInclusive, + ) -> Result { + let encoded_response = self + .client + .state_call( + P::OUTBOUND_LANE_MESSAGES_DISPATCH_WEIGHT_METHOD.into(), + Bytes((self.lane_id, nonces.start(), nonces.end()).encode()), + Some(id.1), + ) + .await?; + + make_message_weights_map::( + Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?, + nonces, + ) + } + + async fn prove_messages( + &self, + id: SourceHeaderIdOf

, + nonces: RangeInclusive, + proof_parameters: MessageProofParameters, + ) -> Result<(SourceHeaderIdOf

, RangeInclusive, P::MessagesProof), SubstrateError> { + let mut storage_keys = Vec::with_capacity(nonces.end().saturating_sub(*nonces.start()) as usize + 1); + let mut message_nonce = *nonces.start(); + while message_nonce <= *nonces.end() { + let message_key = pallet_bridge_messages::storage_keys::message_key::(&self.lane_id, message_nonce); + storage_keys.push(message_key); + message_nonce += 1; + } + if proof_parameters.outbound_state_proof_required { + storage_keys.push(pallet_bridge_messages::storage_keys::outbound_lane_data_key::( + &self.lane_id, + )); + } + + let proof = self + .client + .prove_storage(storage_keys, id.1) + .await? + .iter_nodes() + .collect(); + let proof = FromBridgedChainMessagesProof { + bridged_header_hash: id.1, + storage_proof: proof, + lane: self.lane_id, + nonces_start: *nonces.start(), + nonces_end: *nonces.end(), + }; + Ok((id, nonces, (proof_parameters.dispatch_weight, proof))) + } + + async fn submit_messages_receiving_proof( + &self, + generated_at_block: TargetHeaderIdOf

, + proof: P::MessagesReceivingProof, + ) -> Result<(), SubstrateError> { + self.client + .submit_signed_extrinsic(self.lane.source_transactions_author(), move |transaction_nonce| { + self.lane + .make_messages_receiving_proof_transaction(transaction_nonce, generated_at_block, proof) + }) + .await?; + Ok(()) + } + + async fn require_target_header_on_source(&self, id: TargetHeaderIdOf

) { + if let Some(ref target_to_source_headers_relay) = self.target_to_source_headers_relay { + target_to_source_headers_relay.require_finalized_header(id); + } + } +} + +pub async fn read_client_state( + self_client: &Client, + best_finalized_header_id_method_name: &str, +) -> Result, HeaderId>, SubstrateError> +where + SelfChain: Chain, + SelfChain::Header: DeserializeOwned, + SelfChain::Index: DeserializeOwned, + BridgedHeaderHash: Decode, + BridgedHeaderNumber: Decode, +{ + // let's read our state first: we need best finalized header hash on **this** chain + let self_best_finalized_header_hash = self_client.best_finalized_header_hash().await?; + let self_best_finalized_header = self_client.header_by_hash(self_best_finalized_header_hash).await?; + let self_best_finalized_id = HeaderId(*self_best_finalized_header.number(), self_best_finalized_header_hash); + + // now let's read our best header on **this** chain + let self_best_header = self_client.best_header().await?; + let self_best_hash = self_best_header.hash(); + let self_best_id = HeaderId(*self_best_header.number(), self_best_hash); + + // now let's read id of best finalized peer header at our best finalized block + let encoded_best_finalized_peer_on_self = self_client + .state_call( + best_finalized_header_id_method_name.into(), + Bytes(Vec::new()), + Some(self_best_hash), + ) + .await?; + let decoded_best_finalized_peer_on_self: (BridgedHeaderNumber, BridgedHeaderHash) = + Decode::decode(&mut &encoded_best_finalized_peer_on_self.0[..]).map_err(SubstrateError::ResponseParseFailed)?; + let peer_on_self_best_finalized_id = HeaderId( + decoded_best_finalized_peer_on_self.0, + decoded_best_finalized_peer_on_self.1, + ); + + Ok(ClientState { + best_self: self_best_id, + best_finalized_self: self_best_finalized_id, + best_finalized_peer_at_best_self: peer_on_self_best_finalized_id, + }) +} + +fn make_message_weights_map( + weights: Vec<(MessageNonce, Weight, u32)>, + nonces: RangeInclusive, +) -> Result { + let 
make_missing_nonce_error = |expected_nonce| { + Err(SubstrateError::Custom(format!( + "Missing nonce {} in messages_dispatch_weight call result. Expected all nonces from {:?}", + expected_nonce, nonces, + ))) + }; + + let mut weights_map = MessageWeightsMap::new(); + + // this is actually prevented by external logic + if nonces.is_empty() { + return Ok(weights_map); + } + + // check if last nonce is missing - loop below is not checking this + let last_nonce_is_missing = weights + .last() + .map(|(last_nonce, _, _)| last_nonce != nonces.end()) + .unwrap_or(true); + if last_nonce_is_missing { + return make_missing_nonce_error(*nonces.end()); + } + + let mut expected_nonce = *nonces.start(); + let mut is_at_head = true; + + for (nonce, weight, size) in weights { + match (nonce == expected_nonce, is_at_head) { + (true, _) => (), + (false, true) => { + // this may happen if some messages were already pruned from the source node + // + // this is not critical error and will be auto-resolved by messages lane (and target node) + log::info!( + target: "bridge", + "Some messages are missing from the {} node: {:?}. 
Target node may be out of sync?", + C::NAME, + expected_nonce..nonce, + ); + } + (false, false) => { + // some nonces are missing from the middle/tail of the range + // + // this is critical error, because we can't miss any nonces + return make_missing_nonce_error(expected_nonce); + } + } + + weights_map.insert( + nonce, + MessageWeights { + weight, + size: size as _, + }, + ); + expected_nonce = nonce + 1; + is_at_head = false; + } + + Ok(weights_map) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn make_message_weights_map_succeeds_if_no_messages_are_missing() { + assert_eq!( + make_message_weights_map::(vec![(1, 0, 0), (2, 0, 0), (3, 0, 0)], 1..=3,) + .unwrap(), + vec![ + (1, MessageWeights { weight: 0, size: 0 }), + (2, MessageWeights { weight: 0, size: 0 }), + (3, MessageWeights { weight: 0, size: 0 }), + ] + .into_iter() + .collect(), + ); + } + + #[test] + fn make_message_weights_map_succeeds_if_head_messages_are_missing() { + assert_eq!( + make_message_weights_map::(vec![(2, 0, 0), (3, 0, 0)], 1..=3,).unwrap(), + vec![ + (2, MessageWeights { weight: 0, size: 0 }), + (3, MessageWeights { weight: 0, size: 0 }), + ] + .into_iter() + .collect(), + ); + } + + #[test] + fn make_message_weights_map_fails_if_mid_messages_are_missing() { + assert!(matches!( + make_message_weights_map::(vec![(1, 0, 0), (3, 0, 0)], 1..=3,), + Err(SubstrateError::Custom(_)) + )); + } + + #[test] + fn make_message_weights_map_fails_if_tail_messages_are_missing() { + assert!(matches!( + make_message_weights_map::(vec![(1, 0, 0), (2, 0, 0)], 1..=3,), + Err(SubstrateError::Custom(_)) + )); + } + + #[test] + fn make_message_weights_map_fails_if_all_messages_are_missing() { + assert!(matches!( + make_message_weights_map::(vec![], 1..=3), + Err(SubstrateError::Custom(_)) + )); + } +} diff --git a/polkadot/relays/bin-substrate/src/messages_target.rs b/polkadot/relays/bin-substrate/src/messages_target.rs new file mode 100644 index 00000000000..17608327306 --- /dev/null +++ 
b/polkadot/relays/bin-substrate/src/messages_target.rs @@ -0,0 +1,232 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Substrate client as Substrate messages target. The chain we connect to should have +//! runtime that implements `HeaderApi` to allow bridging with +//! chain. + +use crate::messages_lane::SubstrateMessageLane; +use crate::messages_source::read_client_state; +use crate::on_demand_headers::OnDemandHeadersRelay; + +use async_trait::async_trait; +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState}; +use bp_runtime::InstanceId; +use bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof; +use codec::{Decode, Encode}; +use frame_support::traits::Instance; +use messages_relay::{ + message_lane::{SourceHeaderIdOf, TargetHeaderIdOf}, + message_lane_loop::{TargetClient, TargetClientState}, +}; +use pallet_bridge_messages::Config as MessagesConfig; +use relay_substrate_client::{Chain, Client, Error as SubstrateError, HashOf}; +use relay_utils::{relay_loop::Client as RelayClient, BlockNumberBase}; +use sp_core::Bytes; +use sp_runtime::{traits::Header as HeaderT, DeserializeOwned}; +use std::{marker::PhantomData, ops::RangeInclusive}; + +/// Message receiving proof returned by the target Substrate node. 
+pub type SubstrateMessagesReceivingProof = ( + UnrewardedRelayersState, + FromBridgedChainMessagesDeliveryProof>, +); + +/// Substrate client as Substrate messages target. +pub struct SubstrateMessagesTarget { + client: Client, + lane: P, + lane_id: LaneId, + instance: InstanceId, + source_to_target_headers_relay: Option>, + _phantom: PhantomData<(R, I)>, +} + +impl SubstrateMessagesTarget { + /// Create new Substrate headers target. + pub fn new( + client: Client, + lane: P, + lane_id: LaneId, + instance: InstanceId, + source_to_target_headers_relay: Option>, + ) -> Self { + SubstrateMessagesTarget { + client, + lane, + lane_id, + instance, + source_to_target_headers_relay, + _phantom: Default::default(), + } + } +} + +impl Clone for SubstrateMessagesTarget { + fn clone(&self) -> Self { + Self { + client: self.client.clone(), + lane: self.lane.clone(), + lane_id: self.lane_id, + instance: self.instance, + source_to_target_headers_relay: self.source_to_target_headers_relay.clone(), + _phantom: Default::default(), + } + } +} + +#[async_trait] +impl RelayClient for SubstrateMessagesTarget +where + C: Chain, + P: SubstrateMessageLane, + R: Send + Sync, + I: Send + Sync + Instance, +{ + type Error = SubstrateError; + + async fn reconnect(&mut self) -> Result<(), SubstrateError> { + self.client.reconnect().await + } +} + +#[async_trait] +impl TargetClient

for SubstrateMessagesTarget +where + C: Chain, + C::Header: DeserializeOwned, + C::Index: DeserializeOwned, + ::Number: BlockNumberBase, + P: SubstrateMessageLane< + TargetChain = C, + MessagesReceivingProof = SubstrateMessagesReceivingProof, + TargetHeaderNumber = ::Number, + TargetHeaderHash = ::Hash, + >, + P::SourceChain: Chain, + P::SourceHeaderNumber: Decode, + P::SourceHeaderHash: Decode, + R: Send + Sync + MessagesConfig, + I: Send + Sync + Instance, +{ + async fn state(&self) -> Result, SubstrateError> { + // we can't continue to deliver messages if target node is out of sync, because + // it may have already received (some of) messages that we're going to deliver + self.client.ensure_synced().await?; + + read_client_state::<_, P::SourceHeaderHash, P::SourceHeaderNumber>( + &self.client, + P::BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET, + ) + .await + } + + async fn latest_received_nonce( + &self, + id: TargetHeaderIdOf

, + ) -> Result<(TargetHeaderIdOf

, MessageNonce), SubstrateError> { + let encoded_response = self + .client + .state_call( + P::INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD.into(), + Bytes(self.lane_id.encode()), + Some(id.1), + ) + .await?; + let latest_received_nonce: MessageNonce = + Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; + Ok((id, latest_received_nonce)) + } + + async fn latest_confirmed_received_nonce( + &self, + id: TargetHeaderIdOf

, + ) -> Result<(TargetHeaderIdOf

, MessageNonce), SubstrateError> { + let encoded_response = self + .client + .state_call( + P::INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD.into(), + Bytes(self.lane_id.encode()), + Some(id.1), + ) + .await?; + let latest_received_nonce: MessageNonce = + Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; + Ok((id, latest_received_nonce)) + } + + async fn unrewarded_relayers_state( + &self, + id: TargetHeaderIdOf

, + ) -> Result<(TargetHeaderIdOf

, UnrewardedRelayersState), SubstrateError> { + let encoded_response = self + .client + .state_call( + P::INBOUND_LANE_UNREWARDED_RELAYERS_STATE.into(), + Bytes(self.lane_id.encode()), + Some(id.1), + ) + .await?; + let unrewarded_relayers_state: UnrewardedRelayersState = + Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?; + Ok((id, unrewarded_relayers_state)) + } + + async fn prove_messages_receiving( + &self, + id: TargetHeaderIdOf

, + ) -> Result<(TargetHeaderIdOf

, P::MessagesReceivingProof), SubstrateError> { + let (id, relayers_state) = self.unrewarded_relayers_state(id).await?; + let inbound_data_key = pallet_bridge_messages::storage_keys::inbound_lane_data_key::(&self.lane_id); + let proof = self + .client + .prove_storage(vec![inbound_data_key], id.1) + .await? + .iter_nodes() + .collect(); + let proof = FromBridgedChainMessagesDeliveryProof { + bridged_header_hash: id.1, + storage_proof: proof, + lane: self.lane_id, + }; + Ok((id, (relayers_state, proof))) + } + + async fn submit_messages_proof( + &self, + generated_at_header: SourceHeaderIdOf

, + nonces: RangeInclusive, + proof: P::MessagesProof, + ) -> Result, SubstrateError> { + self.client + .submit_signed_extrinsic(self.lane.target_transactions_author(), |transaction_nonce| { + self.lane.make_messages_delivery_transaction( + transaction_nonce, + generated_at_header, + nonces.clone(), + proof, + ) + }) + .await?; + Ok(nonces) + } + + async fn require_source_header_on_target(&self, id: SourceHeaderIdOf

) { + if let Some(ref source_to_target_headers_relay) = self.source_to_target_headers_relay { + source_to_target_headers_relay.require_finalized_header(id); + } + } +} diff --git a/polkadot/relays/bin-substrate/src/on_demand_headers.rs b/polkadot/relays/bin-substrate/src/on_demand_headers.rs new file mode 100644 index 00000000000..4c86b6a1701 --- /dev/null +++ b/polkadot/relays/bin-substrate/src/on_demand_headers.rs @@ -0,0 +1,255 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! On-demand Substrate -> Substrate headers relay. + +use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate}; +use crate::finality_target::SubstrateFinalityTarget; + +use bp_header_chain::justification::GrandpaJustification; +use finality_relay::TargetClient as FinalityTargetClient; +use futures::{ + channel::{mpsc, oneshot}, + select, FutureExt, StreamExt, +}; +use num_traits::Zero; +use relay_substrate_client::{BlockNumberOf, Chain, Client, HashOf, HeaderIdOf, SyncHeader}; +use relay_utils::{metrics::MetricsParams, BlockNumberBase, HeaderId}; +use std::fmt::Debug; + +/// On-demand Substrate <-> Substrate headers relay. +/// +/// This relay may be started by messages whenever some other relay (e.g. 
messages relay) needs more +/// headers to be relayed to continue its regular work. When enough headers are relayed, on-demand +/// relay may be deactivated. +#[derive(Clone)] +pub struct OnDemandHeadersRelay { + /// Background task name. + background_task_name: String, + /// Required headers to background sender. + required_header_tx: mpsc::Sender>, +} + +impl OnDemandHeadersRelay { + /// Create new on-demand headers relay. + pub fn new( + source_client: Client, + target_client: Client, + pipeline: SubstrateFinalityToSubstrate, + ) -> Self + where + SourceChain: Chain + Debug, + SourceChain::BlockNumber: BlockNumberBase, + TargetChain: Chain + Debug, + TargetChain::BlockNumber: BlockNumberBase, + TargetSign: Clone + Send + Sync + 'static, + SubstrateFinalityToSubstrate: SubstrateFinalitySyncPipeline< + Hash = HashOf, + Number = BlockNumberOf, + Header = SyncHeader, + FinalityProof = GrandpaJustification, + TargetChain = TargetChain, + >, + SubstrateFinalityTarget>: + FinalityTargetClient>, + { + let (required_header_tx, required_header_rx) = mpsc::channel(1); + async_std::task::spawn(async move { + background_task(source_client, target_client, pipeline, required_header_rx).await; + }); + + let background_task_name = format!( + "{}-background", + on_demand_headers_relay_name::() + ); + OnDemandHeadersRelay { + background_task_name, + required_header_tx, + } + } + + /// Someone is asking us to relay given finalized header. + pub fn require_finalized_header(&self, header_id: HeaderIdOf) { + if let Err(error) = self.required_header_tx.clone().try_send(header_id) { + log::error!( + target: "bridge", + "Failed to send require header id {:?} to {:?}: {:?}", + header_id, + self.background_task_name, + error, + ); + } + } +} + +/// Background task that is responsible for starting and stopping headers relay when required. 
+async fn background_task( + source_client: Client, + target_client: Client, + pipeline: SubstrateFinalityToSubstrate, + mut required_header_rx: mpsc::Receiver>, +) where + SourceChain: Chain + Debug, + SourceChain::BlockNumber: BlockNumberBase, + TargetChain: Chain + Debug, + TargetChain::BlockNumber: BlockNumberBase, + TargetSign: Clone + Send + Sync + 'static, + SubstrateFinalityToSubstrate: SubstrateFinalitySyncPipeline< + Hash = HashOf, + Number = BlockNumberOf, + Header = SyncHeader, + FinalityProof = GrandpaJustification, + TargetChain = TargetChain, + >, + SubstrateFinalityTarget>: + FinalityTargetClient>, +{ + let relay_task_name = on_demand_headers_relay_name::(); + let finality_target = SubstrateFinalityTarget::new(target_client.clone(), pipeline.clone()); + + let mut active_headers_relay = None; + let mut required_header_number = Zero::zero(); + let mut relay_exited_rx = futures::future::pending().left_future(); + + loop { + // wait for next target block or for new required header + select! { + _ = async_std::task::sleep(TargetChain::AVERAGE_BLOCK_INTERVAL).fuse() => {}, + required_header_id = required_header_rx.next() => { + match required_header_id { + Some(required_header_id) => { + if required_header_id.0 > required_header_number { + required_header_number = required_header_id.0; + } + }, + None => { + // that's the only way to exit background task - to drop `required_header_tx` + break + }, + } + }, + _ = relay_exited_rx => { + // there could be a situation when we're receiving exit signals after we + // have already stopped relay or when we have already started new relay. 
+ // but it isn't critical, because even if we'll accidentally stop new relay + // we'll restart it almost immediately + stop_on_demand_headers_relay(active_headers_relay.take()).await; + }, + } + + // read best finalized source block from target + let available_header_number = match finality_target.best_finalized_source_block_number().await { + Ok(available_header_number) => available_header_number, + Err(error) => { + log::error!( + target: "bridge", + "Failed to read best finalized {} header from {} in {} relay: {:?}", + SourceChain::NAME, + TargetChain::NAME, + relay_task_name, + error, + ); + + // we don't know what's happening with target client, so better to stop on-demand relay than + // submit unneeded transactions + // => assume that required header is known to the target node + required_header_number + } + }; + + // start or stop headers relay if required + let activate = required_header_number > available_header_number; + match (activate, active_headers_relay.is_some()) { + (true, false) => { + let (relay_exited_tx, new_relay_exited_rx) = oneshot::channel(); + active_headers_relay = start_on_demand_headers_relay( + relay_task_name.clone(), + relay_exited_tx, + source_client.clone(), + target_client.clone(), + pipeline.clone(), + ); + if active_headers_relay.is_some() { + relay_exited_rx = new_relay_exited_rx.right_future(); + } + } + (false, true) => { + stop_on_demand_headers_relay(active_headers_relay.take()).await; + } + _ => (), + } + } +} + +/// On-demand headers relay task name. +fn on_demand_headers_relay_name() -> String { + format!("on-demand-{}-to-{}", SourceChain::NAME, TargetChain::NAME) +} + +/// Start on-demand headers relay task. 
+fn start_on_demand_headers_relay( + task_name: String, + relay_exited_tx: oneshot::Sender<()>, + source_client: Client, + target_client: Client, + pipeline: SubstrateFinalityToSubstrate, +) -> Option> +where + SourceChain::BlockNumber: BlockNumberBase, + SubstrateFinalityToSubstrate: SubstrateFinalitySyncPipeline< + Hash = HashOf, + Number = BlockNumberOf, + Header = SyncHeader, + FinalityProof = GrandpaJustification, + TargetChain = TargetChain, + >, + TargetSign: 'static, +{ + let headers_relay_future = + crate::finality_pipeline::run(pipeline, source_client, target_client, MetricsParams::disabled()); + let closure_task_name = task_name.clone(); + async_std::task::Builder::new() + .name(task_name.clone()) + .spawn(async move { + log::info!(target: "bridge", "Starting {} headers relay", closure_task_name); + let result = headers_relay_future.await; + log::trace!(target: "bridge", "{} headers relay has exited. Result: {:?}", closure_task_name, result); + let _ = relay_exited_tx.send(()); + }) + .map_err(|error| { + log::error!( + target: "bridge", + "Failed to start {} relay: {:?}", + task_name, + error, + ); + }) + .ok() +} + +/// Stop on-demand headers relay task. 
+async fn stop_on_demand_headers_relay(task: Option>) { + if let Some(task) = task { + let task_name = task + .task() + .name() + .expect("on-demand tasks are always started with name; qed") + .to_string(); + log::trace!(target: "bridge", "Cancelling {} headers relay", task_name); + task.cancel().await; + log::info!(target: "bridge", "Cancelled {} headers relay", task_name); + } +} diff --git a/polkadot/relays/client-ethereum/Cargo.toml b/polkadot/relays/client-ethereum/Cargo.toml new file mode 100644 index 00000000000..ebae252ed5a --- /dev/null +++ b/polkadot/relays/client-ethereum/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "relay-ethereum-client" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +bp-eth-poa = { path = "../../primitives/ethereum-poa" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +headers-relay = { path = "../headers" } +hex-literal = "0.3" +jsonrpsee-proc-macros = "=0.2.0-alpha.5" +jsonrpsee-ws-client = "=0.2.0-alpha.5" +libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"] } +log = "0.4.11" +relay-utils = { path = "../utils" } +web3 = { version = "0.15", git = "https://github.com/tomusdrw/rust-web3", branch ="td-ethabi", default-features = false } diff --git a/polkadot/relays/client-ethereum/src/client.rs b/polkadot/relays/client-ethereum/src/client.rs new file mode 100644 index 00000000000..e2def5fb03d --- /dev/null +++ b/polkadot/relays/client-ethereum/src/client.rs @@ -0,0 +1,172 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::rpc::Ethereum; +use crate::types::{ + Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SignedRawTx, SyncState, Transaction, + TransactionHash, H256, U256, +}; +use crate::{ConnectionParams, Error, Result}; + +use jsonrpsee_ws_client::{WsClient as RpcClient, WsClientBuilder as RpcClientBuilder}; +use std::sync::Arc; + +/// Number of headers missing from the Ethereum node for us to consider node not synced. +const MAJOR_SYNC_BLOCKS: u64 = 5; + +/// The client used to interact with an Ethereum node through RPC. +#[derive(Clone)] +pub struct Client { + params: ConnectionParams, + client: Arc, +} + +impl Client { + /// Create a new Ethereum RPC Client. + pub async fn new(params: ConnectionParams) -> Result { + Ok(Self { + client: Self::build_client(¶ms).await?, + params, + }) + } + + /// Build client to use in connection. + async fn build_client(params: &ConnectionParams) -> Result> { + let uri = format!("ws://{}:{}", params.host, params.port); + let client = RpcClientBuilder::default().build(&uri).await?; + Ok(Arc::new(client)) + } + + /// Reopen client connection. + pub async fn reconnect(&mut self) -> Result<()> { + self.client = Self::build_client(&self.params).await?; + Ok(()) + } +} + +impl Client { + /// Returns true if client is connected to at least one peer and is in synced state. + pub async fn ensure_synced(&self) -> Result<()> { + match Ethereum::syncing(&*self.client).await? 
{ + SyncState::NotSyncing => Ok(()), + SyncState::Syncing(syncing) => { + let missing_headers = syncing.highest_block.saturating_sub(syncing.current_block); + if missing_headers > MAJOR_SYNC_BLOCKS.into() { + return Err(Error::ClientNotSynced(missing_headers)); + } + + Ok(()) + } + } + } + + /// Estimate gas usage for the given call. + pub async fn estimate_gas(&self, call_request: CallRequest) -> Result { + Ok(Ethereum::estimate_gas(&*self.client, call_request).await?) + } + + /// Retrieve number of the best known block from the Ethereum node. + pub async fn best_block_number(&self) -> Result { + Ok(Ethereum::block_number(&*self.client).await?.as_u64()) + } + + /// Retrieve number of the best known block from the Ethereum node. + pub async fn header_by_number(&self, block_number: u64) -> Result

{ + let get_full_tx_objects = false; + let header = Ethereum::get_block_by_number(&*self.client, block_number, get_full_tx_objects).await?; + match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() { + true => Ok(header), + false => Err(Error::IncompleteHeader), + } + } + + /// Retrieve block header by its hash from Ethereum node. + pub async fn header_by_hash(&self, hash: H256) -> Result
{ + let get_full_tx_objects = false; + let header = Ethereum::get_block_by_hash(&*self.client, hash, get_full_tx_objects).await?; + match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() { + true => Ok(header), + false => Err(Error::IncompleteHeader), + } + } + + /// Retrieve block header and its transactions by its number from Ethereum node. + pub async fn header_by_number_with_transactions(&self, number: u64) -> Result { + let get_full_tx_objects = true; + let header = + Ethereum::get_block_by_number_with_transactions(&*self.client, number, get_full_tx_objects).await?; + + let is_complete_header = header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some(); + if !is_complete_header { + return Err(Error::IncompleteHeader); + } + + let is_complete_transactions = header.transactions.iter().all(|tx| tx.raw.is_some()); + if !is_complete_transactions { + return Err(Error::IncompleteTransaction); + } + + Ok(header) + } + + /// Retrieve block header and its transactions by its hash from Ethereum node. + pub async fn header_by_hash_with_transactions(&self, hash: H256) -> Result { + let get_full_tx_objects = true; + let header = Ethereum::get_block_by_hash_with_transactions(&*self.client, hash, get_full_tx_objects).await?; + + let is_complete_header = header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some(); + if !is_complete_header { + return Err(Error::IncompleteHeader); + } + + let is_complete_transactions = header.transactions.iter().all(|tx| tx.raw.is_some()); + if !is_complete_transactions { + return Err(Error::IncompleteTransaction); + } + + Ok(header) + } + + /// Retrieve transaction by its hash from Ethereum node. + pub async fn transaction_by_hash(&self, hash: H256) -> Result> { + Ok(Ethereum::transaction_by_hash(&*self.client, hash).await?) + } + + /// Retrieve transaction receipt by transaction hash. 
+ pub async fn transaction_receipt(&self, transaction_hash: H256) -> Result { + Ok(Ethereum::get_transaction_receipt(&*self.client, transaction_hash).await?) + } + + /// Get the nonce of the given account. + pub async fn account_nonce(&self, address: Address) -> Result { + Ok(Ethereum::get_transaction_count(&*self.client, address).await?) + } + + /// Submit an Ethereum transaction. + /// + /// The transaction must already be signed before sending it through this method. + pub async fn submit_transaction(&self, signed_raw_tx: SignedRawTx) -> Result { + let transaction = Bytes(signed_raw_tx); + let tx_hash = Ethereum::submit_transaction(&*self.client, transaction).await?; + log::trace!(target: "bridge", "Sent transaction to Ethereum node: {:?}", tx_hash); + Ok(tx_hash) + } + + /// Call Ethereum smart contract. + pub async fn eth_call(&self, call_transaction: CallRequest) -> Result { + Ok(Ethereum::call(&*self.client, call_transaction).await?) + } +} diff --git a/polkadot/relays/client-ethereum/src/error.rs b/polkadot/relays/client-ethereum/src/error.rs new file mode 100644 index 00000000000..bcd8edc3f33 --- /dev/null +++ b/polkadot/relays/client-ethereum/src/error.rs @@ -0,0 +1,86 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Ethereum node RPC errors. 
+ +use crate::types::U256; + +use jsonrpsee_ws_client::Error as RpcError; +use relay_utils::MaybeConnectionError; + +/// Result type used by Ethereum client. +pub type Result = std::result::Result; + +/// Errors that can occur only when interacting with +/// an Ethereum node through RPC. +#[derive(Debug)] +pub enum Error { + /// An error that can occur when making an HTTP request to + /// an JSON-RPC client. + RpcError(RpcError), + /// Failed to parse response. + ResponseParseFailed(String), + /// We have received a header with missing fields. + IncompleteHeader, + /// We have received a transaction missing a `raw` field. + IncompleteTransaction, + /// An invalid Substrate block number was received from + /// an Ethereum node. + InvalidSubstrateBlockNumber, + /// An invalid index has been received from an Ethereum node. + InvalidIncompleteIndex, + /// The client we're connected to is not synced, so we can't rely on its state. Contains + /// number of unsynced headers. + ClientNotSynced(U256), +} + +impl From for Error { + fn from(error: RpcError) -> Self { + Error::RpcError(error) + } +} + +impl MaybeConnectionError for Error { + fn is_connection_error(&self) -> bool { + matches!( + *self, + Error::RpcError(RpcError::TransportError(_)) + // right now if connection to the ws server is dropped (after it is already established), + // we're getting this error + | Error::RpcError(RpcError::Internal(_)) + | Error::ClientNotSynced(_), + ) + } +} + +impl ToString for Error { + fn to_string(&self) -> String { + match self { + Self::RpcError(e) => e.to_string(), + Self::ResponseParseFailed(e) => e.to_string(), + Self::IncompleteHeader => { + "Incomplete Ethereum Header Received (missing some of required fields - hash, number, logs_bloom)" + .to_string() + } + Self::IncompleteTransaction => "Incomplete Ethereum Transaction (missing required field - raw)".to_string(), + Self::InvalidSubstrateBlockNumber => "Received an invalid Substrate block from Ethereum Node".to_string(), + 
Self::InvalidIncompleteIndex => "Received an invalid incomplete index from Ethereum Node".to_string(), + Self::ClientNotSynced(missing_headers) => { + format!("Ethereum client is not synced: syncing {} headers", missing_headers) + } + } + } +} diff --git a/polkadot/relays/client-ethereum/src/lib.rs b/polkadot/relays/client-ethereum/src/lib.rs new file mode 100644 index 00000000000..8b3c6d8f8e7 --- /dev/null +++ b/polkadot/relays/client-ethereum/src/lib.rs @@ -0,0 +1,48 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Tools to interact with (Open) Ethereum node using RPC methods. + +#![warn(missing_docs)] + +mod client; +mod error; +mod rpc; +mod sign; + +pub use crate::client::Client; +pub use crate::error::{Error, Result}; +pub use crate::sign::{sign_and_submit_transaction, SigningParams}; + +pub mod types; + +/// Ethereum-over-websocket connection params. +#[derive(Debug, Clone)] +pub struct ConnectionParams { + /// Websocket server hostname. + pub host: String, + /// Websocket server TCP port. 
+ pub port: u16, +} + +impl Default for ConnectionParams { + fn default() -> Self { + ConnectionParams { + host: "localhost".into(), + port: 8546, + } + } +} diff --git a/polkadot/relays/client-ethereum/src/rpc.rs b/polkadot/relays/client-ethereum/src/rpc.rs new file mode 100644 index 00000000000..0fb81f7655a --- /dev/null +++ b/polkadot/relays/client-ethereum/src/rpc.rs @@ -0,0 +1,51 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Ethereum node RPC interface. + +use crate::types::{ + Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SyncState, Transaction, TransactionHash, + H256, U256, U64, +}; + +jsonrpsee_proc_macros::rpc_client_api! 
{ + pub(crate) Ethereum { + #[rpc(method = "eth_syncing", positional_params)] + fn syncing() -> SyncState; + #[rpc(method = "eth_estimateGas", positional_params)] + fn estimate_gas(call_request: CallRequest) -> U256; + #[rpc(method = "eth_blockNumber", positional_params)] + fn block_number() -> U64; + #[rpc(method = "eth_getBlockByNumber", positional_params)] + fn get_block_by_number(block_number: U64, full_tx_objs: bool) -> Header; + #[rpc(method = "eth_getBlockByHash", positional_params)] + fn get_block_by_hash(hash: H256, full_tx_objs: bool) -> Header; + #[rpc(method = "eth_getBlockByNumber", positional_params)] + fn get_block_by_number_with_transactions(number: U64, full_tx_objs: bool) -> HeaderWithTransactions; + #[rpc(method = "eth_getBlockByHash", positional_params)] + fn get_block_by_hash_with_transactions(hash: H256, full_tx_objs: bool) -> HeaderWithTransactions; + #[rpc(method = "eth_getTransactionByHash", positional_params)] + fn transaction_by_hash(hash: H256) -> Option; + #[rpc(method = "eth_getTransactionReceipt", positional_params)] + fn get_transaction_receipt(transaction_hash: H256) -> Receipt; + #[rpc(method = "eth_getTransactionCount", positional_params)] + fn get_transaction_count(address: Address) -> U256; + #[rpc(method = "eth_submitTransaction", positional_params)] + fn submit_transaction(transaction: Bytes) -> TransactionHash; + #[rpc(method = "eth_call", positional_params)] + fn call(transaction_call: CallRequest) -> Bytes; + } +} diff --git a/polkadot/relays/client-ethereum/src/sign.rs b/polkadot/relays/client-ethereum/src/sign.rs new file mode 100644 index 00000000000..6f479ab7d5c --- /dev/null +++ b/polkadot/relays/client-ethereum/src/sign.rs @@ -0,0 +1,85 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::types::{Address, CallRequest, U256}; +use crate::{Client, Result}; +use bp_eth_poa::signatures::{secret_to_address, SignTransaction}; +use hex_literal::hex; +use secp256k1::SecretKey; + +/// Ethereum signing params. +#[derive(Clone, Debug)] +pub struct SigningParams { + /// Ethereum chain id. + pub chain_id: u64, + /// Ethereum transactions signer. + pub signer: SecretKey, + /// Gas price we agree to pay. + pub gas_price: U256, +} + +impl Default for SigningParams { + fn default() -> Self { + SigningParams { + chain_id: 0x11, // Parity dev chain + // account that has a lot of ether when we run instant seal engine + // address: 0x00a329c0648769a73afac7f9381e08fb43dbea72 + // secret: 0x4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7 + signer: SecretKey::parse(&hex!( + "4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7" + )) + .expect("secret is hardcoded, thus valid; qed"), + gas_price: 8_000_000_000u64.into(), // 8 Gwei + } + } +} + +/// Sign and submit tranaction using given Ethereum client. +pub async fn sign_and_submit_transaction( + client: &Client, + params: &SigningParams, + contract_address: Option
, + nonce: Option, + double_gas: bool, + encoded_call: Vec, +) -> Result<()> { + let nonce = if let Some(n) = nonce { + n + } else { + let address: Address = secret_to_address(¶ms.signer); + client.account_nonce(address).await? + }; + + let call_request = CallRequest { + to: contract_address, + data: Some(encoded_call.clone().into()), + ..Default::default() + }; + let gas = client.estimate_gas(call_request).await?; + + let raw_transaction = bp_eth_poa::UnsignedTransaction { + nonce, + to: contract_address, + value: U256::zero(), + gas: if double_gas { gas.saturating_mul(2.into()) } else { gas }, + gas_price: params.gas_price, + payload: encoded_call, + } + .sign_by(¶ms.signer, Some(params.chain_id)); + + let _ = client.submit_transaction(raw_transaction).await?; + Ok(()) +} diff --git a/polkadot/relays/client-ethereum/src/types.rs b/polkadot/relays/client-ethereum/src/types.rs new file mode 100644 index 00000000000..f589474aff1 --- /dev/null +++ b/polkadot/relays/client-ethereum/src/types.rs @@ -0,0 +1,80 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Common types that are used in relay <-> Ethereum node communications. 
+ +use headers_relay::sync_types::SourceHeader; + +pub use web3::types::{Address, Bytes, CallRequest, SyncState, H256, U128, U256, U64}; + +/// When header is just received from the Ethereum node, we check that it has +/// both number and hash fields filled. +pub const HEADER_ID_PROOF: &str = "checked on retrieval; qed"; + +/// Ethereum transaction hash type. +pub type HeaderHash = H256; + +/// Ethereum transaction hash type. +pub type TransactionHash = H256; + +/// Ethereum transaction type. +pub type Transaction = web3::types::Transaction; + +/// Ethereum header type. +pub type Header = web3::types::Block; + +/// Ethereum header type used in headers sync. +#[derive(Clone, Debug, PartialEq)] +pub struct SyncHeader(Header); + +impl std::ops::Deref for SyncHeader { + type Target = Header; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +/// Ethereum header with transactions type. +pub type HeaderWithTransactions = web3::types::Block; + +/// Ethereum transaction receipt type. +pub type Receipt = web3::types::TransactionReceipt; + +/// Ethereum header ID. +pub type HeaderId = relay_utils::HeaderId; + +/// A raw Ethereum transaction that's been signed. +pub type SignedRawTx = Vec; + +impl From
for SyncHeader { + fn from(header: Header) -> Self { + Self(header) + } +} + +impl SourceHeader for SyncHeader { + fn id(&self) -> HeaderId { + relay_utils::HeaderId( + self.number.expect(HEADER_ID_PROOF).as_u64(), + self.hash.expect(HEADER_ID_PROOF), + ) + } + + fn parent_id(&self) -> HeaderId { + relay_utils::HeaderId(self.number.expect(HEADER_ID_PROOF).as_u64() - 1, self.parent_hash) + } +} diff --git a/polkadot/relays/client-kusama/Cargo.toml b/polkadot/relays/client-kusama/Cargo.toml new file mode 100644 index 00000000000..b9c397bca6c --- /dev/null +++ b/polkadot/relays/client-kusama/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "relay-kusama-client" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0" } +headers-relay = { path = "../headers" } +relay-substrate-client = { path = "../client-substrate" } +relay-utils = { path = "../utils" } + +# Bridge dependencies + +bp-kusama = { path = "../../primitives/chain-kusama" } + +# Substrate Dependencies + +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/relays/client-kusama/src/lib.rs b/polkadot/relays/client-kusama/src/lib.rs new file mode 100644 index 00000000000..3c3b1cd4c5d --- /dev/null +++ b/polkadot/relays/client-kusama/src/lib.rs @@ -0,0 +1,47 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Types used to connect to the Kusama chain. + +use relay_substrate_client::{Chain, ChainBase}; +use std::time::Duration; + +/// Kusama header id. +pub type HeaderId = relay_utils::HeaderId; + +/// Kusama chain definition +#[derive(Debug, Clone, Copy)] +pub struct Kusama; + +impl ChainBase for Kusama { + type BlockNumber = bp_kusama::BlockNumber; + type Hash = bp_kusama::Hash; + type Hasher = bp_kusama::Hasher; + type Header = bp_kusama::Header; +} + +impl Chain for Kusama { + const NAME: &'static str = "Kusama"; + const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); + + type AccountId = bp_kusama::AccountId; + type Index = bp_kusama::Nonce; + type SignedBlock = bp_kusama::SignedBlock; + type Call = (); +} + +/// Kusama header type used in headers sync. 
+pub type SyncHeader = relay_substrate_client::SyncHeader; diff --git a/polkadot/relays/client-millau/Cargo.toml b/polkadot/relays/client-millau/Cargo.toml new file mode 100644 index 00000000000..e16f06f8528 --- /dev/null +++ b/polkadot/relays/client-millau/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "relay-millau-client" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0" } +headers-relay = { path = "../headers" } +relay-substrate-client = { path = "../client-substrate" } +relay-utils = { path = "../utils" } + +# Supported Chains + +millau-runtime = { path = "../../bin/millau/runtime" } + +# Substrate Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/relays/client-millau/src/lib.rs b/polkadot/relays/client-millau/src/lib.rs new file mode 100644 index 00000000000..1708a8efa12 --- /dev/null +++ b/polkadot/relays/client-millau/src/lib.rs @@ -0,0 +1,104 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Types used to connect to the Millau-Substrate chain. + +use codec::Encode; +use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme}; +use sp_core::{storage::StorageKey, Pair}; +use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; +use std::time::Duration; + +/// Millau header id. +pub type HeaderId = relay_utils::HeaderId; + +/// Millau chain definition. +#[derive(Debug, Clone, Copy)] +pub struct Millau; + +impl ChainBase for Millau { + type BlockNumber = millau_runtime::BlockNumber; + type Hash = millau_runtime::Hash; + type Hasher = millau_runtime::Hashing; + type Header = millau_runtime::Header; +} + +impl Chain for Millau { + const NAME: &'static str = "Millau"; + const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(5); + + type AccountId = millau_runtime::AccountId; + type Index = millau_runtime::Index; + type SignedBlock = millau_runtime::SignedBlock; + type Call = millau_runtime::Call; +} + +impl ChainWithBalances for Millau { + type NativeBalance = millau_runtime::Balance; + + fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { + use frame_support::storage::generator::StorageMap; + StorageKey(frame_system::Account::::storage_map_final_key( + account_id, + )) + } +} + +impl TransactionSignScheme for Millau { + type Chain = Millau; + type AccountKeyPair = sp_core::sr25519::Pair; + type SignedTransaction = millau_runtime::UncheckedExtrinsic; + + fn sign_transaction( + genesis_hash: ::Hash, + signer: &Self::AccountKeyPair, + signer_nonce: ::Index, + call: ::Call, + ) -> Self::SignedTransaction { + let 
raw_payload = SignedPayload::from_raw( + call, + ( + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(sp_runtime::generic::Era::Immortal), + frame_system::CheckNonce::::from(signer_nonce), + frame_system::CheckWeight::::new(), + pallet_transaction_payment::ChargeTransactionPayment::::from(0), + ), + ( + millau_runtime::VERSION.spec_version, + millau_runtime::VERSION.transaction_version, + genesis_hash, + genesis_hash, + (), + (), + (), + ), + ); + let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); + let signer: sp_runtime::MultiSigner = signer.public().into(); + let (call, extra, _) = raw_payload.deconstruct(); + + millau_runtime::UncheckedExtrinsic::new_signed(call, signer.into_account(), signature.into(), extra) + } +} + +/// Millau signing params. +pub type SigningParams = sp_core::sr25519::Pair; + +/// Millau header type used in headers sync. +pub type SyncHeader = relay_substrate_client::SyncHeader; diff --git a/polkadot/relays/client-polkadot/Cargo.toml b/polkadot/relays/client-polkadot/Cargo.toml new file mode 100644 index 00000000000..b148745f5a9 --- /dev/null +++ b/polkadot/relays/client-polkadot/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "relay-polkadot-client" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0" } +headers-relay = { path = "../headers" } +relay-substrate-client = { path = "../client-substrate" } +relay-utils = { path = "../utils" } + +# Bridge dependencies + +bp-polkadot = { path = "../../primitives/chain-polkadot" } + +# Substrate Dependencies + +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment = { 
git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/relays/client-polkadot/src/lib.rs b/polkadot/relays/client-polkadot/src/lib.rs new file mode 100644 index 00000000000..2c117c6d3d1 --- /dev/null +++ b/polkadot/relays/client-polkadot/src/lib.rs @@ -0,0 +1,47 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Types used to connect to the Polkadot chain. + +use relay_substrate_client::{Chain, ChainBase}; +use std::time::Duration; + +/// Polkadot header id. 
+pub type HeaderId = relay_utils::HeaderId; + +/// Polkadot chain definition +#[derive(Debug, Clone, Copy)] +pub struct Polkadot; + +impl ChainBase for Polkadot { + type BlockNumber = bp_polkadot::BlockNumber; + type Hash = bp_polkadot::Hash; + type Hasher = bp_polkadot::Hasher; + type Header = bp_polkadot::Header; +} + +impl Chain for Polkadot { + const NAME: &'static str = "Polkadot"; + const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); + + type AccountId = bp_polkadot::AccountId; + type Index = bp_polkadot::Nonce; + type SignedBlock = bp_polkadot::SignedBlock; + type Call = (); +} + +/// Polkadot header type used in headers sync. +pub type SyncHeader = relay_substrate_client::SyncHeader; diff --git a/polkadot/relays/client-rialto/Cargo.toml b/polkadot/relays/client-rialto/Cargo.toml new file mode 100644 index 00000000000..88e8e12add4 --- /dev/null +++ b/polkadot/relays/client-rialto/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "relay-rialto-client" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0" } +headers-relay = { path = "../headers" } +relay-substrate-client = { path = "../client-substrate" } +relay-utils = { path = "../utils" } + +# Bridge dependencies + +rialto-runtime = { path = "../../bin/rialto/runtime" } + +# Substrate Dependencies + +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git 
a/polkadot/relays/client-rialto/src/lib.rs b/polkadot/relays/client-rialto/src/lib.rs new file mode 100644 index 00000000000..0ddc03681d2 --- /dev/null +++ b/polkadot/relays/client-rialto/src/lib.rs @@ -0,0 +1,104 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Types used to connect to the Rialto-Substrate chain. + +use codec::Encode; +use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme}; +use sp_core::{storage::StorageKey, Pair}; +use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; +use std::time::Duration; + +/// Rialto header id. 
+pub type HeaderId = relay_utils::HeaderId; + +/// Rialto chain definition +#[derive(Debug, Clone, Copy)] +pub struct Rialto; + +impl ChainBase for Rialto { + type BlockNumber = rialto_runtime::BlockNumber; + type Hash = rialto_runtime::Hash; + type Hasher = rialto_runtime::Hashing; + type Header = rialto_runtime::Header; +} + +impl Chain for Rialto { + const NAME: &'static str = "Rialto"; + const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(5); + + type AccountId = rialto_runtime::AccountId; + type Index = rialto_runtime::Index; + type SignedBlock = rialto_runtime::SignedBlock; + type Call = rialto_runtime::Call; +} + +impl ChainWithBalances for Rialto { + type NativeBalance = rialto_runtime::Balance; + + fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { + use frame_support::storage::generator::StorageMap; + StorageKey(frame_system::Account::::storage_map_final_key( + account_id, + )) + } +} + +impl TransactionSignScheme for Rialto { + type Chain = Rialto; + type AccountKeyPair = sp_core::sr25519::Pair; + type SignedTransaction = rialto_runtime::UncheckedExtrinsic; + + fn sign_transaction( + genesis_hash: ::Hash, + signer: &Self::AccountKeyPair, + signer_nonce: ::Index, + call: ::Call, + ) -> Self::SignedTransaction { + let raw_payload = SignedPayload::from_raw( + call, + ( + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(sp_runtime::generic::Era::Immortal), + frame_system::CheckNonce::::from(signer_nonce), + frame_system::CheckWeight::::new(), + pallet_transaction_payment::ChargeTransactionPayment::::from(0), + ), + ( + rialto_runtime::VERSION.spec_version, + rialto_runtime::VERSION.transaction_version, + genesis_hash, + genesis_hash, + (), + (), + (), + ), + ); + let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); + let signer: sp_runtime::MultiSigner = signer.public().into(); + let (call, extra, _) = 
raw_payload.deconstruct(); + + rialto_runtime::UncheckedExtrinsic::new_signed(call, signer.into_account(), signature.into(), extra) + } +} + +/// Rialto signing params. +pub type SigningParams = sp_core::sr25519::Pair; + +/// Rialto header type used in headers sync. +pub type SyncHeader = relay_substrate_client::SyncHeader; diff --git a/polkadot/relays/client-rococo/Cargo.toml b/polkadot/relays/client-rococo/Cargo.toml new file mode 100644 index 00000000000..095f365374a --- /dev/null +++ b/polkadot/relays/client-rococo/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "relay-rococo-client" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0" } +headers-relay = { path = "../headers" } +relay-substrate-client = { path = "../client-substrate" } +relay-utils = { path = "../utils" } + +# Bridge dependencies +bp-rococo = { path = "../../primitives/chain-rococo" } + +# Substrate Dependencies +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/relays/client-rococo/src/lib.rs b/polkadot/relays/client-rococo/src/lib.rs new file mode 100644 index 00000000000..09d205f06e9 --- /dev/null +++ b/polkadot/relays/client-rococo/src/lib.rs @@ -0,0 +1,97 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Types used to connect to the Rococo-Substrate chain. + +use codec::Encode; +use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme}; +use sp_core::{storage::StorageKey, Pair}; +use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; +use std::time::Duration; + +/// Rococo header id. +pub type HeaderId = relay_utils::HeaderId; + +/// Rococo header type used in headers sync. 
+pub type SyncHeader = relay_substrate_client::SyncHeader; + +/// Rococo chain definition +#[derive(Debug, Clone, Copy)] +pub struct Rococo; + +impl ChainBase for Rococo { + type BlockNumber = bp_rococo::BlockNumber; + type Hash = bp_rococo::Hash; + type Hasher = bp_rococo::Hashing; + type Header = bp_rococo::Header; +} + +impl Chain for Rococo { + const NAME: &'static str = "Rococo"; + const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); + + type AccountId = bp_rococo::AccountId; + type Index = bp_rococo::Index; + type SignedBlock = bp_rococo::SignedBlock; + type Call = bp_rococo::Call; +} + +impl ChainWithBalances for Rococo { + type NativeBalance = bp_rococo::Balance; + + fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { + StorageKey(bp_rococo::account_info_storage_key(account_id)) + } +} + +impl TransactionSignScheme for Rococo { + type Chain = Rococo; + type AccountKeyPair = sp_core::sr25519::Pair; + type SignedTransaction = bp_rococo::UncheckedExtrinsic; + + fn sign_transaction( + genesis_hash: ::Hash, + signer: &Self::AccountKeyPair, + signer_nonce: ::Index, + call: ::Call, + ) -> Self::SignedTransaction { + let raw_payload = SignedPayload::new( + call, + bp_rococo::SignedExtensions::new( + bp_rococo::VERSION, + sp_runtime::generic::Era::Immortal, + genesis_hash, + signer_nonce, + 0, + ), + ) + .expect("SignedExtension never fails."); + + let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); + let signer: sp_runtime::MultiSigner = signer.public().into(); + let (call, extra, _) = raw_payload.deconstruct(); + + bp_rococo::UncheckedExtrinsic::new_signed( + call, + sp_runtime::MultiAddress::Id(signer.into_account()), + signature.into(), + extra, + ) + } +} + +/// Rococo signing params. 
+pub type SigningParams = sp_core::sr25519::Pair; diff --git a/polkadot/relays/client-substrate/Cargo.toml b/polkadot/relays/client-substrate/Cargo.toml new file mode 100644 index 00000000000..7b3f46230fc --- /dev/null +++ b/polkadot/relays/client-substrate/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "relay-substrate-client" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +async-std = "1.6.5" +async-trait = "0.1.40" +codec = { package = "parity-scale-codec", version = "2.0.0" } +jsonrpsee-proc-macros = "=0.2.0-alpha.5" +jsonrpsee-ws-client = "=0.2.0-alpha.5" +log = "0.4.11" +num-traits = "0.2" +rand = "0.7" + +# Bridge dependencies + +bp-header-chain = { path = "../../primitives/header-chain" } +bp-messages = { path = "../../primitives/messages" } +bp-runtime = { path = "../../primitives/runtime" } +finality-relay = { path = "../finality" } +headers-relay = { path = "../headers" } +relay-utils = { path = "../utils" } + +# Substrate Dependencies + +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-rpc-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-storage = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-version = { git = "https://github.com/paritytech/substrate", branch = 
"master" } + +#[dev-dependencies] +futures = "0.3.7" diff --git a/polkadot/relays/client-substrate/src/chain.rs b/polkadot/relays/client-substrate/src/chain.rs new file mode 100644 index 00000000000..64c0d6af52b --- /dev/null +++ b/polkadot/relays/client-substrate/src/chain.rs @@ -0,0 +1,105 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use bp_runtime::Chain as ChainBase; +use frame_support::Parameter; +use jsonrpsee_ws_client::{DeserializeOwned, Serialize}; +use num_traits::{CheckedSub, Zero}; +use sp_core::{storage::StorageKey, Pair}; +use sp_runtime::{ + generic::SignedBlock, + traits::{ + AtLeast32Bit, Block as BlockT, Dispatchable, MaybeDisplay, MaybeSerialize, MaybeSerializeDeserialize, Member, + }, + EncodedJustification, +}; +use std::{fmt::Debug, time::Duration}; + +/// Substrate-based chain from minimal relay-client point of view. +pub trait Chain: ChainBase + Clone { + /// Chain name. + const NAME: &'static str; + /// Average block interval. + /// + /// How often blocks are produced on that chain. It's suggested to set this value + /// to match the block time of the chain. + const AVERAGE_BLOCK_INTERVAL: Duration; + + /// The user account identifier type for the runtime. 
+ type AccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord + Default; + /// Index of a transaction used by the chain. + type Index: Parameter + + Member + + MaybeSerialize + + Debug + + Default + + MaybeDisplay + + DeserializeOwned + + AtLeast32Bit + + Copy; + /// Block type. + type SignedBlock: Member + Serialize + DeserializeOwned + BlockWithJustification; + /// The aggregated `Call` type. + type Call: Dispatchable + Debug; +} + +/// Substrate-based chain with `frame_system::Config::AccountData` set to +/// the `pallet_balances::AccountData`. +pub trait ChainWithBalances: Chain { + /// Balance of an account in native tokens. + type NativeBalance: Parameter + Member + DeserializeOwned + Clone + Copy + CheckedSub + PartialOrd + Zero; + + /// Return runtime storage key for getting `frame_system::AccountInfo` of given account. + fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey; +} + +/// Block with justification. +pub trait BlockWithJustification
{ + /// Return block header. + fn header(&self) -> Header; + /// Return block justification, if known. + fn justification(&self) -> Option<&EncodedJustification>; +} + +/// Substrate-based chain transactions signing scheme. +pub trait TransactionSignScheme { + /// Chain that this scheme is to be used. + type Chain: Chain; + /// Type of key pairs used to sign transactions. + type AccountKeyPair: Pair; + /// Signed transaction. + type SignedTransaction; + + /// Create transaction for given runtime call, signed by given account. + fn sign_transaction( + genesis_hash: ::Hash, + signer: &Self::AccountKeyPair, + signer_nonce: ::Index, + call: ::Call, + ) -> Self::SignedTransaction; +} + +impl BlockWithJustification for SignedBlock { + fn header(&self) -> Block::Header { + self.block.header().clone() + } + + fn justification(&self) -> Option<&EncodedJustification> { + self.justifications + .as_ref() + .and_then(|j| j.get(sp_finality_grandpa::GRANDPA_ENGINE_ID)) + } +} diff --git a/polkadot/relays/client-substrate/src/client.rs b/polkadot/relays/client-substrate/src/client.rs new file mode 100644 index 00000000000..892a63d6d5b --- /dev/null +++ b/polkadot/relays/client-substrate/src/client.rs @@ -0,0 +1,275 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! 
Substrate node client. + +use crate::chain::{Chain, ChainWithBalances}; +use crate::rpc::Substrate; +use crate::{ConnectionParams, Error, Result}; + +use async_std::sync::{Arc, Mutex}; +use codec::Decode; +use frame_system::AccountInfo; +use jsonrpsee_ws_client::{traits::SubscriptionClient, v2::params::JsonRpcParams, DeserializeOwned}; +use jsonrpsee_ws_client::{Subscription, WsClient as RpcClient, WsClientBuilder as RpcClientBuilder}; +use num_traits::Zero; +use pallet_balances::AccountData; +use sp_core::{storage::StorageKey, Bytes}; +use sp_trie::StorageProof; +use sp_version::RuntimeVersion; + +const SUB_API_GRANDPA_AUTHORITIES: &str = "GrandpaApi_grandpa_authorities"; +const MAX_SUBSCRIPTION_CAPACITY: usize = 4096; + +/// Opaque justifications subscription type. +pub type JustificationsSubscription = Subscription; + +/// Opaque GRANDPA authorities set. +pub type OpaqueGrandpaAuthoritiesSet = Vec; + +/// Substrate client type. +/// +/// Cloning `Client` is a cheap operation. +pub struct Client { + /// Client connection params. + params: ConnectionParams, + /// Substrate RPC client. + client: Arc, + /// Genesis block hash. + genesis_hash: C::Hash, + /// If several tasks are submitting their transactions simultaneously using `submit_signed_extrinsic` + /// method, they may get the same transaction nonce. So one of transactions will be rejected + /// from the pool. This lock is here to prevent situations like that. 
+ submit_signed_extrinsic_lock: Arc>, +} + +impl Clone for Client { + fn clone(&self) -> Self { + Client { + params: self.params.clone(), + client: self.client.clone(), + genesis_hash: self.genesis_hash, + submit_signed_extrinsic_lock: self.submit_signed_extrinsic_lock.clone(), + } + } +} + +impl std::fmt::Debug for Client { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.debug_struct("Client") + .field("genesis_hash", &self.genesis_hash) + .finish() + } +} + +impl Client { + /// Returns client that is able to call RPCs on Substrate node over websocket connection. + pub async fn new(params: ConnectionParams) -> Result { + let client = Self::build_client(params.clone()).await?; + + let number: C::BlockNumber = Zero::zero(); + let genesis_hash = Substrate::::chain_get_block_hash(&*client, number).await?; + + Ok(Self { + params, + client, + genesis_hash, + submit_signed_extrinsic_lock: Arc::new(Mutex::new(())), + }) + } + + /// Reopen client connection. + pub async fn reconnect(&mut self) -> Result<()> { + self.client = Self::build_client(self.params.clone()).await?; + Ok(()) + } + + /// Build client to use in connection. + async fn build_client(params: ConnectionParams) -> Result> { + let uri = format!( + "{}://{}:{}", + if params.secure { "wss" } else { "ws" }, + params.host, + params.port, + ); + let client = RpcClientBuilder::default() + .max_notifs_per_subscription(MAX_SUBSCRIPTION_CAPACITY) + .build(&uri) + .await?; + + Ok(Arc::new(client)) + } +} + +impl Client { + /// Returns true if client is connected to at least one peer and is in synced state. + pub async fn ensure_synced(&self) -> Result<()> { + let health = Substrate::::system_health(&*self.client).await?; + let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0); + if is_synced { + Ok(()) + } else { + Err(Error::ClientNotSynced(health)) + } + } + + /// Return hash of the genesis block. 
+ pub fn genesis_hash(&self) -> &C::Hash { + &self.genesis_hash + } + + /// Return hash of the best finalized block. + pub async fn best_finalized_header_hash(&self) -> Result { + Ok(Substrate::::chain_get_finalized_head(&*self.client).await?) + } + + /// Returns the best Substrate header. + pub async fn best_header(&self) -> Result + where + C::Header: DeserializeOwned, + { + Ok(Substrate::::chain_get_header(&*self.client, None).await?) + } + + /// Get a Substrate block from its hash. + pub async fn get_block(&self, block_hash: Option) -> Result { + Ok(Substrate::::chain_get_block(&*self.client, block_hash).await?) + } + + /// Get a Substrate header by its hash. + pub async fn header_by_hash(&self, block_hash: C::Hash) -> Result + where + C::Header: DeserializeOwned, + { + Ok(Substrate::::chain_get_header(&*self.client, block_hash).await?) + } + + /// Get a Substrate block hash by its number. + pub async fn block_hash_by_number(&self, number: C::BlockNumber) -> Result { + Ok(Substrate::::chain_get_block_hash(&*self.client, number).await?) + } + + /// Get a Substrate header by its number. + pub async fn header_by_number(&self, block_number: C::BlockNumber) -> Result + where + C::Header: DeserializeOwned, + { + let block_hash = Self::block_hash_by_number(self, block_number).await?; + Ok(Self::header_by_hash(self, block_hash).await?) + } + + /// Return runtime version. + pub async fn runtime_version(&self) -> Result { + Ok(Substrate::::state_runtime_version(&*self.client).await?) + } + + /// Read value from runtime storage. + pub async fn storage_value(&self, storage_key: StorageKey) -> Result> { + Substrate::::state_get_storage(&*self.client, storage_key) + .await? + .map(|encoded_value| T::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed)) + .transpose() + } + + /// Return native tokens balance of the account. 
+ pub async fn free_native_balance(&self, account: C::AccountId) -> Result + where + C: ChainWithBalances, + { + let storage_key = C::account_info_storage_key(&account); + let encoded_account_data = Substrate::::state_get_storage(&*self.client, storage_key) + .await? + .ok_or(Error::AccountDoesNotExist)?; + let decoded_account_data = + AccountInfo::>::decode(&mut &encoded_account_data.0[..]) + .map_err(Error::ResponseParseFailed)?; + Ok(decoded_account_data.data.free) + } + + /// Get the nonce of the given Substrate account. + /// + /// Note: It's the caller's responsibility to make sure `account` is a valid ss58 address. + pub async fn next_account_index(&self, account: C::AccountId) -> Result { + Ok(Substrate::::system_account_next_index(&*self.client, account).await?) + } + + /// Submit unsigned extrinsic for inclusion in a block. + /// + /// Note: The given transaction needs to be SCALE encoded beforehand. + pub async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result { + let tx_hash = Substrate::::author_submit_extrinsic(&*self.client, transaction).await?; + log::trace!(target: "bridge", "Sent transaction to Substrate node: {:?}", tx_hash); + Ok(tx_hash) + } + + /// Submit an extrinsic signed by given account. + /// + /// All calls of this method are synchronized, so there can't be more than one active + /// `submit_signed_extrinsic()` call. This guarantees that no nonces collision may happen + /// if all client instances are clones of the same initial `Client`. + /// + /// Note: The given transaction needs to be SCALE encoded beforehand. 
+ pub async fn submit_signed_extrinsic( + &self, + extrinsic_signer: C::AccountId, + prepare_extrinsic: impl FnOnce(C::Index) -> Bytes, + ) -> Result { + let _guard = self.submit_signed_extrinsic_lock.lock().await; + let transaction_nonce = self.next_account_index(extrinsic_signer).await?; + let extrinsic = prepare_extrinsic(transaction_nonce); + let tx_hash = Substrate::::author_submit_extrinsic(&*self.client, extrinsic).await?; + log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); + Ok(tx_hash) + } + + /// Get the GRANDPA authority set at given block. + pub async fn grandpa_authorities_set(&self, block: C::Hash) -> Result { + let call = SUB_API_GRANDPA_AUTHORITIES.to_string(); + let data = Bytes(Vec::new()); + + let encoded_response = Substrate::::state_call(&*self.client, call, data, Some(block)).await?; + let authority_list = encoded_response.0; + + Ok(authority_list) + } + + /// Execute runtime call at given block. + pub async fn state_call(&self, method: String, data: Bytes, at_block: Option) -> Result { + Substrate::::state_call(&*self.client, method, data, at_block) + .await + .map_err(Into::into) + } + + /// Returns storage proof of given storage keys. + pub async fn prove_storage(&self, keys: Vec, at_block: C::Hash) -> Result { + Substrate::::state_prove_storage(&*self.client, keys, Some(at_block)) + .await + .map(|proof| StorageProof::new(proof.proof.into_iter().map(|b| b.0).collect())) + .map_err(Into::into) + } + + /// Return new justifications stream. + pub async fn subscribe_justifications(&self) -> Result { + Ok(self + .client + .subscribe( + "grandpa_subscribeJustifications", + JsonRpcParams::NoParams, + "grandpa_unsubscribeJustifications", + ) + .await?) 
+ } +} diff --git a/polkadot/relays/client-substrate/src/error.rs b/polkadot/relays/client-substrate/src/error.rs new file mode 100644 index 00000000000..304229ede19 --- /dev/null +++ b/polkadot/relays/client-substrate/src/error.rs @@ -0,0 +1,105 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Substrate node RPC errors. + +use jsonrpsee_ws_client::Error as RpcError; +use relay_utils::MaybeConnectionError; +use sc_rpc_api::system::Health; + +/// Result type used by Substrate client. +pub type Result = std::result::Result; + +/// Errors that can occur only when interacting with +/// a Substrate node through RPC. +#[derive(Debug)] +pub enum Error { + /// An error that can occur when making a request to + /// an JSON-RPC server. + RpcError(RpcError), + /// The response from the server could not be SCALE decoded. + ResponseParseFailed(codec::Error), + /// The Substrate bridge pallet has not yet been initialized. + UninitializedBridgePallet, + /// Account does not exist on the chain. + AccountDoesNotExist, + /// Runtime storage is missing mandatory ":code:" entry. + MissingMandatoryCodeEntry, + /// The client we're connected to is not synced, so we can't rely on its state. + ClientNotSynced(Health), + /// An error has happened when we have tried to parse storage proof. 
+ StorageProofError(bp_runtime::StorageProofError), + /// Custom logic error. + Custom(String), +} + +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Self::RpcError(ref e) => Some(e), + Self::ResponseParseFailed(ref e) => Some(e), + Self::UninitializedBridgePallet => None, + Self::AccountDoesNotExist => None, + Self::MissingMandatoryCodeEntry => None, + Self::ClientNotSynced(_) => None, + Self::StorageProofError(_) => None, + Self::Custom(_) => None, + } + } +} + +impl From for Error { + fn from(error: RpcError) -> Self { + Error::RpcError(error) + } +} + +impl MaybeConnectionError for Error { + fn is_connection_error(&self) -> bool { + matches!( + *self, + Error::RpcError(RpcError::TransportError(_)) + // right now if connection to the ws server is dropped (after it is already established), + // we're getting this error + | Error::RpcError(RpcError::Internal(_)) + | Error::RpcError(RpcError::RestartNeeded(_)) + | Error::ClientNotSynced(_), + ) + } +} + +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let s = match self { + Self::RpcError(e) => e.to_string(), + Self::ResponseParseFailed(e) => e.to_string(), + Self::UninitializedBridgePallet => "The Substrate bridge pallet has not been initialized yet.".into(), + Self::AccountDoesNotExist => "Account does not exist on the chain".into(), + Self::MissingMandatoryCodeEntry => "Mandatory :code: entry is missing from runtime storage".into(), + Self::StorageProofError(e) => format!("Error when parsing storage proof: {:?}", e), + Self::ClientNotSynced(health) => format!("Substrate client is not synced: {}", health), + Self::Custom(e) => e.clone(), + }; + + write!(f, "{}", s) + } +} + +impl From for String { + fn from(error: Error) -> String { + error.to_string() + } +} diff --git a/polkadot/relays/client-substrate/src/finality_source.rs b/polkadot/relays/client-substrate/src/finality_source.rs new 
file mode 100644 index 00000000000..38500934191 --- /dev/null +++ b/polkadot/relays/client-substrate/src/finality_source.rs @@ -0,0 +1,135 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Default generic implementation of finality source for basic Substrate client. + +use crate::chain::{BlockWithJustification, Chain}; +use crate::client::Client; +use crate::error::Error; +use crate::sync_header::SyncHeader; + +use async_trait::async_trait; +use bp_header_chain::justification::GrandpaJustification; +use codec::Decode; +use finality_relay::{FinalitySyncPipeline, SourceClient, SourceHeader}; +use futures::stream::{unfold, Stream, StreamExt}; +use relay_utils::relay_loop::Client as RelayClient; +use sp_runtime::traits::Header as HeaderT; +use std::{marker::PhantomData, pin::Pin}; + +/// Substrate node as finality source. +pub struct FinalitySource { + client: Client, + _phantom: PhantomData

, +} + +impl FinalitySource { + /// Create new headers source using given client. + pub fn new(client: Client) -> Self { + FinalitySource { + client, + _phantom: Default::default(), + } + } +} + +impl Clone for FinalitySource { + fn clone(&self) -> Self { + FinalitySource { + client: self.client.clone(), + _phantom: Default::default(), + } + } +} + +#[async_trait] +impl RelayClient for FinalitySource { + type Error = Error; + + async fn reconnect(&mut self) -> Result<(), Error> { + self.client.reconnect().await + } +} + +#[async_trait] +impl SourceClient

for FinalitySource +where + C: Chain, + C::BlockNumber: relay_utils::BlockNumberBase, + P: FinalitySyncPipeline< + Hash = C::Hash, + Number = C::BlockNumber, + Header = SyncHeader, + FinalityProof = GrandpaJustification, + >, + P::Header: SourceHeader, +{ + type FinalityProofsStream = Pin> + Send>>; + + async fn best_finalized_block_number(&self) -> Result { + // we **CAN** continue to relay finality proofs if source node is out of sync, because + // target node may be missing proofs that are already available at the source + let finalized_header_hash = self.client.best_finalized_header_hash().await?; + let finalized_header = self.client.header_by_hash(finalized_header_hash).await?; + Ok(*finalized_header.number()) + } + + async fn header_and_finality_proof( + &self, + number: P::Number, + ) -> Result<(P::Header, Option), Error> { + let header_hash = self.client.block_hash_by_number(number).await?; + let signed_block = self.client.get_block(Some(header_hash)).await?; + + let justification = signed_block + .justification() + .map(|raw_justification| GrandpaJustification::::decode(&mut raw_justification.as_slice())) + .transpose() + .map_err(Error::ResponseParseFailed)?; + + Ok((signed_block.header().into(), justification)) + } + + async fn finality_proofs(&self) -> Result { + Ok(unfold( + self.client.clone().subscribe_justifications().await?, + move |mut subscription| async move { + loop { + let next_justification = subscription.next().await?; + let decoded_justification = + GrandpaJustification::::decode(&mut &next_justification.0[..]); + + let justification = match decoded_justification { + Ok(j) => j, + Err(err) => { + log::error!( + target: "bridge", + "Failed to decode justification target from the {} justifications stream: {:?}", + P::SOURCE_NAME, + err, + ); + + continue; + } + }; + + return Some((justification, subscription)); + } + }, + ) + .boxed()) + } +} diff --git a/polkadot/relays/client-substrate/src/guard.rs 
b/polkadot/relays/client-substrate/src/guard.rs new file mode 100644 index 00000000000..68fef1c4c9c --- /dev/null +++ b/polkadot/relays/client-substrate/src/guard.rs @@ -0,0 +1,373 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Pallet provides a set of guard functions that are running in background threads +//! and are aborting process if some condition fails. + +use crate::{Chain, ChainWithBalances, Client}; + +use async_trait::async_trait; +use num_traits::CheckedSub; +use sp_version::RuntimeVersion; +use std::{ + collections::VecDeque, + time::{Duration, Instant}, +}; + +/// Guards environment. +#[async_trait] +pub trait Environment: Send + Sync + 'static { + /// Return current runtime version. + async fn runtime_version(&mut self) -> Result; + /// Return free native balance of the account on the chain. + async fn free_native_balance(&mut self, account: C::AccountId) -> Result; + + /// Return current time. + fn now(&self) -> Instant { + Instant::now() + } + /// Sleep given amount of time. + async fn sleep(&mut self, duration: Duration) { + async_std::task::sleep(duration).await + } + /// Abort current process. Called when guard condition check fails. 
+ async fn abort(&mut self) { + std::process::abort(); + } +} + +/// Abort when runtime spec version is different from specified. +pub fn abort_on_spec_version_change(mut env: impl Environment, expected_spec_version: u32) { + async_std::task::spawn(async move { + loop { + let actual_spec_version = env.runtime_version().await; + match actual_spec_version { + Ok(version) if version.spec_version == expected_spec_version => (), + Ok(version) => { + log::error!( + target: "bridge-guard", + "{} runtime spec version has changed from {} to {}. Aborting relay", + C::NAME, + expected_spec_version, + version.spec_version, + ); + + env.abort().await; + } + Err(error) => log::warn!( + target: "bridge-guard", + "Failed to read {} runtime version: {:?}. Relay may need to be stopped manually", + C::NAME, + error, + ), + } + + env.sleep(conditions_check_delay::()).await; + } + }); +} + +/// Abort if, during a 24 hours, free balance of given account is decreased at least by given value. +/// Other components may increase (or decrease) balance of account and it WILL affect logic of the guard. 
+pub fn abort_when_account_balance_decreased( + mut env: impl Environment, + account_id: C::AccountId, + maximal_decrease: C::NativeBalance, +) { + const DAY: Duration = Duration::from_secs(60 * 60 * 24); + + async_std::task::spawn(async move { + let mut balances = VecDeque::new(); + + loop { + let current_time = env.now(); + + // remember balances that are beyound 24h border + let time_border = current_time - DAY; + while balances.front().map(|(time, _)| *time < time_border).unwrap_or(false) { + balances.pop_front(); + } + + // read balance of the account + let current_balance = env.free_native_balance(account_id.clone()).await; + + // remember balance and check difference + match current_balance { + Ok(current_balance) => { + // remember balance + balances.push_back((current_time, current_balance)); + + // check if difference between current and oldest balance is too large + let (oldest_time, oldest_balance) = + balances.front().expect("pushed to queue couple of lines above; qed"); + let balances_difference = oldest_balance.checked_sub(¤t_balance); + if balances_difference > Some(maximal_decrease) { + log::error!( + target: "bridge-guard", + "Balance of {} account {:?} has decreased from {:?} to {:?} in {} minutes. Aborting relay", + C::NAME, + account_id, + oldest_balance, + current_balance, + current_time.duration_since(*oldest_time).as_secs() / 60, + ); + + env.abort().await; + } + } + Err(error) => { + log::warn!( + target: "bridge-guard", + "Failed to read {} account {:?} balance: {:?}. Relay may need to be stopped manually", + C::NAME, + account_id, + error, + ); + } + }; + + env.sleep(conditions_check_delay::()).await; + } + }); +} + +/// Delay between conditions check. 
+fn conditions_check_delay() -> Duration { + C::AVERAGE_BLOCK_INTERVAL * (10 + rand::random::() % 10) +} + +#[async_trait] +impl Environment for Client { + async fn runtime_version(&mut self) -> Result { + Client::::runtime_version(self).await.map_err(|e| e.to_string()) + } + + async fn free_native_balance(&mut self, account: C::AccountId) -> Result { + Client::::free_native_balance(self, account) + .await + .map_err(|e| e.to_string()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use futures::{ + channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}, + future::FutureExt, + stream::StreamExt, + SinkExt, + }; + + #[derive(Debug, Clone)] + struct TestChain; + + impl bp_runtime::Chain for TestChain { + type BlockNumber = u32; + type Hash = sp_core::H256; + type Hasher = sp_runtime::traits::BlakeTwo256; + type Header = sp_runtime::generic::Header; + } + + impl Chain for TestChain { + const NAME: &'static str = "Test"; + const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_millis(1); + + type AccountId = u32; + type Index = u32; + type SignedBlock = + sp_runtime::generic::SignedBlock>; + type Call = (); + } + + impl ChainWithBalances for TestChain { + type NativeBalance = u32; + + fn account_info_storage_key(_account_id: &u32) -> sp_core::storage::StorageKey { + unreachable!() + } + } + + struct TestEnvironment { + runtime_version_rx: UnboundedReceiver, + free_native_balance_rx: UnboundedReceiver, + slept_tx: UnboundedSender<()>, + aborted_tx: UnboundedSender<()>, + } + + #[async_trait] + impl Environment for TestEnvironment { + async fn runtime_version(&mut self) -> Result { + Ok(self.runtime_version_rx.next().await.unwrap_or_default()) + } + + async fn free_native_balance(&mut self, _account: u32) -> Result { + Ok(self.free_native_balance_rx.next().await.unwrap_or_default()) + } + + async fn sleep(&mut self, _duration: Duration) { + let _ = self.slept_tx.send(()).await; + } + + async fn abort(&mut self) { + let _ = self.aborted_tx.send(()).await; + 
// simulate process abort :) + async_std::task::sleep(Duration::from_secs(60)).await; + } + } + + #[test] + fn aborts_when_spec_version_is_changed() { + async_std::task::block_on(async { + let ( + (mut runtime_version_tx, runtime_version_rx), + (_free_native_balance_tx, free_native_balance_rx), + (slept_tx, mut slept_rx), + (aborted_tx, mut aborted_rx), + ) = (unbounded(), unbounded(), unbounded(), unbounded()); + abort_on_spec_version_change( + TestEnvironment { + runtime_version_rx, + free_native_balance_rx, + slept_tx, + aborted_tx, + }, + 0, + ); + + // client responds with wrong version + runtime_version_tx + .send(RuntimeVersion { + spec_version: 42, + ..Default::default() + }) + .await + .unwrap(); + + // then the `abort` function is called + aborted_rx.next().await; + // and we do not reach the `sleep` function call + assert!(slept_rx.next().now_or_never().is_none()); + }); + } + + #[test] + fn does_not_aborts_when_spec_version_is_unchanged() { + async_std::task::block_on(async { + let ( + (mut runtime_version_tx, runtime_version_rx), + (_free_native_balance_tx, free_native_balance_rx), + (slept_tx, mut slept_rx), + (aborted_tx, mut aborted_rx), + ) = (unbounded(), unbounded(), unbounded(), unbounded()); + abort_on_spec_version_change( + TestEnvironment { + runtime_version_rx, + free_native_balance_rx, + slept_tx, + aborted_tx, + }, + 42, + ); + + // client responds with the same version + runtime_version_tx + .send(RuntimeVersion { + spec_version: 42, + ..Default::default() + }) + .await + .unwrap(); + + // then the `sleep` function is called + slept_rx.next().await; + // and the `abort` function is not called + assert!(aborted_rx.next().now_or_never().is_none()); + }); + } + + #[test] + fn aborts_when_balance_is_too_low() { + async_std::task::block_on(async { + let ( + (_runtime_version_tx, runtime_version_rx), + (mut free_native_balance_tx, free_native_balance_rx), + (slept_tx, mut slept_rx), + (aborted_tx, mut aborted_rx), + ) = (unbounded(), 
unbounded(), unbounded(), unbounded()); + abort_when_account_balance_decreased( + TestEnvironment { + runtime_version_rx, + free_native_balance_rx, + slept_tx, + aborted_tx, + }, + 0, + 100, + ); + + // client responds with initial balance + free_native_balance_tx.send(1000).await.unwrap(); + + // then the guard sleeps + slept_rx.next().await; + + // and then client responds with updated balance, which is too low + free_native_balance_tx.send(899).await.unwrap(); + + // then the `abort` function is called + aborted_rx.next().await; + // and we do not reach next `sleep` function call + assert!(slept_rx.next().now_or_never().is_none()); + }); + } + + #[test] + fn does_not_aborts_when_balance_is_enough() { + async_std::task::block_on(async { + let ( + (_runtime_version_tx, runtime_version_rx), + (mut free_native_balance_tx, free_native_balance_rx), + (slept_tx, mut slept_rx), + (aborted_tx, mut aborted_rx), + ) = (unbounded(), unbounded(), unbounded(), unbounded()); + abort_when_account_balance_decreased( + TestEnvironment { + runtime_version_rx, + free_native_balance_rx, + slept_tx, + aborted_tx, + }, + 0, + 100, + ); + + // client responds with initial balance + free_native_balance_tx.send(1000).await.unwrap(); + + // then the guard sleeps + slept_rx.next().await; + + // and then client responds with updated balance, which is enough + free_native_balance_tx.send(950).await.unwrap(); + + // then the `sleep` function is called + slept_rx.next().await; + // and `abort` is not called + assert!(aborted_rx.next().now_or_never().is_none()); + }); + } +} diff --git a/polkadot/relays/client-substrate/src/headers_source.rs b/polkadot/relays/client-substrate/src/headers_source.rs new file mode 100644 index 00000000000..3dfcb220de4 --- /dev/null +++ b/polkadot/relays/client-substrate/src/headers_source.rs @@ -0,0 +1,108 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Default generic implementation of headers source for basic Substrate client. + +use crate::chain::{BlockWithJustification, Chain}; +use crate::client::Client; +use crate::error::Error; + +use async_trait::async_trait; +use headers_relay::{ + sync_loop::SourceClient, + sync_types::{HeaderIdOf, HeadersSyncPipeline, QueuedHeader, SourceHeader}, +}; +use relay_utils::relay_loop::Client as RelayClient; +use sp_runtime::{traits::Header as HeaderT, EncodedJustification}; +use std::marker::PhantomData; + +/// Substrate node as headers source. +pub struct HeadersSource { + client: Client, + _phantom: PhantomData

, +} + +impl HeadersSource { + /// Create new headers source using given client. + pub fn new(client: Client) -> Self { + HeadersSource { + client, + _phantom: Default::default(), + } + } +} + +impl Clone for HeadersSource { + fn clone(&self) -> Self { + HeadersSource { + client: self.client.clone(), + _phantom: Default::default(), + } + } +} + +#[async_trait] +impl RelayClient for HeadersSource { + type Error = Error; + + async fn reconnect(&mut self) -> Result<(), Error> { + self.client.reconnect().await + } +} + +#[async_trait] +impl SourceClient

for HeadersSource +where + C: Chain, + C::BlockNumber: relay_utils::BlockNumberBase, + C::Header: Into, + P: HeadersSyncPipeline, + P::Header: SourceHeader, +{ + async fn best_block_number(&self) -> Result { + // we **CAN** continue to relay headers if source node is out of sync, because + // target node may be missing headers that are already available at the source + Ok(*self.client.best_header().await?.number()) + } + + async fn header_by_hash(&self, hash: P::Hash) -> Result { + self.client + .header_by_hash(hash) + .await + .map(Into::into) + .map_err(Into::into) + } + + async fn header_by_number(&self, number: P::Number) -> Result { + self.client + .header_by_number(number) + .await + .map(Into::into) + .map_err(Into::into) + } + + async fn header_completion(&self, id: HeaderIdOf

) -> Result<(HeaderIdOf

, Option), Error> { + let hash = id.1; + let signed_block = self.client.get_block(Some(hash)).await?; + let grandpa_justification = signed_block.justification().cloned(); + + Ok((id, grandpa_justification)) + } + + async fn header_extra(&self, id: HeaderIdOf

, _header: QueuedHeader

) -> Result<(HeaderIdOf

, ()), Error> { + Ok((id, ())) + } +} diff --git a/polkadot/relays/client-substrate/src/lib.rs b/polkadot/relays/client-substrate/src/lib.rs new file mode 100644 index 00000000000..0f1bfb481e7 --- /dev/null +++ b/polkadot/relays/client-substrate/src/lib.rs @@ -0,0 +1,60 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Tools to interact with (Open) Ethereum node using RPC methods. + +#![warn(missing_docs)] + +mod chain; +mod client; +mod error; +mod rpc; +mod sync_header; + +pub mod finality_source; +pub mod guard; +pub mod headers_source; +pub mod metrics; + +pub use crate::chain::{BlockWithJustification, Chain, ChainWithBalances, TransactionSignScheme}; +pub use crate::client::{Client, JustificationsSubscription, OpaqueGrandpaAuthoritiesSet}; +pub use crate::error::{Error, Result}; +pub use crate::sync_header::SyncHeader; +pub use bp_runtime::{BlockNumberOf, Chain as ChainBase, HashOf, HeaderOf}; + +/// Header id used by the chain. +pub type HeaderIdOf = relay_utils::HeaderId, BlockNumberOf>; + +/// Substrate-over-websocket connection params. +#[derive(Debug, Clone)] +pub struct ConnectionParams { + /// Websocket server hostname. + pub host: String, + /// Websocket server TCP port. + pub port: u16, + /// Use secure websocket connection. 
+ pub secure: bool, +} + +impl Default for ConnectionParams { + fn default() -> Self { + ConnectionParams { + host: "localhost".into(), + port: 9944, + secure: false, + } + } +} diff --git a/polkadot/relays/client-substrate/src/metrics/float_storage_value.rs b/polkadot/relays/client-substrate/src/metrics/float_storage_value.rs new file mode 100644 index 00000000000..f3ba8988eea --- /dev/null +++ b/polkadot/relays/client-substrate/src/metrics/float_storage_value.rs @@ -0,0 +1,82 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::chain::Chain; +use crate::client::Client; + +use async_trait::async_trait; +use codec::Decode; +use relay_utils::metrics::{metric_name, register, Gauge, PrometheusError, Registry, StandaloneMetrics, F64}; +use sp_core::storage::StorageKey; +use sp_runtime::{traits::UniqueSaturatedInto, FixedPointNumber}; +use std::time::Duration; + +/// Storage value update interval (in blocks). +const UPDATE_INTERVAL_IN_BLOCKS: u32 = 5; + +/// Metric that represents fixed-point runtime storage value as float gauge. +#[derive(Clone, Debug)] +pub struct FloatStorageValueMetric { + client: Client, + storage_key: StorageKey, + maybe_default_value: Option, + metric: Gauge, +} + +impl FloatStorageValueMetric { + /// Create new metric. 
+ pub fn new( + registry: &Registry, + prefix: Option<&str>, + client: Client, + storage_key: StorageKey, + maybe_default_value: Option, + name: String, + help: String, + ) -> Result { + Ok(FloatStorageValueMetric { + client, + storage_key, + maybe_default_value, + metric: register(Gauge::new(metric_name(prefix, &name), help)?, registry)?, + }) + } +} + +#[async_trait] +impl StandaloneMetrics for FloatStorageValueMetric +where + T: 'static + Decode + Send + Sync + FixedPointNumber, +{ + fn update_interval(&self) -> Duration { + C::AVERAGE_BLOCK_INTERVAL * UPDATE_INTERVAL_IN_BLOCKS + } + + async fn update(&self) { + relay_utils::metrics::set_gauge_value( + &self.metric, + self.client + .storage_value::(self.storage_key.clone()) + .await + .map(|maybe_storage_value| { + maybe_storage_value.or(self.maybe_default_value).map(|storage_value| { + storage_value.into_inner().unique_saturated_into() as f64 + / T::DIV.unique_saturated_into() as f64 + }) + }), + ); + } +} diff --git a/polkadot/relays/client-substrate/src/metrics/mod.rs b/polkadot/relays/client-substrate/src/metrics/mod.rs new file mode 100644 index 00000000000..177e2a709cf --- /dev/null +++ b/polkadot/relays/client-substrate/src/metrics/mod.rs @@ -0,0 +1,23 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! 
Contains several Substrate-specific metrics that may be exposed by relay. + +pub use float_storage_value::FloatStorageValueMetric; +pub use storage_proof_overhead::StorageProofOverheadMetric; + +mod float_storage_value; +mod storage_proof_overhead; diff --git a/polkadot/relays/client-substrate/src/metrics/storage_proof_overhead.rs b/polkadot/relays/client-substrate/src/metrics/storage_proof_overhead.rs new file mode 100644 index 00000000000..526fe1e048b --- /dev/null +++ b/polkadot/relays/client-substrate/src/metrics/storage_proof_overhead.rs @@ -0,0 +1,104 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::chain::Chain; +use crate::client::Client; +use crate::error::Error; + +use async_trait::async_trait; +use relay_utils::metrics::{metric_name, register, Gauge, PrometheusError, Registry, StandaloneMetrics, U64}; +use sp_core::storage::StorageKey; +use sp_runtime::traits::Header as HeaderT; +use sp_storage::well_known_keys::CODE; +use std::time::Duration; + +/// Storage proof overhead update interval (in blocks). +const UPDATE_INTERVAL_IN_BLOCKS: u32 = 100; + +/// Metric that represents extra size of storage proof as unsigned integer gauge. 
+/// +/// There's one thing to keep in mind when using this metric: the overhead may be slightly +/// different for other values, but this metric gives a good estimation. +#[derive(Debug)] +pub struct StorageProofOverheadMetric { + client: Client, + metric: Gauge, +} + +impl Clone for StorageProofOverheadMetric { + fn clone(&self) -> Self { + StorageProofOverheadMetric { + client: self.client.clone(), + metric: self.metric.clone(), + } + } +} + +impl StorageProofOverheadMetric { + /// Create new metric instance with given name and help. + pub fn new( + registry: &Registry, + prefix: Option<&str>, + client: Client, + name: String, + help: String, + ) -> Result { + Ok(StorageProofOverheadMetric { + client, + metric: register(Gauge::new(metric_name(prefix, &name), help)?, registry)?, + }) + } + + /// Returns approximate storage proof size overhead. + async fn compute_storage_proof_overhead(&self) -> Result { + let best_header_hash = self.client.best_finalized_header_hash().await?; + let best_header = self.client.header_by_hash(best_header_hash).await?; + + let storage_proof = self + .client + .prove_storage(vec![StorageKey(CODE.to_vec())], best_header_hash) + .await?; + let storage_proof_size: usize = storage_proof.clone().iter_nodes().map(|n| n.len()).sum(); + + let storage_value_reader = + bp_runtime::StorageProofChecker::::new(*best_header.state_root(), storage_proof) + .map_err(Error::StorageProofError)?; + let maybe_encoded_storage_value = storage_value_reader + .read_value(CODE) + .map_err(Error::StorageProofError)?; + let encoded_storage_value_size = maybe_encoded_storage_value + .ok_or(Error::MissingMandatoryCodeEntry)? 
+ .len(); + + Ok(storage_proof_size - encoded_storage_value_size) + } +} + +#[async_trait] +impl StandaloneMetrics for StorageProofOverheadMetric { + fn update_interval(&self) -> Duration { + C::AVERAGE_BLOCK_INTERVAL * UPDATE_INTERVAL_IN_BLOCKS + } + + async fn update(&self) { + relay_utils::metrics::set_gauge_value( + &self.metric, + self.compute_storage_proof_overhead() + .await + .map(|overhead| Some(overhead as u64)), + ); + } +} diff --git a/polkadot/relays/client-substrate/src/rpc.rs b/polkadot/relays/client-substrate/src/rpc.rs new file mode 100644 index 00000000000..06df1f705d0 --- /dev/null +++ b/polkadot/relays/client-substrate/src/rpc.rs @@ -0,0 +1,53 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! The most generic Substrate node RPC interface. + +use crate::chain::Chain; + +use sc_rpc_api::{state::ReadProof, system::Health}; +use sp_core::{ + storage::{StorageData, StorageKey}, + Bytes, +}; +use sp_version::RuntimeVersion; + +jsonrpsee_proc_macros::rpc_client_api! 
{ + pub(crate) Substrate { + #[rpc(method = "system_health", positional_params)] + fn system_health() -> Health; + #[rpc(method = "chain_getHeader", positional_params)] + fn chain_get_header(block_hash: Option) -> C::Header; + #[rpc(method = "chain_getFinalizedHead", positional_params)] + fn chain_get_finalized_head() -> C::Hash; + #[rpc(method = "chain_getBlock", positional_params)] + fn chain_get_block(block_hash: Option) -> C::SignedBlock; + #[rpc(method = "chain_getBlockHash", positional_params)] + fn chain_get_block_hash(block_number: Option) -> C::Hash; + #[rpc(method = "system_accountNextIndex", positional_params)] + fn system_account_next_index(account_id: C::AccountId) -> C::Index; + #[rpc(method = "author_submitExtrinsic", positional_params)] + fn author_submit_extrinsic(extrinsic: Bytes) -> C::Hash; + #[rpc(method = "state_call", positional_params)] + fn state_call(method: String, data: Bytes, at_block: Option) -> Bytes; + #[rpc(method = "state_getStorage", positional_params)] + fn state_get_storage(key: StorageKey) -> Option; + #[rpc(method = "state_getReadProof", positional_params)] + fn state_prove_storage(keys: Vec, hash: Option) -> ReadProof; + #[rpc(method = "state_getRuntimeVersion", positional_params)] + fn state_runtime_version() -> RuntimeVersion; + } +} diff --git a/polkadot/relays/client-substrate/src/sync_header.rs b/polkadot/relays/client-substrate/src/sync_header.rs new file mode 100644 index 00000000000..0b74dee690f --- /dev/null +++ b/polkadot/relays/client-substrate/src/sync_header.rs @@ -0,0 +1,73 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use bp_header_chain::find_grandpa_authorities_scheduled_change; +use finality_relay::SourceHeader as FinalitySourceHeader; +use headers_relay::sync_types::SourceHeader; +use num_traits::{CheckedSub, One}; +use relay_utils::HeaderId; +use sp_runtime::traits::Header as HeaderT; + +/// Generic wrapper for `sp_runtime::traits::Header` based headers, that +/// implements `headers_relay::sync_types::SourceHeader` and may be used in headers sync directly. +#[derive(Clone, Debug, PartialEq)] +pub struct SyncHeader

(Header); + +impl
SyncHeader
{ + /// Extracts wrapped header from self. + pub fn into_inner(self) -> Header { + self.0 + } +} + +impl
std::ops::Deref for SyncHeader
{ + type Target = Header; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl
From
for SyncHeader
{ + fn from(header: Header) -> Self { + Self(header) + } +} + +impl SourceHeader for SyncHeader
{ + fn id(&self) -> HeaderId { + relay_utils::HeaderId(*self.0.number(), self.hash()) + } + + fn parent_id(&self) -> HeaderId { + relay_utils::HeaderId( + self.number() + .checked_sub(&One::one()) + .expect("should never be called for genesis header"), + *self.parent_hash(), + ) + } +} + +impl FinalitySourceHeader for SyncHeader
{ + fn number(&self) -> Header::Number { + *self.0.number() + } + + fn is_mandatory(&self) -> bool { + find_grandpa_authorities_scheduled_change(&self.0).is_some() + } +} diff --git a/polkadot/relays/client-westend/Cargo.toml b/polkadot/relays/client-westend/Cargo.toml new file mode 100644 index 00000000000..a408ae3a46d --- /dev/null +++ b/polkadot/relays/client-westend/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "relay-westend-client" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +codec = { package = "parity-scale-codec", version = "2.0.0" } +headers-relay = { path = "../headers" } +relay-substrate-client = { path = "../client-substrate" } +relay-utils = { path = "../utils" } + +# Bridge dependencies + +bp-westend = { path = "../../primitives/chain-westend" } + +# Substrate Dependencies + +frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/relays/client-westend/src/lib.rs b/polkadot/relays/client-westend/src/lib.rs new file mode 100644 index 00000000000..417938ccf5a --- /dev/null +++ b/polkadot/relays/client-westend/src/lib.rs @@ -0,0 +1,97 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Types used to connect to the Westend chain. + +use codec::Encode; +use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme}; +use sp_core::{storage::StorageKey, Pair}; +use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; +use std::time::Duration; + +/// Westend header id. +pub type HeaderId = relay_utils::HeaderId; + +/// Westend header type used in headers sync. 
+pub type SyncHeader = relay_substrate_client::SyncHeader; + +/// Westend chain definition +#[derive(Debug, Clone, Copy)] +pub struct Westend; + +impl ChainBase for Westend { + type BlockNumber = bp_westend::BlockNumber; + type Hash = bp_westend::Hash; + type Hasher = bp_westend::Hasher; + type Header = bp_westend::Header; +} + +impl Chain for Westend { + const NAME: &'static str = "Westend"; + const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); + + type AccountId = bp_westend::AccountId; + type Index = bp_westend::Nonce; + type SignedBlock = bp_westend::SignedBlock; + type Call = bp_westend::Call; +} + +impl ChainWithBalances for Westend { + type NativeBalance = bp_westend::Balance; + + fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { + StorageKey(bp_westend::account_info_storage_key(account_id)) + } +} + +impl TransactionSignScheme for Westend { + type Chain = Westend; + type AccountKeyPair = sp_core::sr25519::Pair; + type SignedTransaction = bp_westend::UncheckedExtrinsic; + + fn sign_transaction( + genesis_hash: ::Hash, + signer: &Self::AccountKeyPair, + signer_nonce: ::Index, + call: ::Call, + ) -> Self::SignedTransaction { + let raw_payload = SignedPayload::new( + call, + bp_westend::SignedExtensions::new( + bp_westend::VERSION, + sp_runtime::generic::Era::Immortal, + genesis_hash, + signer_nonce, + 0, + ), + ) + .expect("SignedExtension never fails."); + + let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); + let signer: sp_runtime::MultiSigner = signer.public().into(); + let (call, extra, _) = raw_payload.deconstruct(); + + bp_westend::UncheckedExtrinsic::new_signed( + call, + sp_runtime::MultiAddress::Id(signer.into_account()), + signature.into(), + extra, + ) + } +} + +/// Westend signing params. 
+pub type SigningParams = sp_core::sr25519::Pair; diff --git a/polkadot/relays/exchange/Cargo.toml b/polkadot/relays/exchange/Cargo.toml new file mode 100644 index 00000000000..62e7a029bbb --- /dev/null +++ b/polkadot/relays/exchange/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "exchange-relay" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +async-std = "1.6.5" +async-trait = "0.1.40" +backoff = "0.2" +futures = "0.3.5" +log = "0.4.11" +num-traits = "0.2" +parking_lot = "0.11.0" +relay-utils = { path = "../utils" } diff --git a/polkadot/relays/exchange/src/exchange.rs b/polkadot/relays/exchange/src/exchange.rs new file mode 100644 index 00000000000..cec0d7cba1f --- /dev/null +++ b/polkadot/relays/exchange/src/exchange.rs @@ -0,0 +1,916 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Relaying proofs of exchange transaction. + +use async_trait::async_trait; +use relay_utils::{ + relay_loop::Client as RelayClient, FailedClient, MaybeConnectionError, StringifiedMaybeConnectionError, +}; +use std::{ + fmt::{Debug, Display}, + string::ToString, +}; + +/// Transaction proof pipeline. +pub trait TransactionProofPipeline { + /// Name of the transaction proof source. 
+ const SOURCE_NAME: &'static str; + /// Name of the transaction proof target. + const TARGET_NAME: &'static str; + + /// Block type. + type Block: SourceBlock; + /// Transaction inclusion proof type. + type TransactionProof; +} + +/// Block that is participating in exchange. +pub trait SourceBlock { + /// Block hash type. + type Hash: Clone + Debug + Display; + /// Block number type. + type Number: Debug + + Display + + Clone + + Copy + + Into + + std::cmp::Ord + + std::ops::Add + + num_traits::One; + /// Block transaction. + type Transaction: SourceTransaction; + + /// Return hash of the block. + fn id(&self) -> relay_utils::HeaderId; + /// Return block transactions iterator. + fn transactions(&self) -> Vec; +} + +/// Transaction that is participating in exchange. +pub trait SourceTransaction { + /// Transaction hash type. + type Hash: Debug + Display; + + /// Return transaction hash. + fn hash(&self) -> Self::Hash; +} + +/// Block hash for given pipeline. +pub type BlockHashOf

= <

::Block as SourceBlock>::Hash; + +/// Block number for given pipeline. +pub type BlockNumberOf

= <

::Block as SourceBlock>::Number; + +/// Transaction hash for given pipeline. +pub type TransactionOf

= <

::Block as SourceBlock>::Transaction; + +/// Transaction hash for given pipeline. +pub type TransactionHashOf

= as SourceTransaction>::Hash; + +/// Header id. +pub type HeaderId

= relay_utils::HeaderId, BlockNumberOf

>; + +/// Source client API. +#[async_trait] +pub trait SourceClient: RelayClient { + /// Sleep until exchange-related data is (probably) updated. + async fn tick(&self); + /// Get block by hash. + async fn block_by_hash(&self, hash: BlockHashOf

) -> Result; + /// Get canonical block by number. + async fn block_by_number(&self, number: BlockNumberOf

) -> Result; + /// Return block + index where transaction has been **mined**. May return `Ok(None)` if transaction + /// is unknown to the source node. + async fn transaction_block(&self, hash: &TransactionHashOf

) + -> Result, usize)>, Self::Error>; + /// Prepare transaction proof. + async fn transaction_proof(&self, block: &P::Block, tx_index: usize) -> Result; +} + +/// Target client API. +#[async_trait] +pub trait TargetClient: RelayClient { + /// Sleep until exchange-related data is (probably) updated. + async fn tick(&self); + /// Returns `Ok(true)` if header is known to the target node. + async fn is_header_known(&self, id: &HeaderId

) -> Result; + /// Returns `Ok(true)` if header is finalized by the target node. + async fn is_header_finalized(&self, id: &HeaderId

) -> Result; + /// Returns best finalized header id. + async fn best_finalized_header_id(&self) -> Result, Self::Error>; + /// Returns `Ok(true)` if transaction proof is need to be relayed. + async fn filter_transaction_proof(&self, proof: &P::TransactionProof) -> Result; + /// Submits transaction proof to the target node. + async fn submit_transaction_proof(&self, proof: P::TransactionProof) -> Result<(), Self::Error>; +} + +/// Block transaction statistics. +#[derive(Debug, Default)] +#[cfg_attr(test, derive(PartialEq))] +pub struct RelayedBlockTransactions { + /// Total number of transactions processed (either relayed or ignored) so far. + pub processed: usize, + /// Total number of transactions successfully relayed so far. + pub relayed: usize, + /// Total number of transactions that we have failed to relay so far. + pub failed: usize, +} + +/// Relay all suitable transactions from single block. +/// +/// If connection error occurs, returns Err with number of successfully processed transactions. +/// If some other error occurs, it is ignored and other transactions are processed. +/// +/// All transaction-level traces are written by this function. This function is not tracing +/// any information about block. +pub async fn relay_block_transactions( + source_client: &impl SourceClient

, + target_client: &impl TargetClient

, + source_block: &P::Block, + mut relayed_transactions: RelayedBlockTransactions, +) -> Result { + let transactions_to_process = source_block + .transactions() + .into_iter() + .enumerate() + .skip(relayed_transactions.processed); + for (source_tx_index, source_tx) in transactions_to_process { + let result = async { + let source_tx_id = format!("{}/{}", source_block.id().1, source_tx_index); + let source_tx_proof = + prepare_transaction_proof(source_client, &source_tx_id, source_block, source_tx_index) + .await + .map_err(|e| (FailedClient::Source, e))?; + + let needs_to_be_relayed = + target_client + .filter_transaction_proof(&source_tx_proof) + .await + .map_err(|err| { + ( + FailedClient::Target, + StringifiedMaybeConnectionError::new( + err.is_connection_error(), + format!("Transaction filtering has failed with {:?}", err), + ), + ) + })?; + + if !needs_to_be_relayed { + return Ok(false); + } + + relay_ready_transaction_proof(target_client, &source_tx_id, source_tx_proof) + .await + .map(|_| true) + .map_err(|e| (FailedClient::Target, e)) + } + .await; + + // We have two options here: + // 1) retry with the same transaction later; + // 2) report error and proceed with next transaction. + // + // Option#1 may seems better, but: + // 1) we do not track if transaction is mined (without an error) by the target node; + // 2) error could be irrecoverable (e.g. when block is already pruned by bridge module or tx + // has invalid format) && we'll end up in infinite loop of retrying the same transaction proof. + // + // So we're going with option#2 here (the only exception are connection errors). 
+ match result { + Ok(false) => { + relayed_transactions.processed += 1; + } + Ok(true) => { + log::info!( + target: "bridge", + "{} transaction {} proof has been successfully submitted to {} node", + P::SOURCE_NAME, + source_tx.hash(), + P::TARGET_NAME, + ); + + relayed_transactions.processed += 1; + relayed_transactions.relayed += 1; + } + Err((failed_client, err)) => { + log::error!( + target: "bridge", + "Error relaying {} transaction {} proof to {} node: {}. {}", + P::SOURCE_NAME, + source_tx.hash(), + P::TARGET_NAME, + err.to_string(), + if err.is_connection_error() { + "Going to retry after delay..." + } else { + "You may need to submit proof of this transaction manually" + }, + ); + + if err.is_connection_error() { + return Err((failed_client, relayed_transactions)); + } + + relayed_transactions.processed += 1; + relayed_transactions.failed += 1; + } + } + } + + Ok(relayed_transactions) +} + +/// Relay single transaction proof. +pub async fn relay_single_transaction_proof( + source_client: &impl SourceClient

, + target_client: &impl TargetClient

, + source_tx_hash: TransactionHashOf

, +) -> Result<(), String> { + // wait for transaction and header on source node + let (source_header_id, source_tx_index) = wait_transaction_mined(source_client, &source_tx_hash).await?; + let source_block = source_client.block_by_hash(source_header_id.1.clone()).await; + let source_block = source_block.map_err(|err| { + format!( + "Error retrieving block {} from {} node: {:?}", + source_header_id.1, + P::SOURCE_NAME, + err, + ) + })?; + + // wait for transaction and header on target node + wait_header_imported(target_client, &source_header_id).await?; + wait_header_finalized(target_client, &source_header_id).await?; + + // and finally - prepare and submit transaction proof to target node + let source_tx_id = format!("{}", source_tx_hash); + relay_ready_transaction_proof( + target_client, + &source_tx_id, + prepare_transaction_proof(source_client, &source_tx_id, &source_block, source_tx_index) + .await + .map_err(|err| err.to_string())?, + ) + .await + .map_err(|err| err.to_string()) +} + +/// Prepare transaction proof. +async fn prepare_transaction_proof( + source_client: &impl SourceClient

, + source_tx_id: &str, + source_block: &P::Block, + source_tx_index: usize, +) -> Result { + source_client + .transaction_proof(source_block, source_tx_index) + .await + .map_err(|err| { + StringifiedMaybeConnectionError::new( + err.is_connection_error(), + format!( + "Error building transaction {} proof on {} node: {:?}", + source_tx_id, + P::SOURCE_NAME, + err, + ), + ) + }) +} + +/// Relay prepared proof of transaction. +async fn relay_ready_transaction_proof( + target_client: &impl TargetClient

, + source_tx_id: &str, + source_tx_proof: P::TransactionProof, +) -> Result<(), StringifiedMaybeConnectionError> { + target_client + .submit_transaction_proof(source_tx_proof) + .await + .map_err(|err| { + StringifiedMaybeConnectionError::new( + err.is_connection_error(), + format!( + "Error submitting transaction {} proof to {} node: {:?}", + source_tx_id, + P::TARGET_NAME, + err, + ), + ) + }) +} + +/// Wait until transaction is mined by source node. +async fn wait_transaction_mined( + source_client: &impl SourceClient

, + source_tx_hash: &TransactionHashOf

, +) -> Result<(HeaderId

, usize), String> { + loop { + let source_header_and_tx = source_client.transaction_block(&source_tx_hash).await.map_err(|err| { + format!( + "Error retrieving transaction {} from {} node: {:?}", + source_tx_hash, + P::SOURCE_NAME, + err, + ) + })?; + match source_header_and_tx { + Some((source_header_id, source_tx)) => { + log::info!( + target: "bridge", + "Transaction {} is retrieved from {} node. Continuing...", + source_tx_hash, + P::SOURCE_NAME, + ); + + return Ok((source_header_id, source_tx)); + } + None => { + log::info!( + target: "bridge", + "Waiting for transaction {} to be mined by {} node...", + source_tx_hash, + P::SOURCE_NAME, + ); + + source_client.tick().await; + } + } + } +} + +/// Wait until target node imports required header. +async fn wait_header_imported( + target_client: &impl TargetClient

, + source_header_id: &HeaderId

, +) -> Result<(), String> { + loop { + let is_header_known = target_client.is_header_known(&source_header_id).await.map_err(|err| { + format!( + "Failed to check existence of header {}/{} on {} node: {:?}", + source_header_id.0, + source_header_id.1, + P::TARGET_NAME, + err, + ) + })?; + match is_header_known { + true => { + log::info!( + target: "bridge", + "Header {}/{} is known to {} node. Continuing.", + source_header_id.0, + source_header_id.1, + P::TARGET_NAME, + ); + + return Ok(()); + } + false => { + log::info!( + target: "bridge", + "Waiting for header {}/{} to be imported by {} node...", + source_header_id.0, + source_header_id.1, + P::TARGET_NAME, + ); + + target_client.tick().await; + } + } + } +} + +/// Wait until target node finalizes required header. +async fn wait_header_finalized( + target_client: &impl TargetClient

, + source_header_id: &HeaderId

, +) -> Result<(), String> { + loop { + let is_header_finalized = target_client + .is_header_finalized(&source_header_id) + .await + .map_err(|err| { + format!( + "Failed to check finality of header {}/{} on {} node: {:?}", + source_header_id.0, + source_header_id.1, + P::TARGET_NAME, + err, + ) + })?; + match is_header_finalized { + true => { + log::info!( + target: "bridge", + "Header {}/{} is finalizd by {} node. Continuing.", + source_header_id.0, + source_header_id.1, + P::TARGET_NAME, + ); + + return Ok(()); + } + false => { + log::info!( + target: "bridge", + "Waiting for header {}/{} to be finalized by {} node...", + source_header_id.0, + source_header_id.1, + P::TARGET_NAME, + ); + + target_client.tick().await; + } + } + } +} + +#[cfg(test)] +pub(crate) mod tests { + use super::*; + + use parking_lot::Mutex; + use relay_utils::HeaderId; + use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + }; + + pub fn test_block_id() -> TestHeaderId { + HeaderId(1, 1) + } + + pub fn test_next_block_id() -> TestHeaderId { + HeaderId(2, 2) + } + + pub fn test_transaction_hash(tx_index: u64) -> TestTransactionHash { + 200 + tx_index + } + + pub fn test_transaction(tx_index: u64) -> TestTransaction { + TestTransaction(test_transaction_hash(tx_index)) + } + + pub fn test_block() -> TestBlock { + TestBlock(test_block_id(), vec![test_transaction(0)]) + } + + pub fn test_next_block() -> TestBlock { + TestBlock(test_next_block_id(), vec![test_transaction(1)]) + } + + pub type TestBlockNumber = u64; + pub type TestBlockHash = u64; + pub type TestTransactionHash = u64; + pub type TestHeaderId = HeaderId; + + #[derive(Debug, Clone, PartialEq)] + pub struct TestError(pub bool); + + impl MaybeConnectionError for TestError { + fn is_connection_error(&self) -> bool { + self.0 + } + } + + pub struct TestTransactionProofPipeline; + + impl TransactionProofPipeline for TestTransactionProofPipeline { + const SOURCE_NAME: &'static str = "TestSource"; + const TARGET_NAME: &'static str 
= "TestTarget"; + + type Block = TestBlock; + type TransactionProof = TestTransactionProof; + } + + #[derive(Debug, Clone)] + pub struct TestBlock(pub TestHeaderId, pub Vec); + + impl SourceBlock for TestBlock { + type Hash = TestBlockHash; + type Number = TestBlockNumber; + type Transaction = TestTransaction; + + fn id(&self) -> TestHeaderId { + self.0 + } + + fn transactions(&self) -> Vec { + self.1.clone() + } + } + + #[derive(Debug, Clone)] + pub struct TestTransaction(pub TestTransactionHash); + + impl SourceTransaction for TestTransaction { + type Hash = TestTransactionHash; + + fn hash(&self) -> Self::Hash { + self.0 + } + } + + #[derive(Debug, Clone, PartialEq)] + pub struct TestTransactionProof(pub TestTransactionHash); + + #[derive(Clone)] + pub struct TestTransactionsSource { + pub on_tick: Arc, + pub data: Arc>, + } + + pub struct TestTransactionsSourceData { + pub block: Result, + pub transaction_block: Result, TestError>, + pub proofs_to_fail: HashMap, + } + + impl TestTransactionsSource { + pub fn new(on_tick: Box) -> Self { + Self { + on_tick: Arc::new(on_tick), + data: Arc::new(Mutex::new(TestTransactionsSourceData { + block: Ok(test_block()), + transaction_block: Ok(Some((test_block_id(), 0))), + proofs_to_fail: HashMap::new(), + })), + } + } + } + + #[async_trait] + impl RelayClient for TestTransactionsSource { + type Error = TestError; + + async fn reconnect(&mut self) -> Result<(), TestError> { + Ok(()) + } + } + + #[async_trait] + impl SourceClient for TestTransactionsSource { + async fn tick(&self) { + (self.on_tick)(&mut *self.data.lock()) + } + + async fn block_by_hash(&self, _: TestBlockHash) -> Result { + self.data.lock().block.clone() + } + + async fn block_by_number(&self, _: TestBlockNumber) -> Result { + self.data.lock().block.clone() + } + + async fn transaction_block(&self, _: &TestTransactionHash) -> Result, TestError> { + self.data.lock().transaction_block.clone() + } + + async fn transaction_proof(&self, block: &TestBlock, index: 
usize) -> Result { + let tx_hash = block.1[index].hash(); + let proof_error = self.data.lock().proofs_to_fail.get(&tx_hash).cloned(); + if let Some(err) = proof_error { + return Err(err); + } + + Ok(TestTransactionProof(tx_hash)) + } + } + + #[derive(Clone)] + pub struct TestTransactionsTarget { + pub on_tick: Arc, + pub data: Arc>, + } + + pub struct TestTransactionsTargetData { + pub is_header_known: Result, + pub is_header_finalized: Result, + pub best_finalized_header_id: Result, + pub transactions_to_accept: HashSet, + pub submitted_proofs: Vec, + } + + impl TestTransactionsTarget { + pub fn new(on_tick: Box) -> Self { + Self { + on_tick: Arc::new(on_tick), + data: Arc::new(Mutex::new(TestTransactionsTargetData { + is_header_known: Ok(true), + is_header_finalized: Ok(true), + best_finalized_header_id: Ok(test_block_id()), + transactions_to_accept: vec![test_transaction_hash(0)].into_iter().collect(), + submitted_proofs: Vec::new(), + })), + } + } + } + + #[async_trait] + impl RelayClient for TestTransactionsTarget { + type Error = TestError; + + async fn reconnect(&mut self) -> Result<(), TestError> { + Ok(()) + } + } + + #[async_trait] + impl TargetClient for TestTransactionsTarget { + async fn tick(&self) { + (self.on_tick)(&mut *self.data.lock()) + } + + async fn is_header_known(&self, _: &TestHeaderId) -> Result { + self.data.lock().is_header_known.clone() + } + + async fn is_header_finalized(&self, _: &TestHeaderId) -> Result { + self.data.lock().is_header_finalized.clone() + } + + async fn best_finalized_header_id(&self) -> Result { + self.data.lock().best_finalized_header_id.clone() + } + + async fn filter_transaction_proof(&self, proof: &TestTransactionProof) -> Result { + Ok(self.data.lock().transactions_to_accept.contains(&proof.0)) + } + + async fn submit_transaction_proof(&self, proof: TestTransactionProof) -> Result<(), TestError> { + self.data.lock().submitted_proofs.push(proof); + Ok(()) + } + } + + fn ensure_relay_single_success(source: 
&TestTransactionsSource, target: &TestTransactionsTarget) { + assert_eq!( + async_std::task::block_on(relay_single_transaction_proof(source, target, test_transaction_hash(0),)), + Ok(()), + ); + assert_eq!( + target.data.lock().submitted_proofs, + vec![TestTransactionProof(test_transaction_hash(0))], + ); + } + + fn ensure_relay_single_failure(source: TestTransactionsSource, target: TestTransactionsTarget) { + assert!(async_std::task::block_on(relay_single_transaction_proof( + &source, + &target, + test_transaction_hash(0), + )) + .is_err(),); + assert!(target.data.lock().submitted_proofs.is_empty()); + } + + #[test] + fn ready_transaction_proof_relayed_immediately() { + let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); + let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); + ensure_relay_single_success(&source, &target) + } + + #[test] + fn relay_transaction_proof_waits_for_transaction_to_be_mined() { + let source = TestTransactionsSource::new(Box::new(|source_data| { + assert_eq!(source_data.transaction_block, Ok(None)); + source_data.transaction_block = Ok(Some((test_block_id(), 0))); + })); + let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); + + // transaction is not yet mined, but will be available after first wait (tick) + source.data.lock().transaction_block = Ok(None); + + ensure_relay_single_success(&source, &target) + } + + #[test] + fn relay_transaction_fails_when_transaction_retrieval_fails() { + let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); + let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); + + source.data.lock().transaction_block = Err(TestError(false)); + + ensure_relay_single_failure(source, target) + } + + #[test] + fn relay_transaction_fails_when_proof_retrieval_fails() { + let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks 
allowed"))); + let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); + + source + .data + .lock() + .proofs_to_fail + .insert(test_transaction_hash(0), TestError(false)); + + ensure_relay_single_failure(source, target) + } + + #[test] + fn relay_transaction_proof_waits_for_header_to_be_imported() { + let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); + let target = TestTransactionsTarget::new(Box::new(|target_data| { + assert_eq!(target_data.is_header_known, Ok(false)); + target_data.is_header_known = Ok(true); + })); + + // header is not yet imported, but will be available after first wait (tick) + target.data.lock().is_header_known = Ok(false); + + ensure_relay_single_success(&source, &target) + } + + #[test] + fn relay_transaction_proof_fails_when_is_header_known_fails() { + let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); + let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); + + target.data.lock().is_header_known = Err(TestError(false)); + + ensure_relay_single_failure(source, target) + } + + #[test] + fn relay_transaction_proof_waits_for_header_to_be_finalized() { + let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); + let target = TestTransactionsTarget::new(Box::new(|target_data| { + assert_eq!(target_data.is_header_finalized, Ok(false)); + target_data.is_header_finalized = Ok(true); + })); + + // header is not yet finalized, but will be available after first wait (tick) + target.data.lock().is_header_finalized = Ok(false); + + ensure_relay_single_success(&source, &target) + } + + #[test] + fn relay_transaction_proof_fails_when_is_header_finalized_fails() { + let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); + let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); + + 
target.data.lock().is_header_finalized = Err(TestError(false)); + + ensure_relay_single_failure(source, target) + } + + #[test] + fn relay_transaction_proof_fails_when_target_node_rejects_proof() { + let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); + let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); + + target + .data + .lock() + .transactions_to_accept + .remove(&test_transaction_hash(0)); + + ensure_relay_single_success(&source, &target) + } + + fn test_relay_block_transactions( + source: &TestTransactionsSource, + target: &TestTransactionsTarget, + pre_relayed: RelayedBlockTransactions, + ) -> Result { + async_std::task::block_on(relay_block_transactions( + source, + target, + &TestBlock( + test_block_id(), + vec![test_transaction(0), test_transaction(1), test_transaction(2)], + ), + pre_relayed, + )) + .map_err(|(_, transactions)| transactions) + } + + #[test] + fn relay_block_transactions_process_all_transactions() { + let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); + let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); + + // let's only accept tx#1 + target + .data + .lock() + .transactions_to_accept + .remove(&test_transaction_hash(0)); + target + .data + .lock() + .transactions_to_accept + .insert(test_transaction_hash(1)); + + let relayed_transactions = test_relay_block_transactions(&source, &target, Default::default()); + assert_eq!( + relayed_transactions, + Ok(RelayedBlockTransactions { + processed: 3, + relayed: 1, + failed: 0, + }), + ); + assert_eq!( + target.data.lock().submitted_proofs, + vec![TestTransactionProof(test_transaction_hash(1))], + ); + } + + #[test] + fn relay_block_transactions_ignores_transaction_failure() { + let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); + let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks 
allowed"))); + + // let's reject proof for tx#0 + source + .data + .lock() + .proofs_to_fail + .insert(test_transaction_hash(0), TestError(false)); + + let relayed_transactions = test_relay_block_transactions(&source, &target, Default::default()); + assert_eq!( + relayed_transactions, + Ok(RelayedBlockTransactions { + processed: 3, + relayed: 0, + failed: 1, + }), + ); + assert_eq!(target.data.lock().submitted_proofs, vec![],); + } + + #[test] + fn relay_block_transactions_fails_on_connection_error() { + let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed"))); + let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed"))); + + // fail with connection error when preparing proof for tx#1 + source + .data + .lock() + .proofs_to_fail + .insert(test_transaction_hash(1), TestError(true)); + + let relayed_transactions = test_relay_block_transactions(&source, &target, Default::default()); + assert_eq!( + relayed_transactions, + Err(RelayedBlockTransactions { + processed: 1, + relayed: 1, + failed: 0, + }), + ); + assert_eq!( + target.data.lock().submitted_proofs, + vec![TestTransactionProof(test_transaction_hash(0))], + ); + + // now do not fail on tx#2 + source.data.lock().proofs_to_fail.clear(); + // and also relay tx#3 + target + .data + .lock() + .transactions_to_accept + .insert(test_transaction_hash(2)); + + let relayed_transactions = test_relay_block_transactions(&source, &target, relayed_transactions.unwrap_err()); + assert_eq!( + relayed_transactions, + Ok(RelayedBlockTransactions { + processed: 3, + relayed: 2, + failed: 0, + }), + ); + assert_eq!( + target.data.lock().submitted_proofs, + vec![ + TestTransactionProof(test_transaction_hash(0)), + TestTransactionProof(test_transaction_hash(2)) + ], + ); + } +} diff --git a/polkadot/relays/exchange/src/exchange_loop.rs b/polkadot/relays/exchange/src/exchange_loop.rs new file mode 100644 index 00000000000..b46d34e047a --- /dev/null +++ 
b/polkadot/relays/exchange/src/exchange_loop.rs @@ -0,0 +1,315 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Relaying proofs of exchange transactions. + +use crate::exchange::{ + relay_block_transactions, BlockNumberOf, RelayedBlockTransactions, SourceClient, TargetClient, + TransactionProofPipeline, +}; +use crate::exchange_loop_metrics::ExchangeLoopMetrics; + +use backoff::backoff::Backoff; +use futures::{future::FutureExt, select}; +use num_traits::One; +use relay_utils::{ + metrics::{GlobalMetrics, MetricsParams}, + retry_backoff, FailedClient, MaybeConnectionError, +}; +use std::future::Future; + +/// Transactions proofs relay state. +#[derive(Debug)] +pub struct TransactionProofsRelayState { + /// Number of last header we have processed so far. + pub best_processed_header_number: BlockNumber, +} + +/// Transactions proofs relay storage. +pub trait TransactionProofsRelayStorage: Clone { + /// Associated block number. + type BlockNumber; + + /// Get relay state. + fn state(&self) -> TransactionProofsRelayState; + /// Update relay state. + fn set_state(&mut self, state: &TransactionProofsRelayState); +} + +/// In-memory storage for auto-relay loop. 
+#[derive(Debug, Clone)] +pub struct InMemoryStorage { + best_processed_header_number: BlockNumber, +} + +impl InMemoryStorage { + /// Created new in-memory storage with given best processed block number. + pub fn new(best_processed_header_number: BlockNumber) -> Self { + InMemoryStorage { + best_processed_header_number, + } + } +} + +impl TransactionProofsRelayStorage for InMemoryStorage { + type BlockNumber = BlockNumber; + + fn state(&self) -> TransactionProofsRelayState { + TransactionProofsRelayState { + best_processed_header_number: self.best_processed_header_number, + } + } + + fn set_state(&mut self, state: &TransactionProofsRelayState) { + self.best_processed_header_number = state.best_processed_header_number; + } +} + +/// Return prefix that will be used by default to expose Prometheus metrics of the exchange loop. +pub fn metrics_prefix() -> String { + format!("{}_to_{}_Exchange", P::SOURCE_NAME, P::TARGET_NAME) +} + +/// Run proofs synchronization. +pub async fn run( + storage: impl TransactionProofsRelayStorage>, + source_client: impl SourceClient

, + target_client: impl TargetClient

, + metrics_params: MetricsParams, + exit_signal: impl Future, +) -> Result<(), String> { + let exit_signal = exit_signal.shared(); + + relay_utils::relay_loop(source_client, target_client) + .with_metrics(Some(metrics_prefix::

()), metrics_params) + .loop_metric(|registry, prefix| ExchangeLoopMetrics::new(registry, prefix))? + .standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))? + .expose() + .await? + .run(|source_client, target_client, metrics| { + run_until_connection_lost( + storage.clone(), + source_client, + target_client, + metrics, + exit_signal.clone(), + ) + }) + .await +} + +/// Run proofs synchronization. +async fn run_until_connection_lost( + mut storage: impl TransactionProofsRelayStorage>, + source_client: impl SourceClient

, + target_client: impl TargetClient

, + metrics_exch: Option, + exit_signal: impl Future, +) -> Result<(), FailedClient> { + let mut retry_backoff = retry_backoff(); + let mut state = storage.state(); + let mut current_finalized_block = None; + + let exit_signal = exit_signal.fuse(); + + futures::pin_mut!(exit_signal); + + loop { + let iteration_result = run_loop_iteration( + &mut storage, + &source_client, + &target_client, + &mut state, + &mut current_finalized_block, + metrics_exch.as_ref(), + ) + .await; + + if let Err((is_connection_error, failed_client)) = iteration_result { + if is_connection_error { + return Err(failed_client); + } + + let retry_timeout = retry_backoff + .next_backoff() + .unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY); + select! { + _ = async_std::task::sleep(retry_timeout).fuse() => {}, + _ = exit_signal => return Ok(()), + } + } else { + retry_backoff.reset(); + + select! { + _ = source_client.tick().fuse() => {}, + _ = exit_signal => return Ok(()), + } + } + } +} + +/// Run exchange loop until we need to break. +async fn run_loop_iteration( + storage: &mut impl TransactionProofsRelayStorage>, + source_client: &impl SourceClient

, + target_client: &impl TargetClient

, + state: &mut TransactionProofsRelayState>, + current_finalized_block: &mut Option<(P::Block, RelayedBlockTransactions)>, + exchange_loop_metrics: Option<&ExchangeLoopMetrics>, +) -> Result<(), (bool, FailedClient)> { + let best_finalized_header_id = match target_client.best_finalized_header_id().await { + Ok(best_finalized_header_id) => { + log::debug!( + target: "bridge", + "Got best finalized {} block from {} node: {:?}", + P::SOURCE_NAME, + P::TARGET_NAME, + best_finalized_header_id, + ); + + best_finalized_header_id + } + Err(err) => { + log::error!( + target: "bridge", + "Failed to retrieve best {} header id from {} node: {:?}. Going to retry...", + P::SOURCE_NAME, + P::TARGET_NAME, + err, + ); + + return Err((err.is_connection_error(), FailedClient::Target)); + } + }; + + loop { + // if we already have some finalized block body, try to relay its transactions + if let Some((block, relayed_transactions)) = current_finalized_block.take() { + let result = relay_block_transactions(source_client, target_client, &block, relayed_transactions).await; + + match result { + Ok(relayed_transactions) => { + log::info!( + target: "bridge", + "Relay has processed {} block #{}. Total/Relayed/Failed transactions: {}/{}/{}", + P::SOURCE_NAME, + state.best_processed_header_number, + relayed_transactions.processed, + relayed_transactions.relayed, + relayed_transactions.failed, + ); + + state.best_processed_header_number = state.best_processed_header_number + One::one(); + storage.set_state(state); + + if let Some(ref exchange_loop_metrics) = exchange_loop_metrics { + exchange_loop_metrics.update::

( + state.best_processed_header_number, + best_finalized_header_id.0, + relayed_transactions, + ); + } + + // we have just updated state => proceed to next block retrieval + } + Err((failed_client, relayed_transactions)) => { + *current_finalized_block = Some((block, relayed_transactions)); + return Err((true, failed_client)); + } + } + } + + // we may need to retrieve finalized block body from source node + if best_finalized_header_id.0 > state.best_processed_header_number { + let next_block_number = state.best_processed_header_number + One::one(); + let result = source_client.block_by_number(next_block_number).await; + + match result { + Ok(block) => { + *current_finalized_block = Some((block, RelayedBlockTransactions::default())); + + // we have received new finalized block => go back to relay its transactions + continue; + } + Err(err) => { + log::error!( + target: "bridge", + "Failed to retrieve canonical block #{} from {} node: {:?}. Going to retry...", + next_block_number, + P::SOURCE_NAME, + err, + ); + + return Err((err.is_connection_error(), FailedClient::Source)); + } + } + } + + // there are no any transactions we need to relay => wait for new data + return Ok(()); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::exchange::tests::{ + test_next_block, test_next_block_id, test_transaction_hash, TestTransactionProof, TestTransactionsSource, + TestTransactionsTarget, + }; + use futures::{future::FutureExt, stream::StreamExt}; + + #[test] + fn exchange_loop_is_able_to_relay_proofs() { + let storage = InMemoryStorage { + best_processed_header_number: 0, + }; + let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no target ticks allowed"))); + let target_data = target.data.clone(); + let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded(); + + let source = TestTransactionsSource::new(Box::new(move |data| { + let transaction1_relayed = target_data + .lock() + .submitted_proofs + 
.contains(&TestTransactionProof(test_transaction_hash(0))); + let transaction2_relayed = target_data + .lock() + .submitted_proofs + .contains(&TestTransactionProof(test_transaction_hash(1))); + match (transaction1_relayed, transaction2_relayed) { + (true, true) => exit_sender.unbounded_send(()).unwrap(), + (true, false) => { + data.block = Ok(test_next_block()); + target_data.lock().best_finalized_header_id = Ok(test_next_block_id()); + target_data + .lock() + .transactions_to_accept + .insert(test_transaction_hash(1)); + } + _ => (), + } + })); + + let _ = async_std::task::block_on(run( + storage, + source, + target, + MetricsParams::disabled(), + exit_receiver.into_future().map(|(_, _)| ()), + )); + } +} diff --git a/polkadot/relays/exchange/src/exchange_loop_metrics.rs b/polkadot/relays/exchange/src/exchange_loop_metrics.rs new file mode 100644 index 00000000000..82d3e649d43 --- /dev/null +++ b/polkadot/relays/exchange/src/exchange_loop_metrics.rs @@ -0,0 +1,97 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Metrics for currency-exchange relay loop. 
+ +use crate::exchange::{BlockNumberOf, RelayedBlockTransactions, TransactionProofPipeline}; +use relay_utils::metrics::{ + metric_name, register, Counter, CounterVec, GaugeVec, Opts, PrometheusError, Registry, U64, +}; + +/// Exchange transactions relay metrics. +#[derive(Clone)] +pub struct ExchangeLoopMetrics { + /// Best finalized block numbers - "processed" and "known". + best_block_numbers: GaugeVec, + /// Number of processed blocks ("total"). + processed_blocks: Counter, + /// Number of processed transactions ("total", "relayed" and "failed"). + processed_transactions: CounterVec, +} + +impl ExchangeLoopMetrics { + /// Create and register exchange loop metrics. + pub fn new(registry: &Registry, prefix: Option<&str>) -> Result { + Ok(ExchangeLoopMetrics { + best_block_numbers: register( + GaugeVec::new( + Opts::new( + metric_name(prefix, "best_block_numbers"), + "Best finalized block numbers", + ), + &["type"], + )?, + registry, + )?, + processed_blocks: register( + Counter::new( + metric_name(prefix, "processed_blocks"), + "Total number of processed blocks", + )?, + registry, + )?, + processed_transactions: register( + CounterVec::new( + Opts::new( + metric_name(prefix, "processed_transactions"), + "Total number of processed transactions", + ), + &["type"], + )?, + registry, + )?, + }) + } +} + +impl ExchangeLoopMetrics { + /// Update metrics when single block is relayed. + pub fn update( + &self, + best_processed_block_number: BlockNumberOf

, + best_known_block_number: BlockNumberOf

, + relayed_transactions: RelayedBlockTransactions, + ) { + self.best_block_numbers + .with_label_values(&["processed"]) + .set(best_processed_block_number.into()); + self.best_block_numbers + .with_label_values(&["known"]) + .set(best_known_block_number.into()); + + self.processed_blocks.inc(); + + self.processed_transactions + .with_label_values(&["total"]) + .inc_by(relayed_transactions.processed as _); + self.processed_transactions + .with_label_values(&["relayed"]) + .inc_by(relayed_transactions.relayed as _); + self.processed_transactions + .with_label_values(&["failed"]) + .inc_by(relayed_transactions.failed as _); + } +} diff --git a/polkadot/relays/exchange/src/lib.rs b/polkadot/relays/exchange/src/lib.rs new file mode 100644 index 00000000000..370f085b4bf --- /dev/null +++ b/polkadot/relays/exchange/src/lib.rs @@ -0,0 +1,26 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Relaying [`currency-exchange`](../pallet_bridge_currency_exchange/index.html) application +//! specific data. Currency exchange application allows exchanging tokens between bridged chains. +//! This module provides entrypoints for crafting and submitting (single and multiple) +//! proof-of-exchange-at-source-chain transaction(s) to target chain. 
+ +#![warn(missing_docs)] + +pub mod exchange; +pub mod exchange_loop; +pub mod exchange_loop_metrics; diff --git a/polkadot/relays/finality/Cargo.toml b/polkadot/relays/finality/Cargo.toml new file mode 100644 index 00000000000..944da9837ff --- /dev/null +++ b/polkadot/relays/finality/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "finality-relay" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +description = "Finality proofs relay" + +[dependencies] +async-std = "1.6.5" +async-trait = "0.1.40" +backoff = "0.2" +bp-header-chain = { path = "../../primitives/header-chain" } +futures = "0.3.5" +headers-relay = { path = "../headers" } +log = "0.4.11" +num-traits = "0.2" +relay-utils = { path = "../utils" } + +[dev-dependencies] +parking_lot = "0.11.0" diff --git a/polkadot/relays/finality/src/finality_loop.rs b/polkadot/relays/finality/src/finality_loop.rs new file mode 100644 index 00000000000..aff32e46de4 --- /dev/null +++ b/polkadot/relays/finality/src/finality_loop.rs @@ -0,0 +1,599 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! The loop basically reads all missing headers and their finality proofs from the source client. +//! 
The proof for the best possible header is then submitted to the target node. The only exception +//! is the mandatory headers, which we always submit to the target node. For such headers, we +//! assume that the persistent proof either exists, or will eventually become available. + +use crate::{FinalityProof, FinalitySyncPipeline, SourceHeader}; + +use async_trait::async_trait; +use backoff::backoff::Backoff; +use futures::{select, Future, FutureExt, Stream, StreamExt}; +use headers_relay::sync_loop_metrics::SyncLoopMetrics; +use num_traits::{One, Saturating}; +use relay_utils::{ + metrics::{GlobalMetrics, MetricsParams}, + relay_loop::Client as RelayClient, + retry_backoff, FailedClient, MaybeConnectionError, +}; +use std::{ + pin::Pin, + time::{Duration, Instant}, +}; + +/// Finality proof synchronization loop parameters. +#[derive(Debug, Clone)] +pub struct FinalitySyncParams { + /// Interval at which we check updates on both clients. Normally should be larger than + /// `min(source_block_time, target_block_time)`. + /// + /// This parameter may be used to limit transactions rate. Increase the value && you'll get + /// infrequent updates => sparse headers => potential slow down of bridge applications, but pallet storage + /// won't be super large. Decrease the value to near `source_block_time` and you'll get + /// transaction for (almost) every block of the source chain => all source headers will be known + /// to the target chain => bridge applications will run faster, but pallet storage may explode + /// (but if pruning is there, then it's fine). + pub tick: Duration, + /// Number of finality proofs to keep in internal buffer between loop wakeups. + /// + /// While in "major syncing" state, we still read finality proofs from the stream. They're stored + /// in the internal buffer between loop wakeups. When we're close to the tip of the chain, we may + /// meet finality delays if headers are not finalized frequently. 
So instead of waiting for next + /// finality proof to appear in the stream, we may use existing proof from that buffer. + pub recent_finality_proofs_limit: usize, + /// Timeout before we treat our transactions as lost and restart the whole sync process. + pub stall_timeout: Duration, +} + +/// Source client used in finality synchronization loop. +#[async_trait] +pub trait SourceClient: RelayClient { + /// Stream of new finality proofs. The stream is allowed to miss proofs for some + /// headers, even if those headers are mandatory. + type FinalityProofsStream: Stream; + + /// Get best finalized block number. + async fn best_finalized_block_number(&self) -> Result; + + /// Get canonical header and its finality proof by number. + async fn header_and_finality_proof( + &self, + number: P::Number, + ) -> Result<(P::Header, Option), Self::Error>; + + /// Subscribe to new finality proofs. + async fn finality_proofs(&self) -> Result; +} + +/// Target client used in finality synchronization loop. +#[async_trait] +pub trait TargetClient: RelayClient { + /// Get best finalized source block number. + async fn best_finalized_source_block_number(&self) -> Result; + + /// Submit header finality proof. + async fn submit_finality_proof(&self, header: P::Header, proof: P::FinalityProof) -> Result<(), Self::Error>; +} + +/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs sync loop. +pub fn metrics_prefix() -> String { + format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME) +} + +/// Run finality proofs synchronization loop. +pub async fn run( + source_client: impl SourceClient

, + target_client: impl TargetClient

, + sync_params: FinalitySyncParams, + metrics_params: MetricsParams, + exit_signal: impl Future, +) -> Result<(), String> { + let exit_signal = exit_signal.shared(); + relay_utils::relay_loop(source_client, target_client) + .with_metrics(Some(metrics_prefix::

()), metrics_params) + .loop_metric(|registry, prefix| SyncLoopMetrics::new(registry, prefix))? + .standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))? + .expose() + .await? + .run(|source_client, target_client, metrics| { + run_until_connection_lost( + source_client, + target_client, + sync_params.clone(), + metrics, + exit_signal.clone(), + ) + }) + .await +} + +/// Unjustified headers container. Ordered by header number. +pub(crate) type UnjustifiedHeaders = Vec; +/// Finality proofs container. Ordered by target header number. +pub(crate) type FinalityProofs

= Vec<( +

::Number, +

::FinalityProof, +)>; +/// Reference to finality proofs container. +pub(crate) type FinalityProofsRef<'a, P> = &'a [( +

::Number, +

::FinalityProof, +)]; + +/// Error that may happen inside finality synchronization loop. +#[derive(Debug)] +pub(crate) enum Error { + /// Source client request has failed with given error. + Source(SourceError), + /// Target client request has failed with given error. + Target(TargetError), + /// Finality proof for mandatory header is missing from the source node. + MissingMandatoryFinalityProof(P::Number), + /// The synchronization has stalled. + Stalled, +} + +impl Error +where + P: FinalitySyncPipeline, + SourceError: MaybeConnectionError, + TargetError: MaybeConnectionError, +{ + fn fail_if_connection_error(&self) -> Result<(), FailedClient> { + match *self { + Error::Source(ref error) if error.is_connection_error() => Err(FailedClient::Source), + Error::Target(ref error) if error.is_connection_error() => Err(FailedClient::Target), + Error::Stalled => Err(FailedClient::Both), + _ => Ok(()), + } + } +} + +/// Information about transaction that we have submitted. +#[derive(Debug, Clone)] +struct Transaction { + /// Time when we have submitted this transaction. + pub time: Instant, + /// The number of the header we have submitted. + pub submitted_header_number: Number, +} + +/// Finality proofs stream that may be restarted. +pub(crate) struct RestartableFinalityProofsStream { + /// Flag that the stream needs to be restarted. + pub(crate) needs_restart: bool, + /// The stream itself. + stream: Pin>, +} + +#[cfg(test)] +impl From for RestartableFinalityProofsStream { + fn from(stream: S) -> Self { + RestartableFinalityProofsStream { + needs_restart: false, + stream: Box::pin(stream), + } + } +} + +/// Finality synchronization loop state. +struct FinalityLoopState<'a, P: FinalitySyncPipeline, FinalityProofsStream> { + /// Synchronization loop progress. + progress: &'a mut (Instant, Option), + /// Finality proofs stream. + finality_proofs_stream: &'a mut RestartableFinalityProofsStream, + /// Recent finality proofs that we have read from the stream. 
+ recent_finality_proofs: &'a mut FinalityProofs

, + /// Last transaction that we have submitted to the target node. + last_transaction: Option>, +} + +async fn run_until_connection_lost( + source_client: impl SourceClient

, + target_client: impl TargetClient

, + sync_params: FinalitySyncParams, + metrics_sync: Option, + exit_signal: impl Future, +) -> Result<(), FailedClient> { + let restart_finality_proofs_stream = || async { + source_client.finality_proofs().await.map_err(|error| { + log::error!( + target: "bridge", + "Failed to subscribe to {} justifications: {:?}. Going to reconnect", + P::SOURCE_NAME, + error, + ); + + FailedClient::Source + }) + }; + + let exit_signal = exit_signal.fuse(); + futures::pin_mut!(exit_signal); + + let mut finality_proofs_stream = RestartableFinalityProofsStream { + needs_restart: false, + stream: Box::pin(restart_finality_proofs_stream().await?), + }; + let mut recent_finality_proofs = Vec::new(); + + let mut progress = (Instant::now(), None); + let mut retry_backoff = retry_backoff(); + let mut last_transaction = None; + + loop { + // run loop iteration + let iteration_result = run_loop_iteration( + &source_client, + &target_client, + FinalityLoopState { + progress: &mut progress, + finality_proofs_stream: &mut finality_proofs_stream, + recent_finality_proofs: &mut recent_finality_proofs, + last_transaction: last_transaction.clone(), + }, + &sync_params, + &metrics_sync, + ) + .await; + + // deal with errors + let next_tick = match iteration_result { + Ok(updated_last_transaction) => { + last_transaction = updated_last_transaction; + retry_backoff.reset(); + sync_params.tick + } + Err(error) => { + log::error!(target: "bridge", "Finality sync loop iteration has failed with error: {:?}", error); + error.fail_if_connection_error()?; + retry_backoff + .next_backoff() + .unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY) + } + }; + if finality_proofs_stream.needs_restart { + log::warn!(target: "bridge", "{} finality proofs stream is being restarted", P::SOURCE_NAME); + + finality_proofs_stream.needs_restart = false; + finality_proofs_stream.stream = Box::pin(restart_finality_proofs_stream().await?); + } + + // wait till exit signal, or new source block + select! 
{ + _ = async_std::task::sleep(next_tick).fuse() => {}, + _ = exit_signal => return Ok(()), + } + } +} + +async fn run_loop_iteration( + source_client: &SC, + target_client: &TC, + state: FinalityLoopState<'_, P, SC::FinalityProofsStream>, + sync_params: &FinalitySyncParams, + metrics_sync: &Option, +) -> Result>, Error> +where + P: FinalitySyncPipeline, + SC: SourceClient

, + TC: TargetClient

, +{ + // read best source headers ids from source and target nodes + let best_number_at_source = source_client + .best_finalized_block_number() + .await + .map_err(Error::Source)?; + let best_number_at_target = target_client + .best_finalized_source_block_number() + .await + .map_err(Error::Target)?; + if let Some(ref metrics_sync) = *metrics_sync { + metrics_sync.update_best_block_at_source(best_number_at_source); + metrics_sync.update_best_block_at_target(best_number_at_target); + } + *state.progress = print_sync_progress::

(*state.progress, best_number_at_source, best_number_at_target); + + // if we have already submitted header, then we just need to wait for it + // if we're waiting too much, then we believe our transaction has been lost and restart sync + if let Some(last_transaction) = state.last_transaction { + if best_number_at_target >= last_transaction.submitted_header_number { + // transaction has been mined && we can continue + } else if last_transaction.time.elapsed() > sync_params.stall_timeout { + log::error!( + target: "bridge", + "Finality synchronization from {} to {} has stalled. Going to restart", + P::SOURCE_NAME, + P::TARGET_NAME, + ); + + return Err(Error::Stalled); + } else { + return Ok(Some(last_transaction)); + } + } + + // submit new header if we have something new + match select_header_to_submit( + source_client, + target_client, + state.finality_proofs_stream, + state.recent_finality_proofs, + best_number_at_source, + best_number_at_target, + sync_params, + ) + .await? + { + Some((header, justification)) => { + let new_transaction = Transaction { + time: Instant::now(), + submitted_header_number: header.number(), + }; + + log::debug!( + target: "bridge", + "Going to submit finality proof of {} header #{:?} to {}", + P::SOURCE_NAME, + new_transaction.submitted_header_number, + P::TARGET_NAME, + ); + + target_client + .submit_finality_proof(header, justification) + .await + .map_err(Error::Target)?; + Ok(Some(new_transaction)) + } + None => Ok(None), + } +} + +async fn select_header_to_submit( + source_client: &SC, + target_client: &TC, + finality_proofs_stream: &mut RestartableFinalityProofsStream, + recent_finality_proofs: &mut FinalityProofs

, + best_number_at_source: P::Number, + best_number_at_target: P::Number, + sync_params: &FinalitySyncParams, +) -> Result, Error> +where + P: FinalitySyncPipeline, + SC: SourceClient

, + TC: TargetClient

, +{ + // to see that the loop is progressing + log::trace!( + target: "bridge", + "Considering range of headers ({:?}; {:?}]", + best_number_at_target, + best_number_at_source, + ); + + // read missing headers. if we see that the header schedules GRANDPA change, we need to + // submit this header + let selected_finality_proof = read_missing_headers::( + source_client, + target_client, + best_number_at_source, + best_number_at_target, + ) + .await?; + let (mut unjustified_headers, mut selected_finality_proof) = match selected_finality_proof { + SelectedFinalityProof::Mandatory(header, finality_proof) => return Ok(Some((header, finality_proof))), + SelectedFinalityProof::Regular(unjustified_headers, header, finality_proof) => { + (unjustified_headers, Some((header, finality_proof))) + } + SelectedFinalityProof::None(unjustified_headers) => (unjustified_headers, None), + }; + + // all headers that are missing from the target client are non-mandatory + // => even if we have already selected some header and its persistent finality proof, + // we may try to select better header by reading non-persistent proofs from the stream + read_finality_proofs_from_stream::(finality_proofs_stream, recent_finality_proofs); + selected_finality_proof = select_better_recent_finality_proof::

( + recent_finality_proofs, + &mut unjustified_headers, + selected_finality_proof, + ); + + // remove obsolete 'recent' finality proofs + keep its size under certain limit + let oldest_finality_proof_to_keep = selected_finality_proof + .as_ref() + .map(|(header, _)| header.number()) + .unwrap_or(best_number_at_target); + prune_recent_finality_proofs::

( + oldest_finality_proof_to_keep, + recent_finality_proofs, + sync_params.recent_finality_proofs_limit, + ); + + Ok(selected_finality_proof) +} + +/// Finality proof that has been selected by the `read_missing_headers` function. +pub(crate) enum SelectedFinalityProof { + /// Mandatory header and its proof has been selected. We shall submit proof for this header. + Mandatory(Header, FinalityProof), + /// Regular header and its proof has been selected. We may submit this proof, or proof for + /// some better header. + Regular(UnjustifiedHeaders

, Header, FinalityProof), + /// We haven't found any missing header with persistent proof at the target client. + None(UnjustifiedHeaders
), +} + +/// Read missing headers and their persistent finality proofs from the target client. +/// +/// If we have found some header with known proof, it is returned. +/// Otherwise, `SelectedFinalityProof::None` is returned. +/// +/// Unless we have found mandatory header, all missing headers are collected and returned. +pub(crate) async fn read_missing_headers, TC: TargetClient

>( + source_client: &SC, + _target_client: &TC, + best_number_at_source: P::Number, + best_number_at_target: P::Number, +) -> Result, Error> { + let mut unjustified_headers = Vec::new(); + let mut selected_finality_proof = None; + let mut header_number = best_number_at_target + One::one(); + while header_number <= best_number_at_source { + let (header, finality_proof) = source_client + .header_and_finality_proof(header_number) + .await + .map_err(Error::Source)?; + let is_mandatory = header.is_mandatory(); + + match (is_mandatory, finality_proof) { + (true, Some(finality_proof)) => { + log::trace!(target: "bridge", "Header {:?} is mandatory", header_number); + return Ok(SelectedFinalityProof::Mandatory(header, finality_proof)); + } + (true, None) => return Err(Error::MissingMandatoryFinalityProof(header.number())), + (false, Some(finality_proof)) => { + log::trace!(target: "bridge", "Header {:?} has persistent finality proof", header_number); + unjustified_headers.clear(); + selected_finality_proof = Some((header, finality_proof)); + } + (false, None) => { + unjustified_headers.push(header); + } + } + + header_number = header_number + One::one(); + } + + Ok(match selected_finality_proof { + Some((header, proof)) => SelectedFinalityProof::Regular(unjustified_headers, header, proof), + None => SelectedFinalityProof::None(unjustified_headers), + }) +} + +/// Read finality proofs from the stream. +pub(crate) fn read_finality_proofs_from_stream>( + finality_proofs_stream: &mut RestartableFinalityProofsStream, + recent_finality_proofs: &mut FinalityProofs

, +) { + loop { + let next_proof = finality_proofs_stream.stream.next(); + let finality_proof = match next_proof.now_or_never() { + Some(Some(finality_proof)) => finality_proof, + Some(None) => { + finality_proofs_stream.needs_restart = true; + break; + } + None => break, + }; + + recent_finality_proofs.push((finality_proof.target_header_number(), finality_proof)); + } +} + +/// Try to select better header and its proof, given finality proofs that we +/// have recently read from the stream. +pub(crate) fn select_better_recent_finality_proof( + recent_finality_proofs: FinalityProofsRef

, + unjustified_headers: &mut UnjustifiedHeaders, + selected_finality_proof: Option<(P::Header, P::FinalityProof)>, +) -> Option<(P::Header, P::FinalityProof)> { + if unjustified_headers.is_empty() || recent_finality_proofs.is_empty() { + return selected_finality_proof; + } + + const NOT_EMPTY_PROOF: &str = "we have checked that the vec is not empty; qed"; + + // we need proofs for headers in range unjustified_range_begin..=unjustified_range_end + let unjustified_range_begin = unjustified_headers.first().expect(NOT_EMPTY_PROOF).number(); + let unjustified_range_end = unjustified_headers.last().expect(NOT_EMPTY_PROOF).number(); + + // we have proofs for headers in range buffered_range_begin..=buffered_range_end + let buffered_range_begin = recent_finality_proofs.first().expect(NOT_EMPTY_PROOF).0; + let buffered_range_end = recent_finality_proofs.last().expect(NOT_EMPTY_PROOF).0; + + // we have two ranges => find intersection + let intersection_begin = std::cmp::max(unjustified_range_begin, buffered_range_begin); + let intersection_end = std::cmp::min(unjustified_range_end, buffered_range_end); + let intersection = intersection_begin..=intersection_end; + + // find last proof from intersection + let selected_finality_proof_index = recent_finality_proofs + .binary_search_by_key(intersection.end(), |(number, _)| *number) + .unwrap_or_else(|index| index.saturating_sub(1)); + let (selected_header_number, finality_proof) = &recent_finality_proofs[selected_finality_proof_index]; + if !intersection.contains(selected_header_number) { + return selected_finality_proof; + } + + // now remove all obsolete headers and extract selected header + let selected_header_position = unjustified_headers + .binary_search_by_key(selected_header_number, |header| header.number()) + .expect("unjustified_headers contain all headers from intersection; qed"); + let selected_header = unjustified_headers.swap_remove(selected_header_position); + Some((selected_header, finality_proof.clone())) +} + 
+pub(crate) fn prune_recent_finality_proofs( + justified_header_number: P::Number, + recent_finality_proofs: &mut FinalityProofs

, + recent_finality_proofs_limit: usize, +) { + let position = + recent_finality_proofs.binary_search_by_key(&justified_header_number, |(header_number, _)| *header_number); + + // remove all obsolete elements + *recent_finality_proofs = recent_finality_proofs.split_off( + position + .map(|position| position + 1) + .unwrap_or_else(|position| position), + ); + + // now - limit vec by size + let split_index = recent_finality_proofs + .len() + .saturating_sub(recent_finality_proofs_limit); + *recent_finality_proofs = recent_finality_proofs.split_off(split_index); +} + +fn print_sync_progress( + progress_context: (Instant, Option), + best_number_at_source: P::Number, + best_number_at_target: P::Number, +) -> (Instant, Option) { + let (prev_time, prev_best_number_at_target) = progress_context; + let now = Instant::now(); + + let need_update = now - prev_time > Duration::from_secs(10) + || prev_best_number_at_target + .map(|prev_best_number_at_target| { + best_number_at_target.saturating_sub(prev_best_number_at_target) > 10.into() + }) + .unwrap_or(true); + + if !need_update { + return (prev_time, prev_best_number_at_target); + } + + log::info!( + target: "bridge", + "Synced {:?} of {:?} headers", + best_number_at_target, + best_number_at_source, + ); + (now, Some(best_number_at_target)) +} diff --git a/polkadot/relays/finality/src/finality_loop_tests.rs b/polkadot/relays/finality/src/finality_loop_tests.rs new file mode 100644 index 00000000000..eedd9020033 --- /dev/null +++ b/polkadot/relays/finality/src/finality_loop_tests.rs @@ -0,0 +1,404 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Tests for finality synchronization loop. + +#![cfg(test)] + +use crate::finality_loop::{ + prune_recent_finality_proofs, read_finality_proofs_from_stream, run, select_better_recent_finality_proof, + FinalityProofs, FinalitySyncParams, SourceClient, TargetClient, +}; +use crate::{FinalityProof, FinalitySyncPipeline, SourceHeader}; + +use async_trait::async_trait; +use futures::{FutureExt, Stream, StreamExt}; +use parking_lot::Mutex; +use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient, MaybeConnectionError}; +use std::{collections::HashMap, pin::Pin, sync::Arc, time::Duration}; + +type IsMandatory = bool; +type TestNumber = u64; + +#[derive(Debug, Clone)] +enum TestError { + NonConnection, +} + +impl MaybeConnectionError for TestError { + fn is_connection_error(&self) -> bool { + false + } +} + +#[derive(Debug, Clone)] +struct TestFinalitySyncPipeline; + +impl FinalitySyncPipeline for TestFinalitySyncPipeline { + const SOURCE_NAME: &'static str = "TestSource"; + const TARGET_NAME: &'static str = "TestTarget"; + + type Hash = u64; + type Number = TestNumber; + type Header = TestSourceHeader; + type FinalityProof = TestFinalityProof; +} + +#[derive(Debug, Clone, PartialEq)] +struct TestSourceHeader(IsMandatory, TestNumber); + +impl SourceHeader for TestSourceHeader { + fn number(&self) -> TestNumber { + self.1 + } + + fn is_mandatory(&self) -> bool { + self.0 + } +} + +#[derive(Debug, Clone, PartialEq)] +struct TestFinalityProof(TestNumber); + +impl FinalityProof for TestFinalityProof { + fn target_header_number(&self) -> TestNumber { + self.0 + } +} + 
+#[derive(Debug, Clone, Default)] +struct ClientsData { + source_best_block_number: TestNumber, + source_headers: HashMap)>, + source_proofs: Vec, + + target_best_block_number: TestNumber, + target_headers: Vec<(TestSourceHeader, TestFinalityProof)>, +} + +#[derive(Clone)] +struct TestSourceClient { + on_method_call: Arc, + data: Arc>, +} + +#[async_trait] +impl RelayClient for TestSourceClient { + type Error = TestError; + + async fn reconnect(&mut self) -> Result<(), TestError> { + unreachable!() + } +} + +#[async_trait] +impl SourceClient for TestSourceClient { + type FinalityProofsStream = Pin>>; + + async fn best_finalized_block_number(&self) -> Result { + let mut data = self.data.lock(); + (self.on_method_call)(&mut *data); + Ok(data.source_best_block_number) + } + + async fn header_and_finality_proof( + &self, + number: TestNumber, + ) -> Result<(TestSourceHeader, Option), TestError> { + let mut data = self.data.lock(); + (self.on_method_call)(&mut *data); + data.source_headers + .get(&number) + .cloned() + .ok_or(TestError::NonConnection) + } + + async fn finality_proofs(&self) -> Result { + let mut data = self.data.lock(); + (self.on_method_call)(&mut *data); + Ok(futures::stream::iter(data.source_proofs.clone()).boxed()) + } +} + +#[derive(Clone)] +struct TestTargetClient { + on_method_call: Arc, + data: Arc>, +} + +#[async_trait] +impl RelayClient for TestTargetClient { + type Error = TestError; + + async fn reconnect(&mut self) -> Result<(), TestError> { + unreachable!() + } +} + +#[async_trait] +impl TargetClient for TestTargetClient { + async fn best_finalized_source_block_number(&self) -> Result { + let mut data = self.data.lock(); + (self.on_method_call)(&mut *data); + Ok(data.target_best_block_number) + } + + async fn submit_finality_proof(&self, header: TestSourceHeader, proof: TestFinalityProof) -> Result<(), TestError> { + let mut data = self.data.lock(); + (self.on_method_call)(&mut *data); + data.target_best_block_number = header.number(); + 
data.target_headers.push((header, proof)); + Ok(()) + } +} + +fn run_sync_loop(state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static) -> ClientsData { + let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded(); + let internal_state_function: Arc = Arc::new(move |data| { + if state_function(data) { + exit_sender.unbounded_send(()).unwrap(); + } + }); + let clients_data = Arc::new(Mutex::new(ClientsData { + source_best_block_number: 10, + source_headers: vec![ + (6, (TestSourceHeader(false, 6), None)), + (7, (TestSourceHeader(false, 7), Some(TestFinalityProof(7)))), + (8, (TestSourceHeader(true, 8), Some(TestFinalityProof(8)))), + (9, (TestSourceHeader(false, 9), Some(TestFinalityProof(9)))), + (10, (TestSourceHeader(false, 10), None)), + ] + .into_iter() + .collect(), + source_proofs: vec![TestFinalityProof(12), TestFinalityProof(14)], + + target_best_block_number: 5, + target_headers: vec![], + })); + let source_client = TestSourceClient { + on_method_call: internal_state_function.clone(), + data: clients_data.clone(), + }; + let target_client = TestTargetClient { + on_method_call: internal_state_function, + data: clients_data.clone(), + }; + let sync_params = FinalitySyncParams { + tick: Duration::from_secs(0), + recent_finality_proofs_limit: 1024, + stall_timeout: Duration::from_secs(1), + }; + + let _ = async_std::task::block_on(run( + source_client, + target_client, + sync_params, + MetricsParams::disabled(), + exit_receiver.into_future().map(|(_, _)| ()), + )); + + let clients_data = clients_data.lock().clone(); + clients_data +} + +#[test] +fn finality_sync_loop_works() { + let client_data = run_sync_loop(|data| { + // header#7 has persistent finality proof, but it isn't mandatory => it isn't submitted, because + // header#8 has persistent finality proof && it is mandatory => it is submitted + // header#9 has persistent finality proof, but it isn't mandatory => it is submitted, because + // there are no more persistent 
finality proofs + // + // once this ^^^ is done, we generate more blocks && read proof for blocks 12 and 14 from the stream + if data.target_best_block_number == 9 { + data.source_best_block_number = 14; + data.source_headers.insert(11, (TestSourceHeader(false, 11), None)); + data.source_headers + .insert(12, (TestSourceHeader(false, 12), Some(TestFinalityProof(12)))); + data.source_headers.insert(13, (TestSourceHeader(false, 13), None)); + data.source_headers + .insert(14, (TestSourceHeader(false, 14), Some(TestFinalityProof(14)))); + } + // once this ^^^ is done, we generate more blocks && read persistent proof for block 16 + if data.target_best_block_number == 14 { + data.source_best_block_number = 17; + data.source_headers.insert(15, (TestSourceHeader(false, 15), None)); + data.source_headers + .insert(16, (TestSourceHeader(false, 16), Some(TestFinalityProof(16)))); + data.source_headers.insert(17, (TestSourceHeader(false, 17), None)); + } + + data.target_best_block_number == 16 + }); + + assert_eq!( + client_data.target_headers, + vec![ + // before adding 11..14: finality proof for mandatory header#8 + (TestSourceHeader(true, 8), TestFinalityProof(8)), + // before adding 11..14: persistent finality proof for non-mandatory header#9 + (TestSourceHeader(false, 9), TestFinalityProof(9)), + // after adding 11..14: ephemeral finality proof for non-mandatory header#14 + (TestSourceHeader(false, 14), TestFinalityProof(14)), + // after adding 15..17: persistent finality proof for non-mandatory header#16 + (TestSourceHeader(false, 16), TestFinalityProof(16)), + ], + ); +} + +#[test] +fn select_better_recent_finality_proof_works() { + // if there are no unjustified headers, nothing is changed + assert_eq!( + select_better_recent_finality_proof::( + &[(5, TestFinalityProof(5))], + &mut vec![], + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ), + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ); + + // if there are no recent finality proofs, 
nothing is changed + assert_eq!( + select_better_recent_finality_proof::( + &[], + &mut vec![TestSourceHeader(false, 5)], + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ), + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ); + + // if there's no intersection between recent finality proofs and unjustified headers, nothing is changed + let mut unjustified_headers = vec![TestSourceHeader(false, 9), TestSourceHeader(false, 10)]; + assert_eq!( + select_better_recent_finality_proof::( + &[(1, TestFinalityProof(1)), (4, TestFinalityProof(4))], + &mut unjustified_headers, + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ), + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ); + + // if there's intersection between recent finality proofs and unjustified headers, but there are no + // proofs in this intersection, nothing is changed + let mut unjustified_headers = vec![ + TestSourceHeader(false, 8), + TestSourceHeader(false, 9), + TestSourceHeader(false, 10), + ]; + assert_eq!( + select_better_recent_finality_proof::( + &[(7, TestFinalityProof(7)), (11, TestFinalityProof(11))], + &mut unjustified_headers, + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ), + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ); + assert_eq!( + unjustified_headers, + vec![ + TestSourceHeader(false, 8), + TestSourceHeader(false, 9), + TestSourceHeader(false, 10) + ] + ); + + // if there's intersection between recent finality proofs and unjustified headers and there's + // a proof in this intersection: + // - this better (last from intersection) proof is selected; + // - 'obsolete' unjustified headers are pruned. 
+ let mut unjustified_headers = vec![ + TestSourceHeader(false, 8), + TestSourceHeader(false, 9), + TestSourceHeader(false, 10), + ]; + assert_eq!( + select_better_recent_finality_proof::( + &[(7, TestFinalityProof(7)), (9, TestFinalityProof(9))], + &mut unjustified_headers, + Some((TestSourceHeader(false, 2), TestFinalityProof(2))), + ), + Some((TestSourceHeader(false, 9), TestFinalityProof(9))), + ); +} + +#[test] +fn read_finality_proofs_from_stream_works() { + // when stream is currently empty, nothing is changed + let mut recent_finality_proofs = vec![(1, TestFinalityProof(1))]; + let mut stream = futures::stream::pending().into(); + read_finality_proofs_from_stream::(&mut stream, &mut recent_finality_proofs); + assert_eq!(recent_finality_proofs, vec![(1, TestFinalityProof(1))]); + assert_eq!(stream.needs_restart, false); + + // when stream has entry with target, it is added to the recent proofs container + let mut stream = futures::stream::iter(vec![TestFinalityProof(4)]) + .chain(futures::stream::pending()) + .into(); + read_finality_proofs_from_stream::(&mut stream, &mut recent_finality_proofs); + assert_eq!( + recent_finality_proofs, + vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))] + ); + assert_eq!(stream.needs_restart, false); + + // when stream has ended, we'll need to restart it + let mut stream = futures::stream::empty().into(); + read_finality_proofs_from_stream::(&mut stream, &mut recent_finality_proofs); + assert_eq!( + recent_finality_proofs, + vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))] + ); + assert_eq!(stream.needs_restart, true); +} + +#[test] +fn prune_recent_finality_proofs_works() { + let original_recent_finality_proofs: FinalityProofs = vec![ + (10, TestFinalityProof(10)), + (13, TestFinalityProof(13)), + (15, TestFinalityProof(15)), + (17, TestFinalityProof(17)), + (19, TestFinalityProof(19)), + ] + .into_iter() + .collect(); + + // when there's proof for justified header in the vec + let mut 
recent_finality_proofs = original_recent_finality_proofs.clone(); + prune_recent_finality_proofs::(10, &mut recent_finality_proofs, 1024); + assert_eq!(&original_recent_finality_proofs[1..], recent_finality_proofs,); + + // when there are no proof for justified header in the vec + let mut recent_finality_proofs = original_recent_finality_proofs.clone(); + prune_recent_finality_proofs::(11, &mut recent_finality_proofs, 1024); + assert_eq!(&original_recent_finality_proofs[1..], recent_finality_proofs,); + + // when there are too many entries after initial prune && they also need to be pruned + let mut recent_finality_proofs = original_recent_finality_proofs.clone(); + prune_recent_finality_proofs::(10, &mut recent_finality_proofs, 2); + assert_eq!(&original_recent_finality_proofs[3..], recent_finality_proofs,); + + // when last entry is pruned + let mut recent_finality_proofs = original_recent_finality_proofs.clone(); + prune_recent_finality_proofs::(19, &mut recent_finality_proofs, 2); + assert_eq!(&original_recent_finality_proofs[5..], recent_finality_proofs,); + + // when post-last entry is pruned + let mut recent_finality_proofs = original_recent_finality_proofs.clone(); + prune_recent_finality_proofs::(20, &mut recent_finality_proofs, 2); + assert_eq!(&original_recent_finality_proofs[5..], recent_finality_proofs,); +} diff --git a/polkadot/relays/finality/src/lib.rs b/polkadot/relays/finality/src/lib.rs new file mode 100644 index 00000000000..d5048aa1607 --- /dev/null +++ b/polkadot/relays/finality/src/lib.rs @@ -0,0 +1,53 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! This crate has single entrypoint to run synchronization loop that is built around finality +//! proofs, as opposed to headers synchronization loop, which is built around headers. The headers +//! are still submitted to the target node, but are treated as auxiliary data as we are not trying +//! to submit all source headers to the target node. + +pub use crate::finality_loop::{metrics_prefix, run, FinalitySyncParams, SourceClient, TargetClient}; + +use bp_header_chain::FinalityProof; +use std::fmt::Debug; + +mod finality_loop; +mod finality_loop_tests; + +/// Finality proofs synchronization pipeline. +pub trait FinalitySyncPipeline: Clone + Debug + Send + Sync { + /// Name of the finality proofs source. + const SOURCE_NAME: &'static str; + /// Name of the finality proofs target. + const TARGET_NAME: &'static str; + + /// Headers we're syncing are identified by this hash. + type Hash: Eq + Clone + Copy + Send + Sync + Debug; + /// Headers we're syncing are identified by this number. + type Number: relay_utils::BlockNumberBase; + /// Type of header that we're syncing. + type Header: SourceHeader; + /// Finality proof type. + type FinalityProof: FinalityProof; +} + +/// Header that we're receiving from source node. +pub trait SourceHeader: Clone + Debug + PartialEq + Send + Sync { + /// Returns number of header. + fn number(&self) -> Number; + /// Returns true if this header needs to be submitted to target node. 
+ fn is_mandatory(&self) -> bool; +} diff --git a/polkadot/relays/headers/Cargo.toml b/polkadot/relays/headers/Cargo.toml new file mode 100644 index 00000000000..31d3166a997 --- /dev/null +++ b/polkadot/relays/headers/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "headers-relay" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +async-std = "1.6.5" +async-trait = "0.1.40" +backoff = "0.2" +futures = "0.3.5" +linked-hash-map = "0.5.3" +log = "0.4.11" +num-traits = "0.2" +parking_lot = "0.11.0" +relay-utils = { path = "../utils" } diff --git a/polkadot/relays/headers/src/headers.rs b/polkadot/relays/headers/src/headers.rs new file mode 100644 index 00000000000..be3e2cb6e6d --- /dev/null +++ b/polkadot/relays/headers/src/headers.rs @@ -0,0 +1,1721 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Headers queue - the intermediate buffer that is filled when headers are read +//! from the source chain. Headers are removed from the queue once they become +//! known to the target chain. Inside, there are several sub-queues, where headers +//! may stay until source/target chain state isn't updated. When a header reaches the +//! 
`ready` sub-queue, it may be submitted to the target chain. + +use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SourceHeader}; + +use linked_hash_map::LinkedHashMap; +use num_traits::{One, Zero}; +use relay_utils::HeaderId; +use std::{ + collections::{btree_map::Entry as BTreeMapEntry, hash_map::Entry as HashMapEntry, BTreeMap, HashMap, HashSet}, + time::{Duration, Instant}, +}; + +type HeadersQueue

= + BTreeMap<

::Number, HashMap<

::Hash, QueuedHeader

>>; +type SyncedChildren

= + BTreeMap<

::Number, HashMap<

::Hash, HashSet>>>; +type KnownHeaders

= + BTreeMap<

::Number, HashMap<

::Hash, HeaderStatus>>; + +/// We're trying to fetch completion data for single header at this interval. +const RETRY_FETCH_COMPLETION_INTERVAL: Duration = Duration::from_secs(20); + +/// Headers queue. +#[derive(Debug)] +pub struct QueuedHeaders { + /// Headers that are received from source node, but we (native sync code) have + /// never seen their parents. So we need to check if we can/should submit this header. + maybe_orphan: HeadersQueue

, + /// Headers that are received from source node, and we (native sync code) have + /// checked that Substrate runtime doesn't know their parents. So we need to submit parents + /// first. + orphan: HeadersQueue

, + /// Headers that are ready to be submitted to target node, but we need to check + /// whether submission requires extra data to be provided. + maybe_extra: HeadersQueue

, + /// Headers that are ready to be submitted to target node, but we need to retrieve + /// extra data first. + extra: HeadersQueue

, + /// Headers that are ready to be submitted to target node. + ready: HeadersQueue

, + /// Headers that are ready to be submitted to target node, but their ancestor is incomplete. + /// Thus we're waiting for these ancestors to be completed first. + /// Note that the incomplete header itself is synced and it isn't in this queue. + incomplete: HeadersQueue

, + /// Headers that are (we believe) currently submitted to target node by our, + /// not-yet mined transactions. + submitted: HeadersQueue

, + /// Synced headers childrens. We need it to support case when header is synced, but some of + /// its parents are incomplete. + synced_children: SyncedChildren

, + /// Pointers to all headers that we ever seen and we believe we can touch in the future. + known_headers: KnownHeaders

, + /// Headers that are waiting for completion data from source node. Mapped (and auto-sorted + /// by) to the last fetch time. + incomplete_headers: LinkedHashMap, Option>, + /// Headers that are waiting to be completed at target node. Auto-sorted by insertion time. + completion_data: LinkedHashMap, P::Completion>, + /// Best synced block number. + best_synced_number: P::Number, + /// Pruned blocks border. We do not store or accept any blocks with number less than + /// this number. + prune_border: P::Number, +} + +/// Header completion data. +#[derive(Debug)] +struct HeaderCompletion { + /// Last time when we tried to upload completion data to target node, if ever. + pub last_upload_time: Option, + /// Completion data. + pub completion: Completion, +} + +impl Default for QueuedHeaders

{ + fn default() -> Self { + QueuedHeaders { + maybe_orphan: HeadersQueue::new(), + orphan: HeadersQueue::new(), + maybe_extra: HeadersQueue::new(), + extra: HeadersQueue::new(), + ready: HeadersQueue::new(), + incomplete: HeadersQueue::new(), + submitted: HeadersQueue::new(), + synced_children: SyncedChildren::

::new(), + known_headers: KnownHeaders::

::new(), + incomplete_headers: LinkedHashMap::new(), + completion_data: LinkedHashMap::new(), + best_synced_number: Zero::zero(), + prune_border: Zero::zero(), + } + } +} + +impl QueuedHeaders

{ + /// Returns prune border. + #[cfg(test)] + pub fn prune_border(&self) -> P::Number { + self.prune_border + } + + /// Returns number of headers that are currently in given queue. + pub fn headers_in_status(&self, status: HeaderStatus) -> usize { + match status { + HeaderStatus::Unknown | HeaderStatus::Synced => 0, + HeaderStatus::MaybeOrphan => self + .maybe_orphan + .values() + .fold(0, |total, headers| total + headers.len()), + HeaderStatus::Orphan => self.orphan.values().fold(0, |total, headers| total + headers.len()), + HeaderStatus::MaybeExtra => self + .maybe_extra + .values() + .fold(0, |total, headers| total + headers.len()), + HeaderStatus::Extra => self.extra.values().fold(0, |total, headers| total + headers.len()), + HeaderStatus::Ready => self.ready.values().fold(0, |total, headers| total + headers.len()), + HeaderStatus::Incomplete => self.incomplete.values().fold(0, |total, headers| total + headers.len()), + HeaderStatus::Submitted => self.submitted.values().fold(0, |total, headers| total + headers.len()), + } + } + + /// Returns number of headers that are currently in the queue. + pub fn total_headers(&self) -> usize { + self.maybe_orphan + .values() + .fold(0, |total, headers| total + headers.len()) + + self.orphan.values().fold(0, |total, headers| total + headers.len()) + + self + .maybe_extra + .values() + .fold(0, |total, headers| total + headers.len()) + + self.extra.values().fold(0, |total, headers| total + headers.len()) + + self.ready.values().fold(0, |total, headers| total + headers.len()) + + self.incomplete.values().fold(0, |total, headers| total + headers.len()) + } + + /// Returns number of best block in the queue. 
+ pub fn best_queued_number(&self) -> P::Number { + std::cmp::max( + self.maybe_orphan.keys().next_back().cloned().unwrap_or_else(Zero::zero), + std::cmp::max( + self.orphan.keys().next_back().cloned().unwrap_or_else(Zero::zero), + std::cmp::max( + self.maybe_extra.keys().next_back().cloned().unwrap_or_else(Zero::zero), + std::cmp::max( + self.extra.keys().next_back().cloned().unwrap_or_else(Zero::zero), + std::cmp::max( + self.ready.keys().next_back().cloned().unwrap_or_else(Zero::zero), + std::cmp::max( + self.incomplete.keys().next_back().cloned().unwrap_or_else(Zero::zero), + self.submitted.keys().next_back().cloned().unwrap_or_else(Zero::zero), + ), + ), + ), + ), + ), + ) + } + + /// Returns number of best synced block we have ever seen. It is either less + /// than `best_queued_number()`, or points to last synced block if queue is empty. + pub fn best_synced_number(&self) -> P::Number { + self.best_synced_number + } + + /// Returns synchronization status of the header. + pub fn status(&self, id: &HeaderIdOf

) -> HeaderStatus { + self.known_headers + .get(&id.0) + .and_then(|x| x.get(&id.1)) + .cloned() + .unwrap_or(HeaderStatus::Unknown) + } + + /// Get oldest header from given queue. + pub fn header(&self, status: HeaderStatus) -> Option<&QueuedHeader

> { + match status { + HeaderStatus::Unknown | HeaderStatus::Synced => None, + HeaderStatus::MaybeOrphan => oldest_header(&self.maybe_orphan), + HeaderStatus::Orphan => oldest_header(&self.orphan), + HeaderStatus::MaybeExtra => oldest_header(&self.maybe_extra), + HeaderStatus::Extra => oldest_header(&self.extra), + HeaderStatus::Ready => oldest_header(&self.ready), + HeaderStatus::Incomplete => oldest_header(&self.incomplete), + HeaderStatus::Submitted => oldest_header(&self.submitted), + } + } + + /// Get oldest headers from given queue until functor will return false. + pub fn headers( + &self, + status: HeaderStatus, + f: impl FnMut(&QueuedHeader

) -> bool, + ) -> Option>> { + match status { + HeaderStatus::Unknown | HeaderStatus::Synced => None, + HeaderStatus::MaybeOrphan => oldest_headers(&self.maybe_orphan, f), + HeaderStatus::Orphan => oldest_headers(&self.orphan, f), + HeaderStatus::MaybeExtra => oldest_headers(&self.maybe_extra, f), + HeaderStatus::Extra => oldest_headers(&self.extra, f), + HeaderStatus::Ready => oldest_headers(&self.ready, f), + HeaderStatus::Incomplete => oldest_headers(&self.incomplete, f), + HeaderStatus::Submitted => oldest_headers(&self.submitted, f), + } + } + + /// Appends new header, received from the source node, to the queue. + pub fn header_response(&mut self, header: P::Header) { + let id = header.id(); + let status = self.status(&id); + if status != HeaderStatus::Unknown { + log::debug!( + target: "bridge", + "Ignoring new {} header: {:?}. Status is {:?}.", + P::SOURCE_NAME, + id, + status, + ); + return; + } + + if id.0 < self.prune_border { + log::debug!( + target: "bridge", + "Ignoring ancient new {} header: {:?}.", + P::SOURCE_NAME, + id, + ); + return; + } + + let parent_id = header.parent_id(); + let parent_status = self.status(&parent_id); + let header = QueuedHeader::new(header); + + let status = match parent_status { + HeaderStatus::Unknown | HeaderStatus::MaybeOrphan => { + insert_header(&mut self.maybe_orphan, id, header); + HeaderStatus::MaybeOrphan + } + HeaderStatus::Orphan => { + insert_header(&mut self.orphan, id, header); + HeaderStatus::Orphan + } + HeaderStatus::MaybeExtra + | HeaderStatus::Extra + | HeaderStatus::Ready + | HeaderStatus::Incomplete + | HeaderStatus::Submitted + | HeaderStatus::Synced => { + insert_header(&mut self.maybe_extra, id, header); + HeaderStatus::MaybeExtra + } + }; + + self.known_headers.entry(id.0).or_default().insert(id.1, status); + log::debug!( + target: "bridge", + "Queueing new {} header: {:?}. Queue: {:?}.", + P::SOURCE_NAME, + id, + status, + ); + } + + /// Receive best header from the target node. 
+ pub fn target_best_header_response(&mut self, id: &HeaderIdOf

) { + self.header_synced(id) + } + + /// Receive target node response for MaybeOrphan request. + pub fn maybe_orphan_response(&mut self, id: &HeaderIdOf

, response: bool) { + if !response { + move_header_descendants::

( + &mut [&mut self.maybe_orphan], + &mut self.orphan, + &mut self.known_headers, + HeaderStatus::Orphan, + &id, + ); + return; + } + + move_header_descendants::

( + &mut [&mut self.maybe_orphan, &mut self.orphan], + &mut self.maybe_extra, + &mut self.known_headers, + HeaderStatus::MaybeExtra, + &id, + ); + } + + /// Receive target node response for MaybeExtra request. + pub fn maybe_extra_response(&mut self, id: &HeaderIdOf

, response: bool) { + let (destination_status, destination_queue) = if response { + (HeaderStatus::Extra, &mut self.extra) + } else if self.is_parent_incomplete(id) { + (HeaderStatus::Incomplete, &mut self.incomplete) + } else { + (HeaderStatus::Ready, &mut self.ready) + }; + + move_header( + &mut self.maybe_extra, + destination_queue, + &mut self.known_headers, + destination_status, + &id, + |header| header, + ); + } + + /// Receive extra from source node. + pub fn extra_response(&mut self, id: &HeaderIdOf

, extra: P::Extra) { + let (destination_status, destination_queue) = if self.is_parent_incomplete(id) { + (HeaderStatus::Incomplete, &mut self.incomplete) + } else { + (HeaderStatus::Ready, &mut self.ready) + }; + + // move header itself from extra to ready queue + move_header( + &mut self.extra, + destination_queue, + &mut self.known_headers, + destination_status, + id, + |header| header.set_extra(extra), + ); + } + + /// Receive completion response from source node. + pub fn completion_response(&mut self, id: &HeaderIdOf

, completion: Option) { + let completion = match completion { + Some(completion) => completion, + None => { + log::debug!( + target: "bridge", + "{} Node is still missing completion data for header: {:?}. Will retry later.", + P::SOURCE_NAME, + id, + ); + + return; + } + }; + + // do not remove from `incomplete_headers` here, because otherwise we'll miss + // completion 'notification' + // this could lead to duplicate completion retrieval (if completion transaction isn't mined + // for too long) + // + // instead, we're moving entry to the end of the queue, so that completion data won't be + // refetched instantly + if self.incomplete_headers.remove(id).is_some() { + log::debug!( + target: "bridge", + "Received completion data from {} for header: {:?}", + P::SOURCE_NAME, + id, + ); + + self.completion_data.insert(*id, completion); + self.incomplete_headers.insert(*id, Some(Instant::now())); + } + } + + /// When header is submitted to target node. + pub fn headers_submitted(&mut self, ids: Vec>) { + for id in ids { + move_header( + &mut self.ready, + &mut self.submitted, + &mut self.known_headers, + HeaderStatus::Submitted, + &id, + |header| header, + ); + } + } + + /// When header completion data is sent to target node. + pub fn header_completed(&mut self, id: &HeaderIdOf

) { + if self.completion_data.remove(id).is_some() { + log::debug!( + target: "bridge", + "Sent completion data to {} for header: {:?}", + P::TARGET_NAME, + id, + ); + + // transaction can be dropped by target chain nodes => it would never be mined + // + // in current implementation the sync loop would wait for some time && if best + // **source** header won't change on **target** node, then the sync will be restarted + // => we'll resubmit the same completion data again (the same is true for submitted + // headers) + // + // the other option would be to track emitted transactions at least on target node, + // but it won't give us 100% guarantee anyway + // + // => we're just dropping completion data just after it has been submitted + } + } + + /// Marks given headers incomplete. + pub fn add_incomplete_headers(&mut self, make_header_incomplete: bool, new_incomplete_headers: Vec>) { + for new_incomplete_header in new_incomplete_headers { + if make_header_incomplete { + self.header_synced(&new_incomplete_header); + } + + let move_origins = select_synced_children::

(&self.synced_children, &new_incomplete_header); + let move_origins = move_origins.into_iter().chain(std::iter::once(new_incomplete_header)); + for move_origin in move_origins { + move_header_descendants::

( + &mut [&mut self.ready, &mut self.submitted], + &mut self.incomplete, + &mut self.known_headers, + HeaderStatus::Incomplete, + &move_origin, + ); + } + + if make_header_incomplete { + log::debug!( + target: "bridge", + "Scheduling completion data retrieval for header: {:?}", + new_incomplete_header, + ); + + self.incomplete_headers.insert(new_incomplete_header, None); + } + } + } + + /// When incomplete headers ids are receved from target node. + pub fn incomplete_headers_response(&mut self, ids: HashSet>) { + // all new incomplete headers are marked Synced and all their descendants + // are moved from Ready/Submitted to Incomplete queue + let new_incomplete_headers = ids + .iter() + .filter(|id| !self.incomplete_headers.contains_key(id) && !self.completion_data.contains_key(id)) + .cloned() + .collect::>(); + self.add_incomplete_headers(true, new_incomplete_headers); + + // for all headers that were incompleted previously, but now are completed, we move + // all descendants from incomplete to ready + let just_completed_headers = self + .incomplete_headers + .keys() + .chain(self.completion_data.keys()) + .filter(|id| !ids.contains(id)) + .cloned() + .collect::>(); + for just_completed_header in just_completed_headers { + // sub2eth rejects H if H.Parent is incomplete + // sub2sub allows 'syncing' headers like that + // => let's check if there are some synced children of just completed header + let move_origins = select_synced_children::

(&self.synced_children, &just_completed_header); + let move_origins = move_origins.into_iter().chain(std::iter::once(just_completed_header)); + for move_origin in move_origins { + move_header_descendants::

( + &mut [&mut self.incomplete], + &mut self.ready, + &mut self.known_headers, + HeaderStatus::Ready, + &move_origin, + ); + } + + log::debug!( + target: "bridge", + "Completion data is no longer required for header: {:?}", + just_completed_header, + ); + + self.incomplete_headers.remove(&just_completed_header); + self.completion_data.remove(&just_completed_header); + } + } + + /// Returns true if given header requires completion data. + pub fn requires_completion_data(&self, id: &HeaderIdOf

) -> bool { + self.incomplete_headers.contains_key(id) + } + + /// Returns id of the header for which we want to fetch completion data. + pub fn incomplete_header(&mut self) -> Option> { + queued_incomplete_header(&mut self.incomplete_headers, |last_fetch_time| { + let retry = match *last_fetch_time { + Some(last_fetch_time) => last_fetch_time.elapsed() > RETRY_FETCH_COMPLETION_INTERVAL, + None => true, + }; + + if retry { + *last_fetch_time = Some(Instant::now()); + } + + retry + }) + .map(|(id, _)| id) + } + + /// Returns header completion data to upload to target node. + pub fn header_to_complete(&mut self) -> Option<(HeaderIdOf

, &P::Completion)> { + queued_incomplete_header(&mut self.completion_data, |_| true) + } + + /// Prune and never accept headers before this block. + pub fn prune(&mut self, prune_border: P::Number) { + if prune_border <= self.prune_border { + return; + } + + prune_queue(&mut self.maybe_orphan, prune_border); + prune_queue(&mut self.orphan, prune_border); + prune_queue(&mut self.maybe_extra, prune_border); + prune_queue(&mut self.extra, prune_border); + prune_queue(&mut self.ready, prune_border); + prune_queue(&mut self.submitted, prune_border); + prune_queue(&mut self.incomplete, prune_border); + self.synced_children = self.synced_children.split_off(&prune_border); + prune_known_headers::

(&mut self.known_headers, prune_border); + self.prune_border = prune_border; + } + + /// Forgets all ever known headers. + pub fn clear(&mut self) { + self.maybe_orphan.clear(); + self.orphan.clear(); + self.maybe_extra.clear(); + self.extra.clear(); + self.ready.clear(); + self.incomplete.clear(); + self.submitted.clear(); + self.synced_children.clear(); + self.known_headers.clear(); + self.best_synced_number = Zero::zero(); + self.prune_border = Zero::zero(); + } + + /// Returns true if parent of this header is either incomplete or waiting for + /// its own incomplete ancestor to be completed. + fn is_parent_incomplete(&self, id: &HeaderIdOf

) -> bool { + let status = self.status(id); + let header = match status { + HeaderStatus::MaybeOrphan => header(&self.maybe_orphan, id), + HeaderStatus::Orphan => header(&self.orphan, id), + HeaderStatus::MaybeExtra => header(&self.maybe_extra, id), + HeaderStatus::Extra => header(&self.extra, id), + HeaderStatus::Ready => header(&self.ready, id), + HeaderStatus::Incomplete => header(&self.incomplete, id), + HeaderStatus::Submitted => header(&self.submitted, id), + HeaderStatus::Unknown => return false, + HeaderStatus::Synced => return false, + }; + + match header { + Some(header) => { + let parent_id = header.header().parent_id(); + self.incomplete_headers.contains_key(&parent_id) + || self.completion_data.contains_key(&parent_id) + || self.status(&parent_id) == HeaderStatus::Incomplete + } + None => false, + } + } + + /// When we receive new Synced header from target node. + fn header_synced(&mut self, id: &HeaderIdOf

) { + // update best synced block number + self.best_synced_number = std::cmp::max(self.best_synced_number, id.0); + + // all ancestors of this header are now synced => let's remove them from + // queues + let mut current = *id; + let mut id_processed = false; + let mut previous_current = None; + loop { + let header = match self.status(¤t) { + HeaderStatus::Unknown => break, + HeaderStatus::MaybeOrphan => remove_header(&mut self.maybe_orphan, ¤t), + HeaderStatus::Orphan => remove_header(&mut self.orphan, ¤t), + HeaderStatus::MaybeExtra => remove_header(&mut self.maybe_extra, ¤t), + HeaderStatus::Extra => remove_header(&mut self.extra, ¤t), + HeaderStatus::Ready => remove_header(&mut self.ready, ¤t), + HeaderStatus::Incomplete => remove_header(&mut self.incomplete, ¤t), + HeaderStatus::Submitted => remove_header(&mut self.submitted, ¤t), + HeaderStatus::Synced => break, + } + .expect("header has a given status; given queue has the header; qed"); + + // remember ids of all the children of the current header + let synced_children_entry = self + .synced_children + .entry(current.0) + .or_default() + .entry(current.1) + .or_default(); + let all_queues = [ + &self.maybe_orphan, + &self.orphan, + &self.maybe_extra, + &self.extra, + &self.ready, + &self.incomplete, + &self.submitted, + ]; + for queue in &all_queues { + let children_from_queue = queue + .get(&(current.0 + One::one())) + .map(|potential_children| { + potential_children + .values() + .filter(|potential_child| potential_child.header().parent_id() == current) + .map(|child| child.id()) + .collect::>() + }) + .unwrap_or_default(); + synced_children_entry.extend(children_from_queue); + } + if let Some(previous_current) = previous_current { + synced_children_entry.insert(previous_current); + } + + set_header_status::

(&mut self.known_headers, ¤t, HeaderStatus::Synced); + + previous_current = Some(current); + current = header.parent_id(); + id_processed = true; + } + + // remember that the header itself is synced + // (condition is here to avoid duplicate log messages) + if !id_processed { + set_header_status::

(&mut self.known_headers, &id, HeaderStatus::Synced); + } + + // now let's move all descendants from maybe_orphan && orphan queues to + // maybe_extra queue + move_header_descendants::

( + &mut [&mut self.maybe_orphan, &mut self.orphan], + &mut self.maybe_extra, + &mut self.known_headers, + HeaderStatus::MaybeExtra, + id, + ); + } +} + +/// Insert header to the queue. +fn insert_header(queue: &mut HeadersQueue

, id: HeaderIdOf

, header: QueuedHeader

) { + queue.entry(id.0).or_default().insert(id.1, header); +} + +/// Remove header from the queue. +fn remove_header(queue: &mut HeadersQueue

, id: &HeaderIdOf

) -> Option> { + let mut headers_at = match queue.entry(id.0) { + BTreeMapEntry::Occupied(headers_at) => headers_at, + BTreeMapEntry::Vacant(_) => return None, + }; + + let header = headers_at.get_mut().remove(&id.1); + if headers_at.get().is_empty() { + headers_at.remove(); + } + header +} + +/// Get header from the queue. +fn header<'a, P: HeadersSyncPipeline>(queue: &'a HeadersQueue

, id: &HeaderIdOf

) -> Option<&'a QueuedHeader

> { + queue.get(&id.0).and_then(|by_hash| by_hash.get(&id.1)) +} + +/// Move header from source to destination queue. +/// +/// Returns ID of parent header, if header has been moved, or None otherwise. +fn move_header( + source_queue: &mut HeadersQueue

, + destination_queue: &mut HeadersQueue

, + known_headers: &mut KnownHeaders

, + destination_status: HeaderStatus, + id: &HeaderIdOf

, + prepare: impl FnOnce(QueuedHeader

) -> QueuedHeader

, +) -> Option> { + let header = match remove_header(source_queue, id) { + Some(header) => prepare(header), + None => return None, + }; + + let parent_id = header.header().parent_id(); + destination_queue.entry(id.0).or_default().insert(id.1, header); + set_header_status::

(known_headers, id, destination_status); + + Some(parent_id) +} + +/// Move all descendant headers from the source to destination queue. +fn move_header_descendants( + source_queues: &mut [&mut HeadersQueue

], + destination_queue: &mut HeadersQueue

, + known_headers: &mut KnownHeaders

, + destination_status: HeaderStatus, + id: &HeaderIdOf

, +) { + let mut current_number = id.0 + One::one(); + let mut current_parents = HashSet::new(); + current_parents.insert(id.1); + + while !current_parents.is_empty() { + let mut next_parents = HashSet::new(); + for source_queue in source_queues.iter_mut() { + let mut source_entry = match source_queue.entry(current_number) { + BTreeMapEntry::Occupied(source_entry) => source_entry, + BTreeMapEntry::Vacant(_) => continue, + }; + + let mut headers_to_move = Vec::new(); + let children_at_number = source_entry.get().keys().cloned().collect::>(); + for key in children_at_number { + let entry = match source_entry.get_mut().entry(key) { + HashMapEntry::Occupied(entry) => entry, + HashMapEntry::Vacant(_) => unreachable!("iterating existing keys; qed"), + }; + + if current_parents.contains(&entry.get().header().parent_id().1) { + let header_to_move = entry.remove(); + let header_to_move_id = header_to_move.id(); + headers_to_move.push((header_to_move_id, header_to_move)); + set_header_status::

(known_headers, &header_to_move_id, destination_status); + } + } + + if source_entry.get().is_empty() { + source_entry.remove(); + } + + next_parents.extend(headers_to_move.iter().map(|(id, _)| id.1)); + + destination_queue + .entry(current_number) + .or_default() + .extend(headers_to_move.into_iter().map(|(id, h)| (id.1, h))) + } + + current_number = current_number + One::one(); + std::mem::swap(&mut current_parents, &mut next_parents); + } +} + +/// Selects (recursive) all synced children of given header. +fn select_synced_children( + synced_children: &SyncedChildren

, + id: &HeaderIdOf

, +) -> Vec> { + let mut result = Vec::new(); + let mut current_parents = HashSet::new(); + current_parents.insert(*id); + + while !current_parents.is_empty() { + let mut next_parents = HashSet::new(); + for current_parent in ¤t_parents { + let current_parent_synced_children = synced_children + .get(¤t_parent.0) + .and_then(|by_number_entry| by_number_entry.get(¤t_parent.1)); + if let Some(current_parent_synced_children) = current_parent_synced_children { + for current_parent_synced_child in current_parent_synced_children { + result.push(*current_parent_synced_child); + next_parents.insert(*current_parent_synced_child); + } + } + } + + let _ = std::mem::replace(&mut current_parents, next_parents); + } + + result +} + +/// Return oldest header from the queue. +fn oldest_header(queue: &HeadersQueue

) -> Option<&QueuedHeader

> { + queue.values().flat_map(|h| h.values()).next() +} + +/// Return oldest headers from the queue until functor will return false. +fn oldest_headers( + queue: &HeadersQueue

, + mut f: impl FnMut(&QueuedHeader

) -> bool, +) -> Option>> { + let result = queue + .values() + .flat_map(|h| h.values()) + .take_while(|h| f(h)) + .collect::>(); + if result.is_empty() { + None + } else { + Some(result) + } +} + +/// Forget all headers with number less than given. +fn prune_queue(queue: &mut HeadersQueue

, prune_border: P::Number) { + *queue = queue.split_off(&prune_border); +} + +/// Forget all known headers with number less than given. +fn prune_known_headers(known_headers: &mut KnownHeaders

, prune_border: P::Number) { + let new_known_headers = known_headers.split_off(&prune_border); + for (pruned_number, pruned_headers) in &*known_headers { + for pruned_hash in pruned_headers.keys() { + log::debug!(target: "bridge", "Pruning header {:?}.", HeaderId(*pruned_number, *pruned_hash)); + } + } + *known_headers = new_known_headers; +} + +/// Change header status. +fn set_header_status( + known_headers: &mut KnownHeaders

, + id: &HeaderIdOf

, + status: HeaderStatus, +) { + log::debug!( + target: "bridge", + "{} header {:?} is now {:?}", + P::SOURCE_NAME, + id, + status, + ); + *known_headers.entry(id.0).or_default().entry(id.1).or_insert(status) = status; +} + +/// Returns queued incomplete header with maximal elapsed time since last update. +fn queued_incomplete_header( + map: &mut LinkedHashMap, + filter: impl FnMut(&mut T) -> bool, +) -> Option<(Id, &T)> { + // TODO (#84): headers that have been just appended to the end of the queue would have to wait until + // all previous headers will be retried + + let retry_old_header = map + .front() + .map(|(key, _)| key.clone()) + .and_then(|key| map.get_mut(&key).map(filter)) + .unwrap_or(false); + if retry_old_header { + let (header_key, header) = map.pop_front().expect("we have checked that front() exists; qed"); + map.insert(header_key, header); + return map.back().map(|(id, data)| (id.clone(), data)); + } + + None +} + +#[cfg(test)] +pub(crate) mod tests { + use super::*; + use crate::sync_loop_tests::{TestHash, TestHeader, TestHeaderId, TestHeadersSyncPipeline, TestNumber}; + use crate::sync_types::QueuedHeader; + + pub(crate) fn header(number: TestNumber) -> QueuedHeader { + QueuedHeader::new(TestHeader { + number, + hash: hash(number), + parent_hash: hash(number - 1), + }) + } + + pub(crate) fn hash(number: TestNumber) -> TestHash { + number + } + + pub(crate) fn id(number: TestNumber) -> TestHeaderId { + HeaderId(number, hash(number)) + } + + #[test] + fn total_headers_works() { + // total headers just sums up number of headers in every queue + let mut queue = QueuedHeaders::::default(); + queue.maybe_orphan.entry(1).or_default().insert( + hash(1), + QueuedHeader::::new(Default::default()), + ); + queue.maybe_orphan.entry(1).or_default().insert( + hash(2), + QueuedHeader::::new(Default::default()), + ); + queue.maybe_orphan.entry(2).or_default().insert( + hash(3), + QueuedHeader::::new(Default::default()), + ); + 
queue.orphan.entry(3).or_default().insert( + hash(4), + QueuedHeader::::new(Default::default()), + ); + queue.maybe_extra.entry(4).or_default().insert( + hash(5), + QueuedHeader::::new(Default::default()), + ); + queue.ready.entry(5).or_default().insert( + hash(6), + QueuedHeader::::new(Default::default()), + ); + queue.incomplete.entry(6).or_default().insert( + hash(7), + QueuedHeader::::new(Default::default()), + ); + assert_eq!(queue.total_headers(), 7); + } + + #[test] + fn best_queued_number_works() { + // initially there are headers in MaybeOrphan queue only + let mut queue = QueuedHeaders::::default(); + queue.maybe_orphan.entry(1).or_default().insert( + hash(1), + QueuedHeader::::new(Default::default()), + ); + queue.maybe_orphan.entry(1).or_default().insert( + hash(2), + QueuedHeader::::new(Default::default()), + ); + queue.maybe_orphan.entry(3).or_default().insert( + hash(3), + QueuedHeader::::new(Default::default()), + ); + assert_eq!(queue.best_queued_number(), 3); + // and then there's better header in Orphan + queue.orphan.entry(10).or_default().insert( + hash(10), + QueuedHeader::::new(Default::default()), + ); + assert_eq!(queue.best_queued_number(), 10); + // and then there's better header in MaybeExtra + queue.maybe_extra.entry(20).or_default().insert( + hash(20), + QueuedHeader::::new(Default::default()), + ); + assert_eq!(queue.best_queued_number(), 20); + // and then there's better header in Ready + queue.ready.entry(30).or_default().insert( + hash(30), + QueuedHeader::::new(Default::default()), + ); + assert_eq!(queue.best_queued_number(), 30); + // and then there's better header in MaybeOrphan again + queue.maybe_orphan.entry(40).or_default().insert( + hash(40), + QueuedHeader::::new(Default::default()), + ); + assert_eq!(queue.best_queued_number(), 40); + // and then there's some header in Incomplete + queue.incomplete.entry(50).or_default().insert( + hash(50), + QueuedHeader::::new(Default::default()), + ); + 
assert_eq!(queue.best_queued_number(), 50); + } + + #[test] + fn status_works() { + // all headers are unknown initially + let mut queue = QueuedHeaders::::default(); + assert_eq!(queue.status(&id(10)), HeaderStatus::Unknown); + // and status is read from the KnownHeaders + queue + .known_headers + .entry(10) + .or_default() + .insert(hash(10), HeaderStatus::Ready); + assert_eq!(queue.status(&id(10)), HeaderStatus::Ready); + } + + #[test] + fn header_works() { + // initially we have oldest header #10 + let mut queue = QueuedHeaders::::default(); + queue.maybe_orphan.entry(10).or_default().insert(hash(1), header(100)); + assert_eq!( + queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, + hash(100) + ); + // inserting #20 changes nothing + queue.maybe_orphan.entry(20).or_default().insert(hash(1), header(101)); + assert_eq!( + queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, + hash(100) + ); + // inserting #5 makes it oldest + queue.maybe_orphan.entry(5).or_default().insert(hash(1), header(102)); + assert_eq!( + queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, + hash(102) + ); + } + + #[test] + fn header_response_works() { + // when parent is Synced, we insert to MaybeExtra + let mut queue = QueuedHeaders::::default(); + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), HeaderStatus::Synced); + queue.header_response(header(101).header().clone()); + assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeExtra); + + // when parent is Ready, we insert to MaybeExtra + let mut queue = QueuedHeaders::::default(); + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), HeaderStatus::Ready); + queue.header_response(header(101).header().clone()); + assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeExtra); + + // when parent is Receipts, we insert to MaybeExtra + let mut queue = QueuedHeaders::::default(); + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), 
HeaderStatus::Extra); + queue.header_response(header(101).header().clone()); + assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeExtra); + + // when parent is MaybeExtra, we insert to MaybeExtra + let mut queue = QueuedHeaders::::default(); + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), HeaderStatus::MaybeExtra); + queue.header_response(header(101).header().clone()); + assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeExtra); + + // when parent is Orphan, we insert to Orphan + let mut queue = QueuedHeaders::::default(); + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), HeaderStatus::Orphan); + queue.header_response(header(101).header().clone()); + assert_eq!(queue.status(&id(101)), HeaderStatus::Orphan); + + // when parent is MaybeOrphan, we insert to MaybeOrphan + let mut queue = QueuedHeaders::::default(); + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), HeaderStatus::MaybeOrphan); + queue.header_response(header(101).header().clone()); + assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeOrphan); + + // when parent is unknown, we insert to MaybeOrphan + let mut queue = QueuedHeaders::::default(); + queue.header_response(header(101).header().clone()); + assert_eq!(queue.status(&id(101)), HeaderStatus::MaybeOrphan); + } + + #[test] + fn ancestors_are_synced_on_substrate_best_header_response() { + // let's say someone else has submitted transaction to bridge that changes + // its best block to #100. 
At this time we have: + // #100 in MaybeOrphan + // #99 in Orphan + // #98 in MaybeExtra + // #97 in Receipts + // #96 in Ready + let mut queue = QueuedHeaders::::default(); + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), HeaderStatus::MaybeOrphan); + queue + .maybe_orphan + .entry(100) + .or_default() + .insert(hash(100), header(100)); + queue + .known_headers + .entry(99) + .or_default() + .insert(hash(99), HeaderStatus::Orphan); + queue.orphan.entry(99).or_default().insert(hash(99), header(99)); + queue + .known_headers + .entry(98) + .or_default() + .insert(hash(98), HeaderStatus::MaybeExtra); + queue.maybe_extra.entry(98).or_default().insert(hash(98), header(98)); + queue + .known_headers + .entry(97) + .or_default() + .insert(hash(97), HeaderStatus::Extra); + queue.extra.entry(97).or_default().insert(hash(97), header(97)); + queue + .known_headers + .entry(96) + .or_default() + .insert(hash(96), HeaderStatus::Ready); + queue.ready.entry(96).or_default().insert(hash(96), header(96)); + queue.target_best_header_response(&id(100)); + + // then the #100 and all ancestors of #100 (#96..#99) are treated as synced + assert!(queue.maybe_orphan.is_empty()); + assert!(queue.orphan.is_empty()); + assert!(queue.maybe_extra.is_empty()); + assert!(queue.extra.is_empty()); + assert!(queue.ready.is_empty()); + assert_eq!(queue.known_headers.len(), 5); + assert!(queue + .known_headers + .values() + .all(|s| s.values().all(|s| *s == HeaderStatus::Synced))); + + // children of synced headers are stored + assert_eq!( + vec![id(97)], + queue.synced_children[&96][&hash(96)] + .iter() + .cloned() + .collect::>() + ); + assert_eq!( + vec![id(98)], + queue.synced_children[&97][&hash(97)] + .iter() + .cloned() + .collect::>() + ); + assert_eq!( + vec![id(99)], + queue.synced_children[&98][&hash(98)] + .iter() + .cloned() + .collect::>() + ); + assert_eq!( + vec![id(100)], + queue.synced_children[&99][&hash(99)] + .iter() + .cloned() + .collect::>() + ); + 
assert_eq!(0, queue.synced_children[&100][&hash(100)].len()); + } + + #[test] + fn descendants_are_moved_on_substrate_best_header_response() { + // let's say someone else has submitted transaction to bridge that changes + // its best block to #100. At this time we have: + // #101 in Orphan + // #102 in MaybeOrphan + // #103 in Orphan + let mut queue = QueuedHeaders::::default(); + queue + .known_headers + .entry(101) + .or_default() + .insert(hash(101), HeaderStatus::Orphan); + queue.orphan.entry(101).or_default().insert(hash(101), header(101)); + queue + .known_headers + .entry(102) + .or_default() + .insert(hash(102), HeaderStatus::MaybeOrphan); + queue + .maybe_orphan + .entry(102) + .or_default() + .insert(hash(102), header(102)); + queue + .known_headers + .entry(103) + .or_default() + .insert(hash(103), HeaderStatus::Orphan); + queue.orphan.entry(103).or_default().insert(hash(103), header(103)); + queue.target_best_header_response(&id(100)); + + // all descendants are moved to MaybeExtra + assert!(queue.maybe_orphan.is_empty()); + assert!(queue.orphan.is_empty()); + assert_eq!(queue.maybe_extra.len(), 3); + assert_eq!(queue.known_headers[&101][&hash(101)], HeaderStatus::MaybeExtra); + assert_eq!(queue.known_headers[&102][&hash(102)], HeaderStatus::MaybeExtra); + assert_eq!(queue.known_headers[&103][&hash(103)], HeaderStatus::MaybeExtra); + } + + #[test] + fn positive_maybe_orphan_response_works() { + // let's say we have: + // #100 in MaybeOrphan + // #101 in Orphan + // #102 in MaybeOrphan + // and we have asked for MaybeOrphan status of #100.parent (i.e. 
#99) + // and the response is: YES, #99 is known to the Substrate runtime + let mut queue = QueuedHeaders::::default(); + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), HeaderStatus::MaybeOrphan); + queue + .maybe_orphan + .entry(100) + .or_default() + .insert(hash(100), header(100)); + queue + .known_headers + .entry(101) + .or_default() + .insert(hash(101), HeaderStatus::Orphan); + queue.orphan.entry(101).or_default().insert(hash(101), header(101)); + queue + .known_headers + .entry(102) + .or_default() + .insert(hash(102), HeaderStatus::MaybeOrphan); + queue + .maybe_orphan + .entry(102) + .or_default() + .insert(hash(102), header(102)); + queue.maybe_orphan_response(&id(99), true); + + // then all headers (#100..#103) are moved to the MaybeExtra queue + assert!(queue.orphan.is_empty()); + assert!(queue.maybe_orphan.is_empty()); + assert_eq!(queue.maybe_extra.len(), 3); + assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::MaybeExtra); + assert_eq!(queue.known_headers[&101][&hash(101)], HeaderStatus::MaybeExtra); + assert_eq!(queue.known_headers[&102][&hash(102)], HeaderStatus::MaybeExtra); + } + + #[test] + fn negative_maybe_orphan_response_works() { + // let's say we have: + // #100 in MaybeOrphan + // #101 in MaybeOrphan + // and we have asked for MaybeOrphan status of #100.parent (i.e. 
#99) + // and the response is: NO, #99 is NOT known to the Substrate runtime + let mut queue = QueuedHeaders::::default(); + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), HeaderStatus::MaybeOrphan); + queue + .maybe_orphan + .entry(100) + .or_default() + .insert(hash(100), header(100)); + queue + .known_headers + .entry(101) + .or_default() + .insert(hash(101), HeaderStatus::MaybeOrphan); + queue + .maybe_orphan + .entry(101) + .or_default() + .insert(hash(101), header(101)); + queue.maybe_orphan_response(&id(99), false); + + // then all headers (#100..#101) are moved to the Orphan queue + assert!(queue.maybe_orphan.is_empty()); + assert_eq!(queue.orphan.len(), 2); + assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Orphan); + assert_eq!(queue.known_headers[&101][&hash(101)], HeaderStatus::Orphan); + } + + #[test] + fn positive_maybe_extra_response_works() { + let mut queue = QueuedHeaders::::default(); + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), HeaderStatus::MaybeExtra); + queue.maybe_extra.entry(100).or_default().insert(hash(100), header(100)); + queue.maybe_extra_response(&id(100), true); + assert!(queue.maybe_extra.is_empty()); + assert_eq!(queue.extra.len(), 1); + assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Extra); + } + + #[test] + fn negative_maybe_extra_response_works() { + // when parent header is complete + let mut queue = QueuedHeaders::::default(); + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), HeaderStatus::MaybeExtra); + queue.maybe_extra.entry(100).or_default().insert(hash(100), header(100)); + queue.maybe_extra_response(&id(100), false); + assert!(queue.maybe_extra.is_empty()); + assert_eq!(queue.ready.len(), 1); + assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Ready); + + // when parent header is incomplete + queue.incomplete_headers.insert(id(200), None); + queue + .known_headers + .entry(201) + .or_default() 
+ .insert(hash(201), HeaderStatus::MaybeExtra); + queue.maybe_extra.entry(201).or_default().insert(hash(201), header(201)); + queue.maybe_extra_response(&id(201), false); + assert!(queue.maybe_extra.is_empty()); + assert_eq!(queue.incomplete.len(), 1); + assert_eq!(queue.known_headers[&201][&hash(201)], HeaderStatus::Incomplete); + } + + #[test] + fn receipts_response_works() { + // when parent header is complete + let mut queue = QueuedHeaders::::default(); + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), HeaderStatus::Extra); + queue.extra.entry(100).or_default().insert(hash(100), header(100)); + queue.extra_response(&id(100), 100_100); + assert!(queue.extra.is_empty()); + assert_eq!(queue.ready.len(), 1); + assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Ready); + + // when parent header is incomplete + queue.incomplete_headers.insert(id(200), None); + queue + .known_headers + .entry(201) + .or_default() + .insert(hash(201), HeaderStatus::Extra); + queue.extra.entry(201).or_default().insert(hash(201), header(201)); + queue.extra_response(&id(201), 201_201); + assert!(queue.extra.is_empty()); + assert_eq!(queue.incomplete.len(), 1); + assert_eq!(queue.known_headers[&201][&hash(201)], HeaderStatus::Incomplete); + } + + #[test] + fn header_submitted_works() { + let mut queue = QueuedHeaders::::default(); + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), HeaderStatus::Ready); + queue.ready.entry(100).or_default().insert(hash(100), header(100)); + queue.headers_submitted(vec![id(100)]); + assert!(queue.ready.is_empty()); + assert_eq!(queue.known_headers[&100][&hash(100)], HeaderStatus::Submitted); + } + + #[test] + fn incomplete_header_works() { + let mut queue = QueuedHeaders::::default(); + + // nothing to complete if queue is empty + assert_eq!(queue.incomplete_header(), None); + + // when there's new header to complete => ask for completion data + queue.incomplete_headers.insert(id(100), None); 
+ assert_eq!(queue.incomplete_header(), Some(id(100))); + + // we have just asked for completion data => nothing to request + assert_eq!(queue.incomplete_header(), None); + + // enough time have passed => ask again + queue.incomplete_headers.clear(); + queue.incomplete_headers.insert( + id(100), + Some(Instant::now() - RETRY_FETCH_COMPLETION_INTERVAL - RETRY_FETCH_COMPLETION_INTERVAL), + ); + assert_eq!(queue.incomplete_header(), Some(id(100))); + } + + #[test] + fn completion_response_works() { + let mut queue = QueuedHeaders::::default(); + queue.incomplete_headers.insert(id(100), None); + queue.incomplete_headers.insert(id(200), Some(Instant::now())); + queue.incomplete_headers.insert(id(300), Some(Instant::now())); + + // when header isn't incompete, nothing changes + queue.completion_response(&id(400), None); + assert_eq!(queue.incomplete_headers.len(), 3); + assert_eq!(queue.completion_data.len(), 0); + assert_eq!(queue.header_to_complete(), None); + + // when response is None, nothing changes + queue.completion_response(&id(100), None); + assert_eq!(queue.incomplete_headers.len(), 3); + assert_eq!(queue.completion_data.len(), 0); + assert_eq!(queue.header_to_complete(), None); + + // when response is Some, we're scheduling completion + queue.completion_response(&id(200), Some(200_200)); + assert_eq!(queue.completion_data.len(), 1); + assert!(queue.completion_data.contains_key(&id(200))); + assert_eq!(queue.header_to_complete(), Some((id(200), &200_200))); + assert_eq!( + queue.incomplete_headers.keys().collect::>(), + vec![&id(100), &id(300), &id(200)], + ); + } + + #[test] + fn header_completed_works() { + let mut queue = QueuedHeaders::::default(); + queue.completion_data.insert(id(100), 100_100); + + // when unknown header is completed + queue.header_completed(&id(200)); + assert_eq!(queue.completion_data.len(), 1); + + // when known header is completed + queue.header_completed(&id(100)); + assert_eq!(queue.completion_data.len(), 0); + } + + #[test] + fn 
incomplete_headers_response_works() { + let mut queue = QueuedHeaders::::default(); + + // when we have already submitted #101 and #102 is ready + queue + .known_headers + .entry(101) + .or_default() + .insert(hash(101), HeaderStatus::Submitted); + queue.submitted.entry(101).or_default().insert(hash(101), header(101)); + queue + .known_headers + .entry(102) + .or_default() + .insert(hash(102), HeaderStatus::Ready); + queue.submitted.entry(102).or_default().insert(hash(102), header(102)); + + // AND now we know that the #100 is incomplete + queue.incomplete_headers_response(vec![id(100)].into_iter().collect()); + + // => #101 and #102 are moved to the Incomplete and #100 is now synced + assert_eq!(queue.status(&id(100)), HeaderStatus::Synced); + assert_eq!(queue.status(&id(101)), HeaderStatus::Incomplete); + assert_eq!(queue.status(&id(102)), HeaderStatus::Incomplete); + assert_eq!(queue.submitted.len(), 0); + assert_eq!(queue.ready.len(), 0); + assert!(queue.incomplete.entry(101).or_default().contains_key(&hash(101))); + assert!(queue.incomplete.entry(102).or_default().contains_key(&hash(102))); + assert!(queue.incomplete_headers.contains_key(&id(100))); + assert!(queue.completion_data.is_empty()); + + // and then header #100 is no longer incomplete + queue.incomplete_headers_response(vec![].into_iter().collect()); + + // => #101 and #102 are moved to the Ready queue and #100 if now forgotten + assert_eq!(queue.status(&id(100)), HeaderStatus::Synced); + assert_eq!(queue.status(&id(101)), HeaderStatus::Ready); + assert_eq!(queue.status(&id(102)), HeaderStatus::Ready); + assert_eq!(queue.incomplete.len(), 0); + assert_eq!(queue.submitted.len(), 0); + assert!(queue.ready.entry(101).or_default().contains_key(&hash(101))); + assert!(queue.ready.entry(102).or_default().contains_key(&hash(102))); + assert!(queue.incomplete_headers.is_empty()); + assert!(queue.completion_data.is_empty()); + } + + #[test] + fn is_parent_incomplete_works() { + let mut queue = 
QueuedHeaders::::default(); + + // when we do not know header itself + assert_eq!(queue.is_parent_incomplete(&id(50)), false); + + // when we do not know parent + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), HeaderStatus::Incomplete); + queue.incomplete.entry(100).or_default().insert(hash(100), header(100)); + assert_eq!(queue.is_parent_incomplete(&id(100)), false); + + // when parent is inside incomplete queue (i.e. some other ancestor is actually incomplete) + queue + .known_headers + .entry(101) + .or_default() + .insert(hash(101), HeaderStatus::Submitted); + queue.submitted.entry(101).or_default().insert(hash(101), header(101)); + assert_eq!(queue.is_parent_incomplete(&id(101)), true); + + // when parent is the incomplete header and we do not have completion data + queue.incomplete_headers.insert(id(199), None); + queue + .known_headers + .entry(200) + .or_default() + .insert(hash(200), HeaderStatus::Submitted); + queue.submitted.entry(200).or_default().insert(hash(200), header(200)); + assert_eq!(queue.is_parent_incomplete(&id(200)), true); + + // when parent is the incomplete header and we have completion data + queue.completion_data.insert(id(299), 299_299); + queue + .known_headers + .entry(300) + .or_default() + .insert(hash(300), HeaderStatus::Submitted); + queue.submitted.entry(300).or_default().insert(hash(300), header(300)); + assert_eq!(queue.is_parent_incomplete(&id(300)), true); + } + + #[test] + fn prune_works() { + let mut queue = QueuedHeaders::::default(); + queue + .known_headers + .entry(105) + .or_default() + .insert(hash(105), HeaderStatus::Incomplete); + queue.incomplete.entry(105).or_default().insert(hash(105), header(105)); + queue + .known_headers + .entry(104) + .or_default() + .insert(hash(104), HeaderStatus::MaybeOrphan); + queue + .maybe_orphan + .entry(104) + .or_default() + .insert(hash(104), header(104)); + queue + .known_headers + .entry(103) + .or_default() + .insert(hash(103), HeaderStatus::Orphan); 
+ queue.orphan.entry(103).or_default().insert(hash(103), header(103)); + queue + .known_headers + .entry(102) + .or_default() + .insert(hash(102), HeaderStatus::MaybeExtra); + queue.maybe_extra.entry(102).or_default().insert(hash(102), header(102)); + queue + .known_headers + .entry(101) + .or_default() + .insert(hash(101), HeaderStatus::Extra); + queue.extra.entry(101).or_default().insert(hash(101), header(101)); + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), HeaderStatus::Ready); + queue.ready.entry(100).or_default().insert(hash(100), header(100)); + queue + .synced_children + .entry(100) + .or_default() + .insert(hash(100), vec![id(101)].into_iter().collect()); + queue + .synced_children + .entry(102) + .or_default() + .insert(hash(102), vec![id(102)].into_iter().collect()); + + queue.prune(102); + + assert_eq!(queue.ready.len(), 0); + assert_eq!(queue.extra.len(), 0); + assert_eq!(queue.maybe_extra.len(), 1); + assert_eq!(queue.orphan.len(), 1); + assert_eq!(queue.maybe_orphan.len(), 1); + assert_eq!(queue.incomplete.len(), 1); + assert_eq!(queue.synced_children.len(), 1); + assert_eq!(queue.known_headers.len(), 4); + + queue.prune(110); + + assert_eq!(queue.ready.len(), 0); + assert_eq!(queue.extra.len(), 0); + assert_eq!(queue.maybe_extra.len(), 0); + assert_eq!(queue.orphan.len(), 0); + assert_eq!(queue.maybe_orphan.len(), 0); + assert_eq!(queue.incomplete.len(), 0); + assert_eq!(queue.synced_children.len(), 0); + assert_eq!(queue.known_headers.len(), 0); + + queue.header_response(header(109).header().clone()); + assert_eq!(queue.known_headers.len(), 0); + + queue.header_response(header(110).header().clone()); + assert_eq!(queue.known_headers.len(), 1); + } + + #[test] + fn incomplete_headers_are_still_incomplete_after_advance() { + let mut queue = QueuedHeaders::::default(); + + // relay#1 knows that header#100 is incomplete && it has headers 101..104 in incomplete queue + queue.incomplete_headers.insert(id(100), None); + 
queue.incomplete.entry(101).or_default().insert(hash(101), header(101)); + queue.incomplete.entry(102).or_default().insert(hash(102), header(102)); + queue.incomplete.entry(103).or_default().insert(hash(103), header(103)); + queue.incomplete.entry(104).or_default().insert(hash(104), header(104)); + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), HeaderStatus::Synced); + queue + .known_headers + .entry(101) + .or_default() + .insert(hash(101), HeaderStatus::Incomplete); + queue + .known_headers + .entry(102) + .or_default() + .insert(hash(102), HeaderStatus::Incomplete); + queue + .known_headers + .entry(103) + .or_default() + .insert(hash(103), HeaderStatus::Incomplete); + queue + .known_headers + .entry(104) + .or_default() + .insert(hash(104), HeaderStatus::Incomplete); + + // let's say relay#2 completes header#100 and then submits header#101+header#102 and it turns + // out that header#102 is also incomplete + queue.incomplete_headers_response(vec![id(102)].into_iter().collect()); + + // then the header#103 and the header#104 must have Incomplete status + assert_eq!(queue.status(&id(101)), HeaderStatus::Synced); + assert_eq!(queue.status(&id(102)), HeaderStatus::Synced); + assert_eq!(queue.status(&id(103)), HeaderStatus::Incomplete); + assert_eq!(queue.status(&id(104)), HeaderStatus::Incomplete); + } + + #[test] + fn incomplete_headers_response_moves_synced_headers() { + let mut queue = QueuedHeaders::::default(); + + // we have submitted two headers - 100 and 101. 
102 is ready + queue.submitted.entry(100).or_default().insert(hash(100), header(100)); + queue.submitted.entry(101).or_default().insert(hash(101), header(101)); + queue.ready.entry(102).or_default().insert(hash(102), header(102)); + queue + .known_headers + .entry(100) + .or_default() + .insert(hash(100), HeaderStatus::Submitted); + queue + .known_headers + .entry(101) + .or_default() + .insert(hash(101), HeaderStatus::Submitted); + queue + .known_headers + .entry(102) + .or_default() + .insert(hash(102), HeaderStatus::Ready); + + // both headers are accepted + queue.target_best_header_response(&id(101)); + + // but header 100 is incomplete + queue.incomplete_headers_response(vec![id(100)].into_iter().collect()); + assert_eq!(queue.status(&id(100)), HeaderStatus::Synced); + assert_eq!(queue.status(&id(101)), HeaderStatus::Synced); + assert_eq!(queue.status(&id(102)), HeaderStatus::Incomplete); + assert!(queue.incomplete_headers.contains_key(&id(100))); + assert!(queue.incomplete[&102].contains_key(&hash(102))); + + // when header 100 is completed, 101 is synced and 102 is ready + queue.incomplete_headers_response(HashSet::new()); + assert_eq!(queue.status(&id(100)), HeaderStatus::Synced); + assert_eq!(queue.status(&id(101)), HeaderStatus::Synced); + assert_eq!(queue.status(&id(102)), HeaderStatus::Ready); + assert!(queue.ready[&102].contains_key(&hash(102))); + } +} diff --git a/polkadot/relays/headers/src/lib.rs b/polkadot/relays/headers/src/lib.rs new file mode 100644 index 00000000000..8946355921f --- /dev/null +++ b/polkadot/relays/headers/src/lib.rs @@ -0,0 +1,33 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Relaying source chain headers to target chain. This module provides entrypoint +//! that starts reading new headers from source chain and submit these headers as +//! module/contract transactions to the target chain. Pallet/contract on the target +//! chain is a light-client of the source chain. All other trustless bridge +//! applications are built using this light-client, so running headers-relay is +//! essential for running all other bridge applications. + +// required for futures::select! +#![recursion_limit = "1024"] +#![warn(missing_docs)] + +pub mod headers; +pub mod sync; +pub mod sync_loop; +pub mod sync_loop_metrics; +pub mod sync_loop_tests; +pub mod sync_types; diff --git a/polkadot/relays/headers/src/sync.rs b/polkadot/relays/headers/src/sync.rs new file mode 100644 index 00000000000..e992b1f8e58 --- /dev/null +++ b/polkadot/relays/headers/src/sync.rs @@ -0,0 +1,523 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Headers synchronization context. This structure wraps headers queue and is +//! able to choose: which headers to read from the source chain? Which headers +//! to submit to the target chain? The context makes decisions basing on parameters +//! passed using `HeadersSyncParams` structure. + +use crate::headers::QueuedHeaders; +use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader}; +use num_traits::{One, Saturating, Zero}; + +/// Common sync params. +#[derive(Debug, Clone)] +pub struct HeadersSyncParams { + /// Maximal number of ethereum headers to pre-download. + pub max_future_headers_to_download: usize, + /// Maximal number of active (we believe) submit header transactions. + pub max_headers_in_submitted_status: usize, + /// Maximal number of headers in single submit request. + pub max_headers_in_single_submit: usize, + /// Maximal total headers size in single submit request. + pub max_headers_size_in_single_submit: usize, + /// We only may store and accept (from Ethereum node) headers that have + /// number >= than best_substrate_header.number - prune_depth. + pub prune_depth: u32, + /// Target transactions mode. + pub target_tx_mode: TargetTransactionMode, +} + +/// Target transaction mode. +#[derive(Debug, PartialEq, Clone)] +pub enum TargetTransactionMode { + /// Submit new headers using signed transactions. + Signed, + /// Submit new headers using unsigned transactions. + Unsigned, + /// Submit new headers using signed transactions, but only when we + /// believe that sync has stalled. + Backup, +} + +/// Headers synchronization context. +#[derive(Debug)] +pub struct HeadersSync { + /// Synchronization parameters. + params: HeadersSyncParams, + /// Best header number known to source node. + source_best_number: Option, + /// Best header known to target node. 
+ target_best_header: Option>, + /// Headers queue. + headers: QueuedHeaders

, + /// Pause headers submission. + pause_submit: bool, +} + +impl HeadersSync

{ + /// Creates new headers synchronizer. + pub fn new(params: HeadersSyncParams) -> Self { + HeadersSync { + headers: QueuedHeaders::default(), + params, + source_best_number: None, + target_best_header: None, + pause_submit: false, + } + } + + /// Return best header number known to source node. + pub fn source_best_number(&self) -> Option { + self.source_best_number + } + + /// Best header known to target node. + pub fn target_best_header(&self) -> Option> { + self.target_best_header + } + + /// Returns true if we have synced almost all known headers. + pub fn is_almost_synced(&self) -> bool { + match self.source_best_number { + Some(source_best_number) => self + .target_best_header + .map(|best| source_best_number.saturating_sub(best.0) < 4.into()) + .unwrap_or(false), + None => true, + } + } + + /// Returns synchronization status. + pub fn status(&self) -> (&Option>, &Option) { + (&self.target_best_header, &self.source_best_number) + } + + /// Returns reference to the headers queue. + pub fn headers(&self) -> &QueuedHeaders

{ + &self.headers + } + + /// Returns mutable reference to the headers queue. + pub fn headers_mut(&mut self) -> &mut QueuedHeaders

{ + &mut self.headers + } + + /// Select header that needs to be downloaded from the source node. + pub fn select_new_header_to_download(&self) -> Option { + // if we haven't received best header from source node yet, there's nothing we can download + let source_best_number = self.source_best_number?; + + // if we haven't received known best header from target node yet, there's nothing we can download + let target_best_header = self.target_best_header.as_ref()?; + + // if there's too many headers in the queue, stop downloading + let in_memory_headers = self.headers.total_headers(); + if in_memory_headers >= self.params.max_future_headers_to_download { + return None; + } + + // if queue is empty and best header on target is > than best header on source, + // then we shoud reorg + let best_queued_number = self.headers.best_queued_number(); + if best_queued_number.is_zero() && source_best_number < target_best_header.0 { + return Some(source_best_number); + } + + // we assume that there were no reorgs if we have already downloaded best header + let best_downloaded_number = std::cmp::max( + std::cmp::max(best_queued_number, self.headers.best_synced_number()), + target_best_header.0, + ); + if best_downloaded_number >= source_best_number { + return None; + } + + // download new header + Some(best_downloaded_number + One::one()) + } + + /// Selech orphan header to downoload. + pub fn select_orphan_header_to_download(&self) -> Option<&QueuedHeader

> { + let orphan_header = self.headers.header(HeaderStatus::Orphan)?; + + // we consider header orphan until we'll find it ancestor that is known to the target node + // => we may get orphan header while we ask target node whether it knows its parent + // => let's avoid fetching duplicate headers + let parent_id = orphan_header.parent_id(); + if self.headers.status(&parent_id) != HeaderStatus::Unknown { + return None; + } + + Some(orphan_header) + } + + /// Select headers that need to be submitted to the target node. + pub fn select_headers_to_submit(&self, stalled: bool) -> Option>> { + // maybe we have paused new headers submit? + if self.pause_submit { + return None; + } + + // if we operate in backup mode, we only submit headers when sync has stalled + if self.params.target_tx_mode == TargetTransactionMode::Backup && !stalled { + return None; + } + + let headers_in_submit_status = self.headers.headers_in_status(HeaderStatus::Submitted); + let headers_to_submit_count = self + .params + .max_headers_in_submitted_status + .checked_sub(headers_in_submit_status)?; + + let mut total_size = 0; + let mut total_headers = 0; + self.headers.headers(HeaderStatus::Ready, |header| { + if total_headers == headers_to_submit_count { + return false; + } + if total_headers == self.params.max_headers_in_single_submit { + return false; + } + + let encoded_size = P::estimate_size(header); + if total_headers != 0 && total_size + encoded_size > self.params.max_headers_size_in_single_submit { + return false; + } + + total_size += encoded_size; + total_headers += 1; + + true + }) + } + + /// Receive new target header number from the source node. + pub fn source_best_header_number_response(&mut self, best_header_number: P::Number) { + log::debug!( + target: "bridge", + "Received best header number from {} node: {}", + P::SOURCE_NAME, + best_header_number, + ); + self.source_best_number = Some(best_header_number); + } + + /// Receive new best header from the target node. 
+ /// Returns true if it is different from the previous block known to us. + pub fn target_best_header_response(&mut self, best_header: HeaderIdOf

) -> bool { + log::debug!( + target: "bridge", + "Received best known header from {}: {:?}", + P::TARGET_NAME, + best_header, + ); + + // early return if it is still the same + if self.target_best_header == Some(best_header) { + return false; + } + + // remember that this header is now known to the Substrate runtime + self.headers.target_best_header_response(&best_header); + + // prune ancient headers + self.headers + .prune(best_header.0.saturating_sub(self.params.prune_depth.into())); + + // finally remember the best header itself + self.target_best_header = Some(best_header); + + // we are ready to submit headers again + if self.pause_submit { + log::debug!( + target: "bridge", + "Ready to submit {} headers to {} node again!", + P::SOURCE_NAME, + P::TARGET_NAME, + ); + + self.pause_submit = false; + } + + true + } + + /// Pause headers submit until best header will be updated on target node. + pub fn pause_submit(&mut self) { + log::debug!( + target: "bridge", + "Stopping submitting {} headers to {} node. Waiting for {} submitted headers to be accepted", + P::SOURCE_NAME, + P::TARGET_NAME, + self.headers.headers_in_status(HeaderStatus::Submitted), + ); + + self.pause_submit = true; + } + + /// Restart synchronization. 
+ pub fn restart(&mut self) { + self.source_best_number = None; + self.target_best_header = None; + self.headers.clear(); + self.pause_submit = false; + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + use crate::headers::tests::{header, id}; + use crate::sync_loop_tests::{TestHash, TestHeadersSyncPipeline, TestNumber}; + use crate::sync_types::HeaderStatus; + use relay_utils::HeaderId; + + fn side_hash(number: TestNumber) -> TestHash { + 1000 + number + } + + pub fn default_sync_params() -> HeadersSyncParams { + HeadersSyncParams { + max_future_headers_to_download: 128, + max_headers_in_submitted_status: 128, + max_headers_in_single_submit: 32, + max_headers_size_in_single_submit: 131_072, + prune_depth: 4096, + target_tx_mode: TargetTransactionMode::Signed, + } + } + + #[test] + fn select_new_header_to_download_works() { + let mut eth_sync = HeadersSync::::new(default_sync_params()); + + // both best && target headers are unknown + assert_eq!(eth_sync.select_new_header_to_download(), None); + + // best header is known, target header is unknown + eth_sync.target_best_header = Some(HeaderId(0, Default::default())); + assert_eq!(eth_sync.select_new_header_to_download(), None); + + // target header is known, best header is unknown + eth_sync.target_best_header = None; + eth_sync.source_best_number = Some(100); + assert_eq!(eth_sync.select_new_header_to_download(), None); + + // when our best block has the same number as the target + eth_sync.target_best_header = Some(HeaderId(100, Default::default())); + assert_eq!(eth_sync.select_new_header_to_download(), None); + + // when we actually need a new header + eth_sync.source_best_number = Some(101); + assert_eq!(eth_sync.select_new_header_to_download(), Some(101)); + + // when we have to reorganize to longer fork + eth_sync.source_best_number = Some(100); + eth_sync.target_best_header = Some(HeaderId(200, Default::default())); + assert_eq!(eth_sync.select_new_header_to_download(), Some(100)); + + // when there 
are too many headers scheduled for submitting + for i in 1..1000 { + eth_sync.headers.header_response(header(i).header().clone()); + } + assert_eq!(eth_sync.select_new_header_to_download(), None); + } + + #[test] + fn select_new_header_to_download_works_with_empty_queue() { + let mut eth_sync = HeadersSync::::new(default_sync_params()); + eth_sync.source_best_header_number_response(100); + + // when queue is not empty => everything goes as usually + eth_sync.target_best_header_response(header(10).id()); + eth_sync.headers_mut().header_response(header(11).header().clone()); + eth_sync.headers_mut().maybe_extra_response(&header(11).id(), false); + assert_eq!(eth_sync.select_new_header_to_download(), Some(12)); + + // but then queue is drained + eth_sync.headers_mut().target_best_header_response(&header(11).id()); + + // even though it's empty, we know that header#11 is synced + assert_eq!(eth_sync.headers().best_queued_number(), 0); + assert_eq!(eth_sync.headers().best_synced_number(), 11); + assert_eq!(eth_sync.select_new_header_to_download(), Some(12)); + } + + #[test] + fn sync_without_reorgs_works() { + let mut eth_sync = HeadersSync::new(default_sync_params()); + eth_sync.params.max_headers_in_submitted_status = 1; + + // ethereum reports best header #102 + eth_sync.source_best_header_number_response(102); + + // substrate reports that it is at block #100 + eth_sync.target_best_header_response(id(100)); + + // block #101 is downloaded first + assert_eq!(eth_sync.select_new_header_to_download(), Some(101)); + eth_sync.headers.header_response(header(101).header().clone()); + + // now header #101 is ready to be submitted + assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeExtra), Some(&header(101))); + eth_sync.headers.maybe_extra_response(&id(101), false); + assert_eq!(eth_sync.headers.header(HeaderStatus::Ready), Some(&header(101))); + assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(101)])); + + // and header #102 is ready to be 
downloaded + assert_eq!(eth_sync.select_new_header_to_download(), Some(102)); + eth_sync.headers.header_response(header(102).header().clone()); + + // receive submission confirmation + eth_sync.headers.headers_submitted(vec![id(101)]); + + // we have nothing to submit because previous header hasn't been confirmed yet + // (and we allow max 1 submit transaction in the wild) + assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeExtra), Some(&header(102))); + eth_sync.headers.maybe_extra_response(&id(102), false); + assert_eq!(eth_sync.headers.header(HeaderStatus::Ready), Some(&header(102))); + assert_eq!(eth_sync.select_headers_to_submit(false), None); + + // substrate reports that it has imported block #101 + eth_sync.target_best_header_response(id(101)); + + // and we are ready to submit #102 + assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(102)])); + eth_sync.headers.headers_submitted(vec![id(102)]); + + // substrate reports that it has imported block #102 + eth_sync.target_best_header_response(id(102)); + + // and we have nothing to download + assert_eq!(eth_sync.select_new_header_to_download(), None); + } + + #[test] + fn sync_with_orphan_headers_work() { + let mut eth_sync = HeadersSync::new(default_sync_params()); + + // ethereum reports best header #102 + eth_sync.source_best_header_number_response(102); + + // substrate reports that it is at block #100, but it isn't part of best chain + eth_sync.target_best_header_response(HeaderId(100, side_hash(100))); + + // block #101 is downloaded first + assert_eq!(eth_sync.select_new_header_to_download(), Some(101)); + eth_sync.headers.header_response(header(101).header().clone()); + + // we can't submit header #101, because its parent status is unknown + assert_eq!(eth_sync.select_headers_to_submit(false), None); + + // instead we are trying to determine status of its parent (#100) + assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeOrphan), Some(&header(101))); + + // and the status is 
still unknown + eth_sync.headers.maybe_orphan_response(&id(100), false); + + // so we consider #101 orphaned now && will download its parent - #100 + assert_eq!(eth_sync.headers.header(HeaderStatus::Orphan), Some(&header(101))); + eth_sync.headers.header_response(header(100).header().clone()); + + // #101 is now Orphan and #100 is MaybeOrphan => we do not want to retrieve + // header #100 again + assert_eq!(eth_sync.headers.header(HeaderStatus::Orphan), Some(&header(101))); + assert_eq!(eth_sync.select_orphan_header_to_download(), None); + + // we can't submit header #100, because its parent status is unknown + assert_eq!(eth_sync.select_headers_to_submit(false), None); + + // instead we are trying to determine status of its parent (#99) + assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeOrphan), Some(&header(100))); + + // and the status is known, so we move previously orphaned #100 and #101 to ready queue + eth_sync.headers.maybe_orphan_response(&id(99), true); + + // and we are ready to submit #100 + assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeExtra), Some(&header(100))); + eth_sync.headers.maybe_extra_response(&id(100), false); + assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(100)])); + eth_sync.headers.headers_submitted(vec![id(100)]); + + // and we are ready to submit #101 + assert_eq!(eth_sync.headers.header(HeaderStatus::MaybeExtra), Some(&header(101))); + eth_sync.headers.maybe_extra_response(&id(101), false); + assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(101)])); + eth_sync.headers.headers_submitted(vec![id(101)]); + } + + #[test] + fn pruning_happens_on_target_best_header_response() { + let mut eth_sync = HeadersSync::::new(default_sync_params()); + eth_sync.params.prune_depth = 50; + eth_sync.target_best_header_response(id(100)); + assert_eq!(eth_sync.headers.prune_border(), 50); + } + + #[test] + fn only_submitting_headers_in_backup_mode_when_stalled() { + let mut eth_sync = 
HeadersSync::new(default_sync_params()); + eth_sync.params.target_tx_mode = TargetTransactionMode::Backup; + + // ethereum reports best header #102 + eth_sync.source_best_header_number_response(102); + + // substrate reports that it is at block #100 + eth_sync.target_best_header_response(id(100)); + + // block #101 is downloaded first + eth_sync.headers.header_response(header(101).header().clone()); + eth_sync.headers.maybe_extra_response(&id(101), false); + + // ensure that headers are not submitted when sync is not stalled + assert_eq!(eth_sync.select_headers_to_submit(false), None); + + // ensure that headers are not submitted when sync is stalled + assert_eq!(eth_sync.select_headers_to_submit(true), Some(vec![&header(101)])); + } + + #[test] + fn does_not_select_new_headers_to_submit_when_submit_is_paused() { + let mut eth_sync = HeadersSync::new(default_sync_params()); + eth_sync.params.max_headers_in_submitted_status = 1; + + // ethereum reports best header #102 and substrate is at #100 + eth_sync.source_best_header_number_response(102); + eth_sync.target_best_header_response(id(100)); + + // let's prepare #101 and #102 for submitting + eth_sync.headers.header_response(header(101).header().clone()); + eth_sync.headers.maybe_extra_response(&id(101), false); + eth_sync.headers.header_response(header(102).header().clone()); + eth_sync.headers.maybe_extra_response(&id(102), false); + + // when submit is not paused, we're ready to submit #101 + assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(101)])); + + // when submit is paused, we're not ready to submit anything + eth_sync.pause_submit(); + assert_eq!(eth_sync.select_headers_to_submit(false), None); + + // if best header on substrate node isn't updated, we still not submitting anything + eth_sync.target_best_header_response(id(100)); + assert_eq!(eth_sync.select_headers_to_submit(false), None); + + // but after it is actually updated, we are ready to submit + 
eth_sync.target_best_header_response(id(101)); + assert_eq!(eth_sync.select_headers_to_submit(false), Some(vec![&header(102)])); + } +} diff --git a/polkadot/relays/headers/src/sync_loop.rs b/polkadot/relays/headers/src/sync_loop.rs new file mode 100644 index 00000000000..e4f1b7b0450 --- /dev/null +++ b/polkadot/relays/headers/src/sync_loop.rs @@ -0,0 +1,637 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Entrypoint for running headers synchronization loop. 
+ +use crate::sync::{HeadersSync, HeadersSyncParams}; +use crate::sync_loop_metrics::SyncLoopMetrics; +use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SubmittedHeaders}; + +use async_trait::async_trait; +use futures::{future::FutureExt, stream::StreamExt}; +use num_traits::{Saturating, Zero}; +use relay_utils::{ + format_ids, interval, + metrics::{GlobalMetrics, MetricsParams}, + process_future_result, + relay_loop::Client as RelayClient, + retry_backoff, FailedClient, MaybeConnectionError, StringifiedMaybeConnectionError, +}; +use std::{ + collections::HashSet, + future::Future, + time::{Duration, Instant}, +}; + +/// When we submit headers to target node, but see no updates of best +/// source block known to target node during STALL_SYNC_TIMEOUT seconds, +/// we consider that our headers are rejected because there has been reorg in target chain. +/// This reorg could invalidate our knowledge about sync process (i.e. we have asked if +/// HeaderA is known to target, but then reorg happened and the answer is different +/// now) => we need to reset sync. +/// The other option is to receive **EVERY** best target header and check if it is +/// direct child of previous best header. But: (1) subscription doesn't guarantee that +/// the subscriber will receive every best header (2) reorg won't always lead to sync +/// stall and restart is a heavy operation (we forget all in-memory headers). +const STALL_SYNC_TIMEOUT: Duration = Duration::from_secs(5 * 60); +/// Delay after we have seen update of best source header at target node, +/// for us to treat sync stalled. ONLY when relay operates in backup mode. +const BACKUP_STALL_SYNC_TIMEOUT: Duration = Duration::from_secs(10 * 60); +/// Interval between calling sync maintain procedure. +const MAINTAIN_INTERVAL: Duration = Duration::from_secs(30); + +/// Source client trait. +#[async_trait] +pub trait SourceClient: RelayClient { + /// Get best block number. 
+ async fn best_block_number(&self) -> Result; + + /// Get header by hash. + async fn header_by_hash(&self, hash: P::Hash) -> Result; + + /// Get canonical header by number. + async fn header_by_number(&self, number: P::Number) -> Result; + + /// Get completion data by header hash. + async fn header_completion(&self, id: HeaderIdOf

) + -> Result<(HeaderIdOf

, Option), Self::Error>; + + /// Get extra data by header hash. + async fn header_extra( + &self, + id: HeaderIdOf

, + header: QueuedHeader

, + ) -> Result<(HeaderIdOf

, P::Extra), Self::Error>; +} + +/// Target client trait. +#[async_trait] +pub trait TargetClient: RelayClient { + /// Returns ID of best header known to the target node. + async fn best_header_id(&self) -> Result, Self::Error>; + + /// Returns true if header is known to the target node. + async fn is_known_header(&self, id: HeaderIdOf

) -> Result<(HeaderIdOf

, bool), Self::Error>; + + /// Submit headers. + async fn submit_headers(&self, headers: Vec>) -> SubmittedHeaders, Self::Error>; + + /// Returns ID of headers that require to be 'completed' before children can be submitted. + async fn incomplete_headers_ids(&self) -> Result>, Self::Error>; + + /// Submit completion data for header. + async fn complete_header(&self, id: HeaderIdOf

, completion: P::Completion) + -> Result, Self::Error>; + + /// Returns true if header requires extra data to be submitted. + async fn requires_extra(&self, header: QueuedHeader

) -> Result<(HeaderIdOf

, bool), Self::Error>; +} + +/// Synchronization maintain procedure. +#[async_trait] +pub trait SyncMaintain: Clone + Send + Sync { + /// Run custom maintain procedures. This is guaranteed to be called when both source and target + /// clients are unoccupied. + async fn maintain(&self, _sync: &mut HeadersSync

) {} +} + +impl SyncMaintain

for () {} + +/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs sync loop. +pub fn metrics_prefix() -> String { + format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME) +} + +/// Run headers synchronization. +#[allow(clippy::too_many_arguments)] +pub async fn run>( + source_client: impl SourceClient

, + source_tick: Duration, + target_client: TC, + target_tick: Duration, + sync_maintain: impl SyncMaintain

, + sync_params: HeadersSyncParams, + metrics_params: MetricsParams, + exit_signal: impl Future, +) -> Result<(), String> { + let exit_signal = exit_signal.shared(); + relay_utils::relay_loop(source_client, target_client) + .with_metrics(Some(metrics_prefix::

()), metrics_params) + .loop_metric(|registry, prefix| SyncLoopMetrics::new(registry, prefix))? + .standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))? + .expose() + .await? + .run(|source_client, target_client, metrics| { + run_until_connection_lost( + source_client, + source_tick, + target_client, + target_tick, + sync_maintain.clone(), + sync_params.clone(), + metrics, + exit_signal.clone(), + ) + }) + .await +} + +/// Run headers synchronization. +#[allow(clippy::too_many_arguments)] +async fn run_until_connection_lost>( + source_client: impl SourceClient

, + source_tick: Duration, + target_client: TC, + target_tick: Duration, + sync_maintain: impl SyncMaintain

, + sync_params: HeadersSyncParams, + metrics_sync: Option, + exit_signal: impl Future, +) -> Result<(), FailedClient> { + let mut progress_context = (Instant::now(), None, None); + + let mut sync = HeadersSync::

::new(sync_params); + let mut stall_countdown = None; + let mut last_update_time = Instant::now(); + + let mut source_retry_backoff = retry_backoff(); + let mut source_client_is_online = false; + let mut source_best_block_number_required = false; + let source_best_block_number_future = source_client.best_block_number().fuse(); + let source_new_header_future = futures::future::Fuse::terminated(); + let source_orphan_header_future = futures::future::Fuse::terminated(); + let source_extra_future = futures::future::Fuse::terminated(); + let source_completion_future = futures::future::Fuse::terminated(); + let source_go_offline_future = futures::future::Fuse::terminated(); + let source_tick_stream = interval(source_tick).fuse(); + + let mut target_retry_backoff = retry_backoff(); + let mut target_client_is_online = false; + let mut target_best_block_required = false; + let mut target_incomplete_headers_required = true; + let target_best_block_future = target_client.best_header_id().fuse(); + let target_incomplete_headers_future = futures::future::Fuse::terminated(); + let target_extra_check_future = futures::future::Fuse::terminated(); + let target_existence_status_future = futures::future::Fuse::terminated(); + let target_submit_header_future = futures::future::Fuse::terminated(); + let target_complete_header_future = futures::future::Fuse::terminated(); + let target_go_offline_future = futures::future::Fuse::terminated(); + let target_tick_stream = interval(target_tick).fuse(); + + let mut maintain_required = false; + let maintain_stream = interval(MAINTAIN_INTERVAL).fuse(); + + let exit_signal = exit_signal.fuse(); + + futures::pin_mut!( + source_best_block_number_future, + source_new_header_future, + source_orphan_header_future, + source_extra_future, + source_completion_future, + source_go_offline_future, + source_tick_stream, + target_best_block_future, + target_incomplete_headers_future, + target_extra_check_future, + target_existence_status_future, + 
target_submit_header_future, + target_complete_header_future, + target_go_offline_future, + target_tick_stream, + maintain_stream, + exit_signal + ); + + loop { + futures::select! { + source_best_block_number = source_best_block_number_future => { + source_best_block_number_required = false; + + source_client_is_online = process_future_result( + source_best_block_number, + &mut source_retry_backoff, + |source_best_block_number| sync.source_best_header_number_response(source_best_block_number), + &mut source_go_offline_future, + async_std::task::sleep, + || format!("Error retrieving best header number from {}", P::SOURCE_NAME), + ).fail_if_connection_error(FailedClient::Source)?; + }, + source_new_header = source_new_header_future => { + source_client_is_online = process_future_result( + source_new_header, + &mut source_retry_backoff, + |source_new_header| sync.headers_mut().header_response(source_new_header), + &mut source_go_offline_future, + async_std::task::sleep, + || format!("Error retrieving header from {} node", P::SOURCE_NAME), + ).fail_if_connection_error(FailedClient::Source)?; + }, + source_orphan_header = source_orphan_header_future => { + source_client_is_online = process_future_result( + source_orphan_header, + &mut source_retry_backoff, + |source_orphan_header| sync.headers_mut().header_response(source_orphan_header), + &mut source_go_offline_future, + async_std::task::sleep, + || format!("Error retrieving orphan header from {} node", P::SOURCE_NAME), + ).fail_if_connection_error(FailedClient::Source)?; + }, + source_extra = source_extra_future => { + source_client_is_online = process_future_result( + source_extra, + &mut source_retry_backoff, + |(header, extra)| sync.headers_mut().extra_response(&header, extra), + &mut source_go_offline_future, + async_std::task::sleep, + || format!("Error retrieving extra data from {} node", P::SOURCE_NAME), + ).fail_if_connection_error(FailedClient::Source)?; + }, + source_completion = source_completion_future => 
{ + source_client_is_online = process_future_result( + source_completion, + &mut source_retry_backoff, + |(header, completion)| sync.headers_mut().completion_response(&header, completion), + &mut source_go_offline_future, + async_std::task::sleep, + || format!("Error retrieving completion data from {} node", P::SOURCE_NAME), + ).fail_if_connection_error(FailedClient::Source)?; + }, + _ = source_go_offline_future => { + source_client_is_online = true; + }, + _ = source_tick_stream.next() => { + if sync.is_almost_synced() { + source_best_block_number_required = true; + } + }, + target_best_block = target_best_block_future => { + target_best_block_required = false; + + target_client_is_online = process_future_result( + target_best_block, + &mut target_retry_backoff, + |target_best_block| { + let head_updated = sync.target_best_header_response(target_best_block); + if head_updated { + last_update_time = Instant::now(); + } + match head_updated { + // IF head is updated AND there are still our transactions: + // => restart stall countdown timer + true if sync.headers().headers_in_status(HeaderStatus::Submitted) != 0 => + stall_countdown = Some(Instant::now()), + // IF head is updated AND there are no our transactions: + // => stop stall countdown timer + true => stall_countdown = None, + // IF head is not updated AND stall countdown is not yet completed + // => do nothing + false if stall_countdown + .map(|stall_countdown| stall_countdown.elapsed() < STALL_SYNC_TIMEOUT) + .unwrap_or(true) + => (), + // IF head is not updated AND stall countdown has completed + // => restart sync + false => { + log::info!( + target: "bridge", + "Sync has stalled. 
Restarting {} headers synchronization.", + P::SOURCE_NAME, + ); + stall_countdown = None; + sync.restart(); + }, + } + }, + &mut target_go_offline_future, + async_std::task::sleep, + || format!("Error retrieving best known {} header from {} node", P::SOURCE_NAME, P::TARGET_NAME), + ).fail_if_connection_error(FailedClient::Target)?; + }, + incomplete_headers_ids = target_incomplete_headers_future => { + target_incomplete_headers_required = false; + + target_client_is_online = process_future_result( + incomplete_headers_ids, + &mut target_retry_backoff, + |incomplete_headers_ids| sync.headers_mut().incomplete_headers_response(incomplete_headers_ids), + &mut target_go_offline_future, + async_std::task::sleep, + || format!("Error retrieving incomplete headers from {} node", P::TARGET_NAME), + ).fail_if_connection_error(FailedClient::Target)?; + }, + target_existence_status = target_existence_status_future => { + target_client_is_online = process_future_result( + target_existence_status, + &mut target_retry_backoff, + |(target_header, target_existence_status)| sync + .headers_mut() + .maybe_orphan_response(&target_header, target_existence_status), + &mut target_go_offline_future, + async_std::task::sleep, + || format!("Error retrieving existence status from {} node", P::TARGET_NAME), + ).fail_if_connection_error(FailedClient::Target)?; + }, + submitted_headers = target_submit_header_future => { + // following line helps Rust understand the type of `submitted_headers` :/ + let submitted_headers: SubmittedHeaders, TC::Error> = submitted_headers; + let submitted_headers_str = format!("{}", submitted_headers); + let all_headers_rejected = submitted_headers.submitted.is_empty() + && submitted_headers.incomplete.is_empty(); + let has_submitted_headers = sync.headers().headers_in_status(HeaderStatus::Submitted) != 0; + + let maybe_fatal_error = match submitted_headers.fatal_error { + Some(fatal_error) => Err(StringifiedMaybeConnectionError::new( + 
fatal_error.is_connection_error(), + format!("{:?}", fatal_error), + )), + None if all_headers_rejected && !has_submitted_headers => + Err(StringifiedMaybeConnectionError::new(false, "All headers were rejected".into())), + None => Ok(()), + }; + + let no_fatal_error = maybe_fatal_error.is_ok(); + target_client_is_online = process_future_result( + maybe_fatal_error, + &mut target_retry_backoff, + |_| {}, + &mut target_go_offline_future, + async_std::task::sleep, + || format!("Error submitting headers to {} node", P::TARGET_NAME), + ).fail_if_connection_error(FailedClient::Target)?; + + log::debug!(target: "bridge", "Header submit result: {}", submitted_headers_str); + + sync.headers_mut().headers_submitted(submitted_headers.submitted); + sync.headers_mut().add_incomplete_headers(false, submitted_headers.incomplete); + + // when there's no fatal error, but node has rejected all our headers we may + // want to pause until our submitted headers will be accepted + if no_fatal_error && all_headers_rejected && has_submitted_headers { + sync.pause_submit(); + } + }, + target_complete_header_result = target_complete_header_future => { + target_client_is_online = process_future_result( + target_complete_header_result, + &mut target_retry_backoff, + |completed_header| sync.headers_mut().header_completed(&completed_header), + &mut target_go_offline_future, + async_std::task::sleep, + || format!("Error completing headers at {}", P::TARGET_NAME), + ).fail_if_connection_error(FailedClient::Target)?; + }, + target_extra_check_result = target_extra_check_future => { + target_client_is_online = process_future_result( + target_extra_check_result, + &mut target_retry_backoff, + |(header, extra_check_result)| sync + .headers_mut() + .maybe_extra_response(&header, extra_check_result), + &mut target_go_offline_future, + async_std::task::sleep, + || format!("Error retrieving receipts requirement from {} node", P::TARGET_NAME), + ).fail_if_connection_error(FailedClient::Target)?; + }, + _ 
= target_go_offline_future => { + target_client_is_online = true; + }, + _ = target_tick_stream.next() => { + target_best_block_required = true; + target_incomplete_headers_required = true; + }, + + _ = maintain_stream.next() => { + maintain_required = true; + }, + _ = exit_signal => { + return Ok(()); + } + } + + // update metrics + if let Some(ref metrics_sync) = metrics_sync { + metrics_sync.update(&sync); + } + + // print progress + progress_context = print_sync_progress(progress_context, &sync); + + // run maintain procedures + if maintain_required && source_client_is_online && target_client_is_online { + log::debug!(target: "bridge", "Maintaining headers sync loop"); + maintain_required = false; + sync_maintain.maintain(&mut sync).await; + } + + // If the target client is accepting requests we update the requests that + // we want it to run + if !maintain_required && target_client_is_online { + // NOTE: Is is important to reset this so that we only have one + // request being processed by the client at a time. This prevents + // race conditions like receiving two transactions with the same + // nonce from the client. + target_client_is_online = false; + + // The following is how we prioritize requests: + // + // 1. Get best block + // - Stops us from downloading or submitting new blocks + // - Only called rarely + // + // 2. Get incomplete headers + // - Stops us from submitting new blocks + // - Only called rarely + // + // 3. Get complete headers + // - Stops us from submitting new blocks + // + // 4. Check if we need extra data from source + // - Stops us from downloading or submitting new blocks + // + // 5. Check existence of header + // - Stops us from submitting new blocks + // + // 6. 
Submit header + + if target_best_block_required { + log::debug!(target: "bridge", "Asking {} about best block", P::TARGET_NAME); + target_best_block_future.set(target_client.best_header_id().fuse()); + } else if target_incomplete_headers_required { + log::debug!(target: "bridge", "Asking {} about incomplete headers", P::TARGET_NAME); + target_incomplete_headers_future.set(target_client.incomplete_headers_ids().fuse()); + } else if let Some((id, completion)) = sync.headers_mut().header_to_complete() { + log::debug!( + target: "bridge", + "Going to complete header: {:?}", + id, + ); + + target_complete_header_future.set(target_client.complete_header(id, completion.clone()).fuse()); + } else if let Some(header) = sync.headers().header(HeaderStatus::MaybeExtra) { + log::debug!( + target: "bridge", + "Checking if header submission requires extra: {:?}", + header.id(), + ); + + target_extra_check_future.set(target_client.requires_extra(header.clone()).fuse()); + } else if let Some(header) = sync.headers().header(HeaderStatus::MaybeOrphan) { + // for MaybeOrphan we actually ask for parent' header existence + let parent_id = header.parent_id(); + + log::debug!( + target: "bridge", + "Asking {} node for existence of: {:?}", + P::TARGET_NAME, + parent_id, + ); + + target_existence_status_future.set(target_client.is_known_header(parent_id).fuse()); + } else if let Some(headers) = + sync.select_headers_to_submit(last_update_time.elapsed() > BACKUP_STALL_SYNC_TIMEOUT) + { + log::debug!( + target: "bridge", + "Submitting {} header(s) to {} node: {:?}", + headers.len(), + P::TARGET_NAME, + format_ids(headers.iter().map(|header| header.id())), + ); + + let headers = headers.into_iter().cloned().collect(); + target_submit_header_future.set(target_client.submit_headers(headers).fuse()); + + // remember that we have submitted some headers + if stall_countdown.is_none() { + stall_countdown = Some(Instant::now()); + } + } else { + target_client_is_online = true; + } + } + + // If the 
source client is accepting requests we update the requests that + // we want it to run + if !maintain_required && source_client_is_online { + // NOTE: Is is important to reset this so that we only have one + // request being processed by the client at a time. This prevents + // race conditions like receiving two transactions with the same + // nonce from the client. + source_client_is_online = false; + + // The following is how we prioritize requests: + // + // 1. Get best block + // - Stops us from downloading or submitting new blocks + // - Only called rarely + // + // 2. Download completion data + // - Stops us from submitting new blocks + // + // 3. Download extra data + // - Stops us from submitting new blocks + // + // 4. Download missing headers + // - Stops us from downloading or submitting new blocks + // + // 5. Downloading new headers + + if source_best_block_number_required { + log::debug!(target: "bridge", "Asking {} node about best block number", P::SOURCE_NAME); + source_best_block_number_future.set(source_client.best_block_number().fuse()); + } else if let Some(id) = sync.headers_mut().incomplete_header() { + log::debug!( + target: "bridge", + "Retrieving completion data for header: {:?}", + id, + ); + source_completion_future.set(source_client.header_completion(id).fuse()); + } else if let Some(header) = sync.headers().header(HeaderStatus::Extra) { + let id = header.id(); + log::debug!( + target: "bridge", + "Retrieving extra data for header: {:?}", + id, + ); + source_extra_future.set(source_client.header_extra(id, header.clone()).fuse()); + } else if let Some(header) = sync.select_orphan_header_to_download() { + // for Orphan we actually ask for parent' header + let parent_id = header.parent_id(); + + // if we have end up with orphan header#0, then we are misconfigured + if parent_id.0.is_zero() { + log::error!( + target: "bridge", + "Misconfiguration. 
Genesis {} header is considered orphan by {} node", + P::SOURCE_NAME, + P::TARGET_NAME, + ); + return Ok(()); + } + + log::debug!( + target: "bridge", + "Going to download orphan header from {} node: {:?}", + P::SOURCE_NAME, + parent_id, + ); + + source_orphan_header_future.set(source_client.header_by_hash(parent_id.1).fuse()); + } else if let Some(id) = sync.select_new_header_to_download() { + log::debug!( + target: "bridge", + "Going to download new header from {} node: {:?}", + P::SOURCE_NAME, + id, + ); + + source_new_header_future.set(source_client.header_by_number(id).fuse()); + } else { + source_client_is_online = true; + } + } + } +} + +/// Print synchronization progress. +fn print_sync_progress( + progress_context: (Instant, Option, Option), + eth_sync: &HeadersSync

, +) -> (Instant, Option, Option) { + let (prev_time, prev_best_header, prev_target_header) = progress_context; + let now_time = Instant::now(); + let (now_best_header, now_target_header) = eth_sync.status(); + + let need_update = now_time - prev_time > Duration::from_secs(10) + || match (prev_best_header, now_best_header) { + (Some(prev_best_header), Some(now_best_header)) => { + now_best_header.0.saturating_sub(prev_best_header) > 10.into() + } + _ => false, + }; + if !need_update { + return (prev_time, prev_best_header, prev_target_header); + } + + log::info!( + target: "bridge", + "Synced {:?} of {:?} headers", + now_best_header.map(|id| id.0), + now_target_header, + ); + (now_time, (*now_best_header).map(|id| id.0), *now_target_header) +} diff --git a/polkadot/relays/headers/src/sync_loop_metrics.rs b/polkadot/relays/headers/src/sync_loop_metrics.rs new file mode 100644 index 00000000000..37dae113404 --- /dev/null +++ b/polkadot/relays/headers/src/sync_loop_metrics.rs @@ -0,0 +1,108 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Metrics for headers synchronization relay loop. 
+ +use crate::sync::HeadersSync; +use crate::sync_types::{HeaderStatus, HeadersSyncPipeline}; + +use num_traits::Zero; +use relay_utils::metrics::{metric_name, register, GaugeVec, Opts, PrometheusError, Registry, U64}; + +/// Headers sync metrics. +#[derive(Clone)] +pub struct SyncLoopMetrics { + /// Best syncing headers at "source" and "target" nodes. + best_block_numbers: GaugeVec, + /// Number of headers in given states (see `HeaderStatus`). + blocks_in_state: GaugeVec, +} + +impl SyncLoopMetrics { + /// Create and register headers loop metrics. + pub fn new(registry: &Registry, prefix: Option<&str>) -> Result { + Ok(SyncLoopMetrics { + best_block_numbers: register( + GaugeVec::new( + Opts::new( + metric_name(prefix, "best_block_numbers"), + "Best block numbers on source and target nodes", + ), + &["node"], + )?, + registry, + )?, + blocks_in_state: register( + GaugeVec::new( + Opts::new( + metric_name(prefix, "blocks_in_state"), + "Number of blocks in given state", + ), + &["state"], + )?, + registry, + )?, + }) + } +} + +impl SyncLoopMetrics { + /// Update best block number at source. + pub fn update_best_block_at_source>(&self, source_best_number: Number) { + self.best_block_numbers + .with_label_values(&["source"]) + .set(source_best_number.into()); + } + + /// Update best block number at target. + pub fn update_best_block_at_target>(&self, target_best_number: Number) { + self.best_block_numbers + .with_label_values(&["target"]) + .set(target_best_number.into()); + } + + /// Update metrics. + pub fn update(&self, sync: &HeadersSync
<P>
) { + let headers = sync.headers(); + let source_best_number = sync.source_best_number().unwrap_or_else(Zero::zero); + let target_best_number = sync.target_best_header().map(|id| id.0).unwrap_or_else(Zero::zero); + + self.update_best_block_at_source(source_best_number); + self.update_best_block_at_target(target_best_number); + + self.blocks_in_state + .with_label_values(&["maybe_orphan"]) + .set(headers.headers_in_status(HeaderStatus::MaybeOrphan) as _); + self.blocks_in_state + .with_label_values(&["orphan"]) + .set(headers.headers_in_status(HeaderStatus::Orphan) as _); + self.blocks_in_state + .with_label_values(&["maybe_extra"]) + .set(headers.headers_in_status(HeaderStatus::MaybeExtra) as _); + self.blocks_in_state + .with_label_values(&["extra"]) + .set(headers.headers_in_status(HeaderStatus::Extra) as _); + self.blocks_in_state + .with_label_values(&["ready"]) + .set(headers.headers_in_status(HeaderStatus::Ready) as _); + self.blocks_in_state + .with_label_values(&["incomplete"]) + .set(headers.headers_in_status(HeaderStatus::Incomplete) as _); + self.blocks_in_state + .with_label_values(&["submitted"]) + .set(headers.headers_in_status(HeaderStatus::Submitted) as _); + } +} diff --git a/polkadot/relays/headers/src/sync_loop_tests.rs b/polkadot/relays/headers/src/sync_loop_tests.rs new file mode 100644 index 00000000000..3347c4d0d3b --- /dev/null +++ b/polkadot/relays/headers/src/sync_loop_tests.rs @@ -0,0 +1,594 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +#![cfg(test)] + +use crate::sync_loop::{run, SourceClient, TargetClient}; +use crate::sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders}; + +use async_trait::async_trait; +use backoff::backoff::Backoff; +use futures::{future::FutureExt, stream::StreamExt}; +use parking_lot::Mutex; +use relay_utils::{ + metrics::MetricsParams, process_future_result, relay_loop::Client as RelayClient, retry_backoff, HeaderId, + MaybeConnectionError, +}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + time::Duration, +}; + +pub type TestNumber = u64; +pub type TestHash = u64; +pub type TestHeaderId = HeaderId; +pub type TestExtra = u64; +pub type TestCompletion = u64; +pub type TestQueuedHeader = QueuedHeader; + +#[derive(Default, Debug, Clone, PartialEq)] +pub struct TestHeader { + pub hash: TestHash, + pub number: TestNumber, + pub parent_hash: TestHash, +} + +impl SourceHeader for TestHeader { + fn id(&self) -> TestHeaderId { + HeaderId(self.number, self.hash) + } + + fn parent_id(&self) -> TestHeaderId { + HeaderId(self.number - 1, self.parent_hash) + } +} + +#[derive(Debug, Clone)] +struct TestError(bool); + +impl MaybeConnectionError for TestError { + fn is_connection_error(&self) -> bool { + self.0 + } +} + +#[derive(Debug, Clone, Copy, PartialEq)] +pub struct TestHeadersSyncPipeline; + +impl HeadersSyncPipeline for TestHeadersSyncPipeline { + const SOURCE_NAME: &'static str = "Source"; + const TARGET_NAME: &'static str = "Target"; + + type Hash = TestHash; + type Number = TestNumber; + type Header = TestHeader; + type Extra = TestExtra; + type Completion 
= TestCompletion; + + fn estimate_size(_: &TestQueuedHeader) -> usize { + 0 + } +} + +enum SourceMethod { + BestBlockNumber, + HeaderByHash(TestHash), + HeaderByNumber(TestNumber), + HeaderCompletion(TestHeaderId), + HeaderExtra(TestHeaderId, TestQueuedHeader), +} + +#[derive(Clone)] +struct Source { + data: Arc>, + on_method_call: Arc, +} + +struct SourceData { + best_block_number: Result, + header_by_hash: HashMap, + header_by_number: HashMap, + provides_completion: bool, + provides_extra: bool, +} + +impl Source { + pub fn new( + best_block_id: TestHeaderId, + headers: Vec<(bool, TestHeader)>, + on_method_call: impl Fn(SourceMethod, &mut SourceData) + Send + Sync + 'static, + ) -> Self { + Source { + data: Arc::new(Mutex::new(SourceData { + best_block_number: Ok(best_block_id.0), + header_by_hash: headers + .iter() + .map(|(_, header)| (header.hash, header.clone())) + .collect(), + header_by_number: headers + .iter() + .filter_map(|(is_canonical, header)| { + if *is_canonical { + Some((header.hash, header.clone())) + } else { + None + } + }) + .collect(), + provides_completion: true, + provides_extra: true, + })), + on_method_call: Arc::new(on_method_call), + } + } +} + +#[async_trait] +impl RelayClient for Source { + type Error = TestError; + + async fn reconnect(&mut self) -> Result<(), TestError> { + unimplemented!() + } +} + +#[async_trait] +impl SourceClient for Source { + async fn best_block_number(&self) -> Result { + let mut data = self.data.lock(); + (self.on_method_call)(SourceMethod::BestBlockNumber, &mut *data); + data.best_block_number.clone() + } + + async fn header_by_hash(&self, hash: TestHash) -> Result { + let mut data = self.data.lock(); + (self.on_method_call)(SourceMethod::HeaderByHash(hash), &mut *data); + data.header_by_hash.get(&hash).cloned().ok_or(TestError(false)) + } + + async fn header_by_number(&self, number: TestNumber) -> Result { + let mut data = self.data.lock(); + (self.on_method_call)(SourceMethod::HeaderByNumber(number), &mut 
*data); + data.header_by_number.get(&number).cloned().ok_or(TestError(false)) + } + + async fn header_completion(&self, id: TestHeaderId) -> Result<(TestHeaderId, Option), TestError> { + let mut data = self.data.lock(); + (self.on_method_call)(SourceMethod::HeaderCompletion(id), &mut *data); + if data.provides_completion { + Ok((id, Some(test_completion(id)))) + } else { + Ok((id, None)) + } + } + + async fn header_extra( + &self, + id: TestHeaderId, + header: TestQueuedHeader, + ) -> Result<(TestHeaderId, TestExtra), TestError> { + let mut data = self.data.lock(); + (self.on_method_call)(SourceMethod::HeaderExtra(id, header), &mut *data); + if data.provides_extra { + Ok((id, test_extra(id))) + } else { + Err(TestError(false)) + } + } +} + +enum TargetMethod { + BestHeaderId, + IsKnownHeader(TestHeaderId), + SubmitHeaders(Vec), + IncompleteHeadersIds, + CompleteHeader(TestHeaderId, TestCompletion), + RequiresExtra(TestQueuedHeader), +} + +#[derive(Clone)] +struct Target { + data: Arc>, + on_method_call: Arc, +} + +struct TargetData { + best_header_id: Result, + is_known_header_by_hash: HashMap, + submitted_headers: HashMap, + submit_headers_result: Option>, + completed_headers: HashMap, + requires_completion: bool, + requires_extra: bool, +} + +impl Target { + pub fn new( + best_header_id: TestHeaderId, + headers: Vec, + on_method_call: impl Fn(TargetMethod, &mut TargetData) + Send + Sync + 'static, + ) -> Self { + Target { + data: Arc::new(Mutex::new(TargetData { + best_header_id: Ok(best_header_id), + is_known_header_by_hash: headers.iter().map(|header| (header.1, true)).collect(), + submitted_headers: HashMap::new(), + submit_headers_result: None, + completed_headers: HashMap::new(), + requires_completion: false, + requires_extra: false, + })), + on_method_call: Arc::new(on_method_call), + } + } +} + +#[async_trait] +impl RelayClient for Target { + type Error = TestError; + + async fn reconnect(&mut self) -> Result<(), TestError> { + unimplemented!() + } +} + 
+#[async_trait] +impl TargetClient for Target { + async fn best_header_id(&self) -> Result { + let mut data = self.data.lock(); + (self.on_method_call)(TargetMethod::BestHeaderId, &mut *data); + data.best_header_id.clone() + } + + async fn is_known_header(&self, id: TestHeaderId) -> Result<(TestHeaderId, bool), TestError> { + let mut data = self.data.lock(); + (self.on_method_call)(TargetMethod::IsKnownHeader(id), &mut *data); + data.is_known_header_by_hash + .get(&id.1) + .cloned() + .map(|is_known_header| Ok((id, is_known_header))) + .unwrap_or(Ok((id, false))) + } + + async fn submit_headers(&self, headers: Vec) -> SubmittedHeaders { + let mut data = self.data.lock(); + (self.on_method_call)(TargetMethod::SubmitHeaders(headers.clone()), &mut *data); + data.submitted_headers + .extend(headers.iter().map(|header| (header.id().1, header.clone()))); + data.submit_headers_result.take().expect("test must accept headers") + } + + async fn incomplete_headers_ids(&self) -> Result, TestError> { + let mut data = self.data.lock(); + (self.on_method_call)(TargetMethod::IncompleteHeadersIds, &mut *data); + if data.requires_completion { + Ok(data + .submitted_headers + .iter() + .filter(|(hash, _)| !data.completed_headers.contains_key(hash)) + .map(|(_, header)| header.id()) + .collect()) + } else { + Ok(HashSet::new()) + } + } + + async fn complete_header(&self, id: TestHeaderId, completion: TestCompletion) -> Result { + let mut data = self.data.lock(); + (self.on_method_call)(TargetMethod::CompleteHeader(id, completion), &mut *data); + data.completed_headers.insert(id.1, completion); + Ok(id) + } + + async fn requires_extra(&self, header: TestQueuedHeader) -> Result<(TestHeaderId, bool), TestError> { + let mut data = self.data.lock(); + (self.on_method_call)(TargetMethod::RequiresExtra(header.clone()), &mut *data); + if data.requires_extra { + Ok((header.id(), true)) + } else { + Ok((header.id(), false)) + } + } +} + +fn test_tick() -> Duration { + // in ideal world that 
should have been Duration::from_millis(0), because we do not want + // to sleep in tests at all, but that could lead to `select! {}` always waking on tick + // => not doing actual job + Duration::from_millis(10) +} + +fn test_id(number: TestNumber) -> TestHeaderId { + HeaderId(number, number) +} + +fn test_header(number: TestNumber) -> TestHeader { + let id = test_id(number); + TestHeader { + hash: id.1, + number: id.0, + parent_hash: if number == 0 { + TestHash::default() + } else { + test_id(number - 1).1 + }, + } +} + +fn test_forked_id(number: TestNumber, forked_from: TestNumber) -> TestHeaderId { + const FORK_OFFSET: TestNumber = 1000; + + if number == forked_from { + HeaderId(number, number) + } else { + HeaderId(number, FORK_OFFSET + number) + } +} + +fn test_forked_header(number: TestNumber, forked_from: TestNumber) -> TestHeader { + let id = test_forked_id(number, forked_from); + TestHeader { + hash: id.1, + number: id.0, + parent_hash: if number == 0 { + TestHash::default() + } else { + test_forked_id(number - 1, forked_from).1 + }, + } +} + +fn test_completion(id: TestHeaderId) -> TestCompletion { + id.0 +} + +fn test_extra(id: TestHeaderId) -> TestExtra { + id.0 +} + +fn source_reject_completion(method: &SourceMethod) { + if let SourceMethod::HeaderCompletion(_) = method { + unreachable!("HeaderCompletion request is not expected") + } +} + +fn source_reject_extra(method: &SourceMethod) { + if let SourceMethod::HeaderExtra(_, _) = method { + unreachable!("HeaderExtra request is not expected") + } +} + +fn target_accept_all_headers(method: &TargetMethod, data: &mut TargetData, requires_extra: bool) { + if let TargetMethod::SubmitHeaders(ref submitted) = method { + assert_eq!(submitted.iter().all(|header| header.extra().is_some()), requires_extra,); + + data.submit_headers_result = Some(SubmittedHeaders { + submitted: submitted.iter().map(|header| header.id()).collect(), + ..Default::default() + }); + } +} + +fn target_signal_exit_when_header_submitted( + 
method: &TargetMethod, + header_id: TestHeaderId, + exit_signal: &futures::channel::mpsc::UnboundedSender<()>, +) { + if let TargetMethod::SubmitHeaders(ref submitted) = method { + if submitted.iter().any(|header| header.id() == header_id) { + exit_signal.unbounded_send(()).unwrap(); + } + } +} + +fn target_signal_exit_when_header_completed( + method: &TargetMethod, + header_id: TestHeaderId, + exit_signal: &futures::channel::mpsc::UnboundedSender<()>, +) { + if let TargetMethod::CompleteHeader(completed_id, _) = method { + if *completed_id == header_id { + exit_signal.unbounded_send(()).unwrap(); + } + } +} + +fn run_backoff_test(result: Result<(), TestError>) -> (Duration, Duration) { + let mut backoff = retry_backoff(); + + // no randomness in tests (otherwise intervals may overlap => asserts are failing) + backoff.randomization_factor = 0f64; + + // increase backoff's current interval + let interval1 = backoff.next_backoff().unwrap(); + let interval2 = backoff.next_backoff().unwrap(); + assert!(interval2 > interval1); + + // successful future result leads to backoff's reset + let go_offline_future = futures::future::Fuse::terminated(); + futures::pin_mut!(go_offline_future); + + process_future_result( + result, + &mut backoff, + |_| {}, + &mut go_offline_future, + async_std::task::sleep, + || "Test error".into(), + ); + + (interval2, backoff.next_backoff().unwrap()) +} + +#[test] +fn process_future_result_resets_backoff_on_success() { + let (interval2, interval_after_reset) = run_backoff_test(Ok(())); + assert!(interval2 > interval_after_reset); +} + +#[test] +fn process_future_result_resets_backoff_on_connection_error() { + let (interval2, interval_after_reset) = run_backoff_test(Err(TestError(true))); + assert!(interval2 > interval_after_reset); +} + +#[test] +fn process_future_result_does_not_reset_backoff_on_non_connection_error() { + let (interval2, interval_after_reset) = run_backoff_test(Err(TestError(false))); + assert!(interval2 < 
interval_after_reset); +} + +struct SyncLoopTestParams { + best_source_header: TestHeader, + headers_on_source: Vec<(bool, TestHeader)>, + best_target_header: TestHeader, + headers_on_target: Vec, + target_requires_extra: bool, + target_requires_completion: bool, + stop_at: TestHeaderId, +} + +fn run_sync_loop_test(params: SyncLoopTestParams) { + let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded(); + let target_requires_extra = params.target_requires_extra; + let target_requires_completion = params.target_requires_completion; + let stop_at = params.stop_at; + let source = Source::new( + params.best_source_header.id(), + params.headers_on_source, + move |method, _| { + if !target_requires_extra { + source_reject_extra(&method); + } + if !target_requires_completion { + source_reject_completion(&method); + } + }, + ); + let target = Target::new( + params.best_target_header.id(), + params.headers_on_target.into_iter().map(|header| header.id()).collect(), + move |method, data| { + target_accept_all_headers(&method, data, target_requires_extra); + if target_requires_completion { + target_signal_exit_when_header_completed(&method, stop_at, &exit_sender); + } else { + target_signal_exit_when_header_submitted(&method, stop_at, &exit_sender); + } + }, + ); + target.data.lock().requires_extra = target_requires_extra; + target.data.lock().requires_completion = target_requires_completion; + + let _ = async_std::task::block_on(run( + source, + test_tick(), + target, + test_tick(), + (), + crate::sync::tests::default_sync_params(), + MetricsParams::disabled(), + exit_receiver.into_future().map(|(_, _)| ()), + )); +} + +#[test] +fn sync_loop_is_able_to_synchronize_single_header() { + run_sync_loop_test(SyncLoopTestParams { + best_source_header: test_header(1), + headers_on_source: vec![(true, test_header(1))], + best_target_header: test_header(0), + headers_on_target: vec![test_header(0)], + target_requires_extra: false, + target_requires_completion: false, + 
stop_at: test_id(1), + }); +} + +#[test] +fn sync_loop_is_able_to_synchronize_single_header_with_extra() { + run_sync_loop_test(SyncLoopTestParams { + best_source_header: test_header(1), + headers_on_source: vec![(true, test_header(1))], + best_target_header: test_header(0), + headers_on_target: vec![test_header(0)], + target_requires_extra: true, + target_requires_completion: false, + stop_at: test_id(1), + }); +} + +#[test] +fn sync_loop_is_able_to_synchronize_single_header_with_completion() { + run_sync_loop_test(SyncLoopTestParams { + best_source_header: test_header(1), + headers_on_source: vec![(true, test_header(1))], + best_target_header: test_header(0), + headers_on_target: vec![test_header(0)], + target_requires_extra: false, + target_requires_completion: true, + stop_at: test_id(1), + }); +} + +#[test] +fn sync_loop_is_able_to_reorganize_from_shorter_fork() { + run_sync_loop_test(SyncLoopTestParams { + best_source_header: test_header(3), + headers_on_source: vec![ + (true, test_header(1)), + (true, test_header(2)), + (true, test_header(3)), + (false, test_forked_header(1, 0)), + (false, test_forked_header(2, 0)), + ], + best_target_header: test_forked_header(2, 0), + headers_on_target: vec![test_header(0), test_forked_header(1, 0), test_forked_header(2, 0)], + target_requires_extra: false, + target_requires_completion: false, + stop_at: test_id(3), + }); +} + +#[test] +fn sync_loop_is_able_to_reorganize_from_longer_fork() { + run_sync_loop_test(SyncLoopTestParams { + best_source_header: test_header(3), + headers_on_source: vec![ + (true, test_header(1)), + (true, test_header(2)), + (true, test_header(3)), + (false, test_forked_header(1, 0)), + (false, test_forked_header(2, 0)), + (false, test_forked_header(3, 0)), + (false, test_forked_header(4, 0)), + (false, test_forked_header(5, 0)), + ], + best_target_header: test_forked_header(5, 0), + headers_on_target: vec![ + test_header(0), + test_forked_header(1, 0), + test_forked_header(2, 0), + 
test_forked_header(3, 0), + test_forked_header(4, 0), + test_forked_header(5, 0), + ], + target_requires_extra: false, + target_requires_completion: false, + stop_at: test_id(3), + }); +} diff --git a/polkadot/relays/headers/src/sync_types.rs b/polkadot/relays/headers/src/sync_types.rs new file mode 100644 index 00000000000..e6500ad5fac --- /dev/null +++ b/polkadot/relays/headers/src/sync_types.rs @@ -0,0 +1,189 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Types that are used by headers synchronization components. + +use relay_utils::{format_ids, HeaderId}; +use std::{ops::Deref, sync::Arc}; + +/// Ethereum header synchronization status. +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum HeaderStatus { + /// Header is unknown. + Unknown, + /// Header is in MaybeOrphan queue. + MaybeOrphan, + /// Header is in Orphan queue. + Orphan, + /// Header is in MaybeExtra queue. + MaybeExtra, + /// Header is in Extra queue. + Extra, + /// Header is in Ready queue. + Ready, + /// Header is in Incomplete queue. + Incomplete, + /// Header has been recently submitted to the target node. + Submitted, + /// Header is known to the target node. + Synced, +} + +/// Headers synchronization pipeline. 
+pub trait HeadersSyncPipeline: Clone + Send + Sync { + /// Name of the headers source. + const SOURCE_NAME: &'static str; + /// Name of the headers target. + const TARGET_NAME: &'static str; + + /// Headers we're syncing are identified by this hash. + type Hash: Eq + Clone + Copy + Send + Sync + std::fmt::Debug + std::fmt::Display + std::hash::Hash; + /// Headers we're syncing are identified by this number. + type Number: relay_utils::BlockNumberBase; + /// Type of header that we're syncing. + type Header: SourceHeader; + /// Type of extra data for the header that we're receiving from the source node: + /// 1) extra data is required for some headers; + /// 2) target node may answer if it'll require extra data before header is submitted; + /// 3) extra data available since the header creation time; + /// 4) header and extra data are submitted in single transaction. + /// + /// Example: Ethereum transactions receipts. + type Extra: Clone + Send + Sync + PartialEq + std::fmt::Debug; + /// Type of data required to 'complete' header that we're receiving from the source node: + /// 1) completion data is required for some headers; + /// 2) target node can't answer if it'll require completion data before header is accepted; + /// 3) completion data may be generated after header generation; + /// 4) header and completion data are submitted in separate transactions. + /// + /// Example: Substrate GRANDPA justifications. + type Completion: Clone + Send + Sync + std::fmt::Debug; + + /// Function used to estimate size of target-encoded header. + fn estimate_size(source: &QueuedHeader) -> usize; +} + +/// A HeaderId for `HeaderSyncPipeline`. +pub type HeaderIdOf
<P> = HeaderId<<P as HeadersSyncPipeline>::Hash, <P as HeadersSyncPipeline>
::Number>; + +/// Header that we're receiving from source node. +pub trait SourceHeader: Clone + std::fmt::Debug + PartialEq + Send + Sync { + /// Returns ID of header. + fn id(&self) -> HeaderId; + /// Returns ID of parent header. + /// + /// Panics if called for genesis header. + fn parent_id(&self) -> HeaderId; +} + +/// Header how it's stored in the synchronization queue. +#[derive(Clone, Debug, PartialEq)] +pub struct QueuedHeader(Arc>); + +impl QueuedHeader
<P>
{ + /// Creates new queued header. + pub fn new(header: P::Header) -> Self { + QueuedHeader(Arc::new(QueuedHeaderData { header, extra: None })) + } + + /// Set associated extra data. + pub fn set_extra(self, extra: P::Extra) -> Self { + QueuedHeader(Arc::new(QueuedHeaderData { + header: Arc::try_unwrap(self.0) + .map(|data| data.header) + .unwrap_or_else(|data| data.header.clone()), + extra: Some(extra), + })) + } +} + +impl Deref for QueuedHeader
<P>
{ + type Target = QueuedHeaderData
<P>
; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +/// Header how it's stored in the synchronization queue. +#[derive(Clone, Debug, Default, PartialEq)] +pub struct QueuedHeaderData { + header: P::Header, + extra: Option, +} + +impl QueuedHeader
<P>
{ + /// Returns ID of header. + pub fn id(&self) -> HeaderId { + self.header.id() + } + + /// Returns ID of parent header. + pub fn parent_id(&self) -> HeaderId { + self.header.parent_id() + } + + /// Returns reference to header. + pub fn header(&self) -> &P::Header { + &self.header + } + + /// Returns reference to associated extra data. + pub fn extra(&self) -> &Option { + &self.extra + } +} + +/// Headers submission result. +#[derive(Debug)] +#[cfg_attr(test, derive(PartialEq))] +pub struct SubmittedHeaders { + /// IDs of headers that have been submitted to target node. + pub submitted: Vec, + /// IDs of incomplete headers. These headers were submitted (so this id is also in `submitted` vec), + /// but all descendants are not. + pub incomplete: Vec, + /// IDs of ignored headers that we have decided not to submit (they're either rejected by + /// target node immediately, or they're descendants of incomplete headers). + pub rejected: Vec, + /// Fatal target node error, if it has occured during submission. 
+ pub fatal_error: Option, +} + +impl Default for SubmittedHeaders { + fn default() -> Self { + SubmittedHeaders { + submitted: Vec::new(), + incomplete: Vec::new(), + rejected: Vec::new(), + fatal_error: None, + } + } +} + +impl std::fmt::Display for SubmittedHeaders { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let submitted = format_ids(self.submitted.iter()); + let incomplete = format_ids(self.incomplete.iter()); + let rejected = format_ids(self.rejected.iter()); + + write!( + f, + "Submitted: {}, Incomplete: {}, Rejected: {}", + submitted, incomplete, rejected + ) + } +} diff --git a/polkadot/relays/messages/Cargo.toml b/polkadot/relays/messages/Cargo.toml new file mode 100644 index 00000000000..e02f8ccc868 --- /dev/null +++ b/polkadot/relays/messages/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "messages-relay" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +async-std = "1.6.5" +async-trait = "0.1.40" +futures = "0.3.5" +hex = "0.4" +log = "0.4.11" +parking_lot = "0.11.0" + +# Bridge Dependencies + +bp-messages = { path = "../../primitives/messages" } +relay-utils = { path = "../utils" } diff --git a/polkadot/relays/messages/src/lib.rs b/polkadot/relays/messages/src/lib.rs new file mode 100644 index 00000000000..cdd94bca954 --- /dev/null +++ b/polkadot/relays/messages/src/lib.rs @@ -0,0 +1,36 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Relaying [`pallet-bridge-messages`](../pallet_bridge_messages/index.html) application specific +//! data. Message lane allows sending arbitrary messages between bridged chains. This +//! module provides entrypoint that starts reading messages from given message lane +//! of source chain and submits proof-of-message-at-source-chain transactions to the +//! target chain. Additionaly, proofs-of-messages-delivery are sent back from the +//! target chain to the source chain. + +// required for futures::select! +#![recursion_limit = "1024"] +#![warn(missing_docs)] + +mod metrics; + +pub mod message_lane; +pub mod message_lane_loop; + +mod message_race_delivery; +mod message_race_loop; +mod message_race_receiving; +mod message_race_strategy; diff --git a/polkadot/relays/messages/src/message_lane.rs b/polkadot/relays/messages/src/message_lane.rs new file mode 100644 index 00000000000..5090ef124e7 --- /dev/null +++ b/polkadot/relays/messages/src/message_lane.rs @@ -0,0 +1,52 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! One-way message lane types. Within single one-way lane we have three 'races' where we try to: +//! +//! 1) relay new messages from source to target node; +//! 2) relay proof-of-delivery from target to source node. + +use relay_utils::{BlockNumberBase, HeaderId}; +use std::fmt::Debug; + +/// One-way message lane. +pub trait MessageLane: Clone + Send + Sync { + /// Name of the messages source. + const SOURCE_NAME: &'static str; + /// Name of the messages target. + const TARGET_NAME: &'static str; + + /// Messages proof. + type MessagesProof: Clone + Debug + Send + Sync; + /// Messages receiving proof. + type MessagesReceivingProof: Clone + Debug + Send + Sync; + + /// Number of the source header. + type SourceHeaderNumber: BlockNumberBase; + /// Hash of the source header. + type SourceHeaderHash: Clone + Debug + Default + PartialEq + Send + Sync; + + /// Number of the target header. + type TargetHeaderNumber: BlockNumberBase; + /// Hash of the target header. + type TargetHeaderHash: Clone + Debug + Default + PartialEq + Send + Sync; +} + +/// Source header id within given one-way message lane. +pub type SourceHeaderIdOf
<P> = HeaderId<<P as MessageLane>::SourceHeaderHash, <P as MessageLane>
::SourceHeaderNumber>; + +/// Target header id within given one-way message lane. +pub type TargetHeaderIdOf
<P> = HeaderId<<P as MessageLane>::TargetHeaderHash, <P as MessageLane>
::TargetHeaderNumber>; diff --git a/polkadot/relays/messages/src/message_lane_loop.rs b/polkadot/relays/messages/src/message_lane_loop.rs new file mode 100644 index 00000000000..41eee606d82 --- /dev/null +++ b/polkadot/relays/messages/src/message_lane_loop.rs @@ -0,0 +1,865 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Message delivery loop. Designed to work with messages pallet. +//! +//! Single relay instance delivers messages of single lane in single direction. +//! To serve two-way lane, you would need two instances of relay. +//! To serve N two-way lanes, you would need N*2 instances of relay. +//! +//! Please keep in mind that the best header in this file is actually best +//! finalized header. I.e. when talking about headers in lane context, we +//! only care about finalized headers. 
+ +use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}; +use crate::message_race_delivery::run as run_message_delivery_race; +use crate::message_race_receiving::run as run_message_receiving_race; +use crate::metrics::MessageLaneLoopMetrics; + +use async_trait::async_trait; +use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; +use futures::{channel::mpsc::unbounded, future::FutureExt, stream::StreamExt}; +use relay_utils::{ + interval, + metrics::{GlobalMetrics, MetricsParams}, + process_future_result, + relay_loop::Client as RelayClient, + retry_backoff, FailedClient, +}; +use std::{collections::BTreeMap, fmt::Debug, future::Future, ops::RangeInclusive, time::Duration}; + +/// Message lane loop configuration params. +#[derive(Debug, Clone)] +pub struct Params { + /// Id of lane this loop is servicing. + pub lane: LaneId, + /// Interval at which we ask target node about its updates. + pub source_tick: Duration, + /// Interval at which we ask target node about its updates. + pub target_tick: Duration, + /// Delay between moments when connection error happens and our reconnect attempt. + pub reconnect_delay: Duration, + /// The loop will auto-restart if there has been no updates during this period. + pub stall_timeout: Duration, + /// Message delivery race parameters. + pub delivery_params: MessageDeliveryParams, +} + +/// Message delivery race parameters. +#[derive(Debug, Clone)] +pub struct MessageDeliveryParams { + /// Maximal number of unconfirmed relayer entries at the inbound lane. If there's that number of entries + /// in the `InboundLaneData::relayers` set, all new messages will be rejected until reward payment will + /// be proved (by including outbound lane state to the message delivery transaction). + pub max_unrewarded_relayer_entries_at_target: MessageNonce, + /// Message delivery race will stop delivering messages if there are `max_unconfirmed_nonces_at_target` + /// unconfirmed nonces on the target node. 
The race would continue once they're confirmed by the + /// receiving race. + pub max_unconfirmed_nonces_at_target: MessageNonce, + /// Maximal number of relayed messages in single delivery transaction. + pub max_messages_in_single_batch: MessageNonce, + /// Maximal cumulative dispatch weight of relayed messages in single delivery transaction. + pub max_messages_weight_in_single_batch: Weight, + /// Maximal cumulative size of relayed messages in single delivery transaction. + pub max_messages_size_in_single_batch: usize, +} + +/// Message weights. +#[derive(Debug, Clone, Copy, PartialEq)] +pub struct MessageWeights { + /// Message dispatch weight. + pub weight: Weight, + /// Message size (number of bytes in encoded payload). + pub size: usize, +} + +/// Messages weights map. +pub type MessageWeightsMap = BTreeMap; + +/// Message delivery race proof parameters. +#[derive(Debug, PartialEq)] +pub struct MessageProofParameters { + /// Include outbound lane state proof? + pub outbound_state_proof_required: bool, + /// Cumulative dispatch weight of messages that we're building proof for. + pub dispatch_weight: Weight, +} + +/// Source client trait. +#[async_trait] +pub trait SourceClient: RelayClient { + /// Returns state of the client. + async fn state(&self) -> Result, Self::Error>; + + /// Get nonce of instance of latest generated message. + async fn latest_generated_nonce( + &self, + id: SourceHeaderIdOf

, + ) -> Result<(SourceHeaderIdOf

, MessageNonce), Self::Error>; + /// Get nonce of the latest message, which receiving has been confirmed by the target chain. + async fn latest_confirmed_received_nonce( + &self, + id: SourceHeaderIdOf

, + ) -> Result<(SourceHeaderIdOf

, MessageNonce), Self::Error>; + + /// Returns mapping of message nonces, generated on this client, to their weights. + /// + /// Some weights may be missing from returned map, if corresponding messages were pruned at + /// the source chain. + async fn generated_messages_weights( + &self, + id: SourceHeaderIdOf

, + nonces: RangeInclusive, + ) -> Result; + + /// Prove messages in inclusive range [begin; end]. + async fn prove_messages( + &self, + id: SourceHeaderIdOf

, + nonces: RangeInclusive, + proof_parameters: MessageProofParameters, + ) -> Result<(SourceHeaderIdOf

, RangeInclusive, P::MessagesProof), Self::Error>; + + /// Submit messages receiving proof. + async fn submit_messages_receiving_proof( + &self, + generated_at_block: TargetHeaderIdOf

, + proof: P::MessagesReceivingProof, + ) -> Result<(), Self::Error>; + + /// We need given finalized target header on source to continue synchronization. + async fn require_target_header_on_source(&self, id: TargetHeaderIdOf

); +} + +/// Target client trait. +#[async_trait] +pub trait TargetClient: RelayClient { + /// Returns state of the client. + async fn state(&self) -> Result, Self::Error>; + + /// Get nonce of latest received message. + async fn latest_received_nonce( + &self, + id: TargetHeaderIdOf

, + ) -> Result<(TargetHeaderIdOf

, MessageNonce), Self::Error>; + + /// Get nonce of latest confirmed message. + async fn latest_confirmed_received_nonce( + &self, + id: TargetHeaderIdOf

, + ) -> Result<(TargetHeaderIdOf

, MessageNonce), Self::Error>; + /// Get state of unrewarded relayers set at the inbound lane. + async fn unrewarded_relayers_state( + &self, + id: TargetHeaderIdOf

, + ) -> Result<(TargetHeaderIdOf

, UnrewardedRelayersState), Self::Error>; + + /// Prove messages receiving at given block. + async fn prove_messages_receiving( + &self, + id: TargetHeaderIdOf

, + ) -> Result<(TargetHeaderIdOf

, P::MessagesReceivingProof), Self::Error>; + + /// Submit messages proof. + async fn submit_messages_proof( + &self, + generated_at_header: SourceHeaderIdOf

, + nonces: RangeInclusive, + proof: P::MessagesProof, + ) -> Result, Self::Error>; + + /// We need given finalized source header on target to continue synchronization. + async fn require_source_header_on_target(&self, id: SourceHeaderIdOf

); +} + +/// State of the client. +#[derive(Clone, Debug, Default, PartialEq)] +pub struct ClientState { + /// Best header id of this chain. + pub best_self: SelfHeaderId, + /// Best finalized header id of this chain. + pub best_finalized_self: SelfHeaderId, + /// Best finalized header id of the peer chain read at the best block of this chain (at `best_finalized_self`). + pub best_finalized_peer_at_best_self: PeerHeaderId, +} + +/// State of source client in one-way message lane. +pub type SourceClientState

= ClientState, TargetHeaderIdOf

>; + +/// State of target client in one-way message lane. +pub type TargetClientState

= ClientState, SourceHeaderIdOf

>; + +/// Both clients state. +#[derive(Debug, Default)] +pub struct ClientsState { + /// Source client state. + pub source: Option>, + /// Target client state. + pub target: Option>, +} + +/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs sync loop. +pub fn metrics_prefix(lane: &LaneId) -> String { + format!( + "{}_to_{}_MessageLane_{}", + P::SOURCE_NAME, + P::TARGET_NAME, + hex::encode(lane) + ) +} + +/// Run message lane service loop. +pub async fn run( + params: Params, + source_client: impl SourceClient

, + target_client: impl TargetClient

, + metrics_params: MetricsParams, + exit_signal: impl Future, +) -> Result<(), String> { + let exit_signal = exit_signal.shared(); + relay_utils::relay_loop(source_client, target_client) + .reconnect_delay(params.reconnect_delay) + .with_metrics(Some(metrics_prefix::

(¶ms.lane)), metrics_params) + .loop_metric(|registry, prefix| MessageLaneLoopMetrics::new(registry, prefix))? + .standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))? + .expose() + .await? + .run(|source_client, target_client, metrics| { + run_until_connection_lost( + params.clone(), + source_client, + target_client, + metrics, + exit_signal.clone(), + ) + }) + .await +} + +/// Run one-way message delivery loop until connection with target or source node is lost, or exit signal is received. +async fn run_until_connection_lost, TC: TargetClient

>( + params: Params, + source_client: SC, + target_client: TC, + metrics_msg: Option, + exit_signal: impl Future, +) -> Result<(), FailedClient> { + let mut source_retry_backoff = retry_backoff(); + let mut source_client_is_online = false; + let mut source_state_required = true; + let source_state = source_client.state().fuse(); + let source_go_offline_future = futures::future::Fuse::terminated(); + let source_tick_stream = interval(params.source_tick).fuse(); + + let mut target_retry_backoff = retry_backoff(); + let mut target_client_is_online = false; + let mut target_state_required = true; + let target_state = target_client.state().fuse(); + let target_go_offline_future = futures::future::Fuse::terminated(); + let target_tick_stream = interval(params.target_tick).fuse(); + + let ( + (delivery_source_state_sender, delivery_source_state_receiver), + (delivery_target_state_sender, delivery_target_state_receiver), + ) = (unbounded(), unbounded()); + let delivery_race_loop = run_message_delivery_race( + source_client.clone(), + delivery_source_state_receiver, + target_client.clone(), + delivery_target_state_receiver, + params.stall_timeout, + metrics_msg.clone(), + params.delivery_params, + ) + .fuse(); + + let ( + (receiving_source_state_sender, receiving_source_state_receiver), + (receiving_target_state_sender, receiving_target_state_receiver), + ) = (unbounded(), unbounded()); + let receiving_race_loop = run_message_receiving_race( + source_client.clone(), + receiving_source_state_receiver, + target_client.clone(), + receiving_target_state_receiver, + params.stall_timeout, + metrics_msg.clone(), + ) + .fuse(); + + let exit_signal = exit_signal.fuse(); + + futures::pin_mut!( + source_state, + source_go_offline_future, + source_tick_stream, + target_state, + target_go_offline_future, + target_tick_stream, + delivery_race_loop, + receiving_race_loop, + exit_signal + ); + + loop { + futures::select! 
{ + new_source_state = source_state => { + source_state_required = false; + + source_client_is_online = process_future_result( + new_source_state, + &mut source_retry_backoff, + |new_source_state| { + log::debug!( + target: "bridge", + "Received state from {} node: {:?}", + P::SOURCE_NAME, + new_source_state, + ); + let _ = delivery_source_state_sender.unbounded_send(new_source_state.clone()); + let _ = receiving_source_state_sender.unbounded_send(new_source_state.clone()); + + if let Some(metrics_msg) = metrics_msg.as_ref() { + metrics_msg.update_source_state::

(new_source_state); + } + }, + &mut source_go_offline_future, + async_std::task::sleep, + || format!("Error retrieving state from {} node", P::SOURCE_NAME), + ).fail_if_connection_error(FailedClient::Source)?; + }, + _ = source_go_offline_future => { + source_client_is_online = true; + }, + _ = source_tick_stream.next() => { + source_state_required = true; + }, + new_target_state = target_state => { + target_state_required = false; + + target_client_is_online = process_future_result( + new_target_state, + &mut target_retry_backoff, + |new_target_state| { + log::debug!( + target: "bridge", + "Received state from {} node: {:?}", + P::TARGET_NAME, + new_target_state, + ); + let _ = delivery_target_state_sender.unbounded_send(new_target_state.clone()); + let _ = receiving_target_state_sender.unbounded_send(new_target_state.clone()); + + if let Some(metrics_msg) = metrics_msg.as_ref() { + metrics_msg.update_target_state::

(new_target_state); + } + }, + &mut target_go_offline_future, + async_std::task::sleep, + || format!("Error retrieving state from {} node", P::TARGET_NAME), + ).fail_if_connection_error(FailedClient::Target)?; + }, + _ = target_go_offline_future => { + target_client_is_online = true; + }, + _ = target_tick_stream.next() => { + target_state_required = true; + }, + + delivery_error = delivery_race_loop => { + match delivery_error { + Ok(_) => unreachable!("only ends with error; qed"), + Err(err) => return Err(err), + } + }, + receiving_error = receiving_race_loop => { + match receiving_error { + Ok(_) => unreachable!("only ends with error; qed"), + Err(err) => return Err(err), + } + }, + + () = exit_signal => { + return Ok(()); + } + } + + if source_client_is_online && source_state_required { + log::debug!(target: "bridge", "Asking {} node about its state", P::SOURCE_NAME); + source_state.set(source_client.state().fuse()); + source_client_is_online = false; + } + + if target_client_is_online && target_state_required { + log::debug!(target: "bridge", "Asking {} node about its state", P::TARGET_NAME); + target_state.set(target_client.state().fuse()); + target_client_is_online = false; + } + } +} + +#[cfg(test)] +pub(crate) mod tests { + use super::*; + use futures::stream::StreamExt; + use parking_lot::Mutex; + use relay_utils::{HeaderId, MaybeConnectionError}; + use std::sync::Arc; + + pub fn header_id(number: TestSourceHeaderNumber) -> TestSourceHeaderId { + HeaderId(number, number) + } + + pub type TestSourceHeaderId = HeaderId; + pub type TestTargetHeaderId = HeaderId; + + pub type TestMessagesProof = (RangeInclusive, Option); + pub type TestMessagesReceivingProof = MessageNonce; + + pub type TestSourceHeaderNumber = u64; + pub type TestSourceHeaderHash = u64; + + pub type TestTargetHeaderNumber = u64; + pub type TestTargetHeaderHash = u64; + + #[derive(Debug)] + pub struct TestError; + + impl MaybeConnectionError for TestError { + fn is_connection_error(&self) -> 
bool { + true + } + } + + #[derive(Clone)] + pub struct TestMessageLane; + + impl MessageLane for TestMessageLane { + const SOURCE_NAME: &'static str = "TestSource"; + const TARGET_NAME: &'static str = "TestTarget"; + + type MessagesProof = TestMessagesProof; + type MessagesReceivingProof = TestMessagesReceivingProof; + + type SourceHeaderNumber = TestSourceHeaderNumber; + type SourceHeaderHash = TestSourceHeaderHash; + + type TargetHeaderNumber = TestTargetHeaderNumber; + type TargetHeaderHash = TestTargetHeaderHash; + } + + #[derive(Debug, Default, Clone)] + pub struct TestClientData { + is_source_fails: bool, + is_source_reconnected: bool, + source_state: SourceClientState, + source_latest_generated_nonce: MessageNonce, + source_latest_confirmed_received_nonce: MessageNonce, + submitted_messages_receiving_proofs: Vec, + is_target_fails: bool, + is_target_reconnected: bool, + target_state: SourceClientState, + target_latest_received_nonce: MessageNonce, + target_latest_confirmed_received_nonce: MessageNonce, + submitted_messages_proofs: Vec, + target_to_source_header_required: Option, + target_to_source_header_requirements: Vec, + source_to_target_header_required: Option, + source_to_target_header_requirements: Vec, + } + + #[derive(Clone)] + pub struct TestSourceClient { + data: Arc>, + tick: Arc, + } + + #[async_trait] + impl RelayClient for TestSourceClient { + type Error = TestError; + + async fn reconnect(&mut self) -> Result<(), TestError> { + { + let mut data = self.data.lock(); + (self.tick)(&mut *data); + data.is_source_reconnected = true; + } + Ok(()) + } + } + + #[async_trait] + impl SourceClient for TestSourceClient { + async fn state(&self) -> Result, TestError> { + let mut data = self.data.lock(); + (self.tick)(&mut *data); + if data.is_source_fails { + return Err(TestError); + } + Ok(data.source_state.clone()) + } + + async fn latest_generated_nonce( + &self, + id: SourceHeaderIdOf, + ) -> Result<(SourceHeaderIdOf, MessageNonce), TestError> { + let 
mut data = self.data.lock(); + (self.tick)(&mut *data); + if data.is_source_fails { + return Err(TestError); + } + Ok((id, data.source_latest_generated_nonce)) + } + + async fn latest_confirmed_received_nonce( + &self, + id: SourceHeaderIdOf, + ) -> Result<(SourceHeaderIdOf, MessageNonce), TestError> { + let mut data = self.data.lock(); + (self.tick)(&mut *data); + Ok((id, data.source_latest_confirmed_received_nonce)) + } + + async fn generated_messages_weights( + &self, + _id: SourceHeaderIdOf, + nonces: RangeInclusive, + ) -> Result { + Ok(nonces + .map(|nonce| (nonce, MessageWeights { weight: 1, size: 1 })) + .collect()) + } + + async fn prove_messages( + &self, + id: SourceHeaderIdOf, + nonces: RangeInclusive, + proof_parameters: MessageProofParameters, + ) -> Result< + ( + SourceHeaderIdOf, + RangeInclusive, + TestMessagesProof, + ), + TestError, + > { + let mut data = self.data.lock(); + (self.tick)(&mut *data); + Ok(( + id, + nonces.clone(), + ( + nonces, + if proof_parameters.outbound_state_proof_required { + Some(data.source_latest_confirmed_received_nonce) + } else { + None + }, + ), + )) + } + + async fn submit_messages_receiving_proof( + &self, + _generated_at_block: TargetHeaderIdOf, + proof: TestMessagesReceivingProof, + ) -> Result<(), TestError> { + let mut data = self.data.lock(); + (self.tick)(&mut *data); + data.submitted_messages_receiving_proofs.push(proof); + data.source_latest_confirmed_received_nonce = proof; + Ok(()) + } + + async fn require_target_header_on_source(&self, id: TargetHeaderIdOf) { + let mut data = self.data.lock(); + data.target_to_source_header_required = Some(id); + data.target_to_source_header_requirements.push(id); + (self.tick)(&mut *data); + } + } + + #[derive(Clone)] + pub struct TestTargetClient { + data: Arc>, + tick: Arc, + } + + #[async_trait] + impl RelayClient for TestTargetClient { + type Error = TestError; + + async fn reconnect(&mut self) -> Result<(), TestError> { + { + let mut data = self.data.lock(); + 
(self.tick)(&mut *data); + data.is_target_reconnected = true; + } + Ok(()) + } + } + + #[async_trait] + impl TargetClient for TestTargetClient { + async fn state(&self) -> Result, TestError> { + let mut data = self.data.lock(); + (self.tick)(&mut *data); + if data.is_target_fails { + return Err(TestError); + } + Ok(data.target_state.clone()) + } + + async fn latest_received_nonce( + &self, + id: TargetHeaderIdOf, + ) -> Result<(TargetHeaderIdOf, MessageNonce), TestError> { + let mut data = self.data.lock(); + (self.tick)(&mut *data); + if data.is_target_fails { + return Err(TestError); + } + Ok((id, data.target_latest_received_nonce)) + } + + async fn unrewarded_relayers_state( + &self, + id: TargetHeaderIdOf, + ) -> Result<(TargetHeaderIdOf, UnrewardedRelayersState), TestError> { + Ok(( + id, + UnrewardedRelayersState { + unrewarded_relayer_entries: 0, + messages_in_oldest_entry: 0, + total_messages: 0, + }, + )) + } + + async fn latest_confirmed_received_nonce( + &self, + id: TargetHeaderIdOf, + ) -> Result<(TargetHeaderIdOf, MessageNonce), TestError> { + let mut data = self.data.lock(); + (self.tick)(&mut *data); + if data.is_target_fails { + return Err(TestError); + } + Ok((id, data.target_latest_confirmed_received_nonce)) + } + + async fn prove_messages_receiving( + &self, + id: TargetHeaderIdOf, + ) -> Result<(TargetHeaderIdOf, TestMessagesReceivingProof), TestError> { + Ok((id, self.data.lock().target_latest_received_nonce)) + } + + async fn submit_messages_proof( + &self, + _generated_at_header: SourceHeaderIdOf, + nonces: RangeInclusive, + proof: TestMessagesProof, + ) -> Result, TestError> { + let mut data = self.data.lock(); + (self.tick)(&mut *data); + if data.is_target_fails { + return Err(TestError); + } + data.target_state.best_self = + HeaderId(data.target_state.best_self.0 + 1, data.target_state.best_self.1 + 1); + data.target_latest_received_nonce = *proof.0.end(); + if let Some(target_latest_confirmed_received_nonce) = proof.1 { + 
data.target_latest_confirmed_received_nonce = target_latest_confirmed_received_nonce; + } + data.submitted_messages_proofs.push(proof); + Ok(nonces) + } + + async fn require_source_header_on_target(&self, id: SourceHeaderIdOf) { + let mut data = self.data.lock(); + data.source_to_target_header_required = Some(id); + data.source_to_target_header_requirements.push(id); + (self.tick)(&mut *data); + } + } + + fn run_loop_test( + data: TestClientData, + source_tick: Arc, + target_tick: Arc, + exit_signal: impl Future, + ) -> TestClientData { + async_std::task::block_on(async { + let data = Arc::new(Mutex::new(data)); + + let source_client = TestSourceClient { + data: data.clone(), + tick: source_tick, + }; + let target_client = TestTargetClient { + data: data.clone(), + tick: target_tick, + }; + let _ = run( + Params { + lane: [0, 0, 0, 0], + source_tick: Duration::from_millis(100), + target_tick: Duration::from_millis(100), + reconnect_delay: Duration::from_millis(0), + stall_timeout: Duration::from_millis(60 * 1000), + delivery_params: MessageDeliveryParams { + max_unrewarded_relayer_entries_at_target: 4, + max_unconfirmed_nonces_at_target: 4, + max_messages_in_single_batch: 4, + max_messages_weight_in_single_batch: 4, + max_messages_size_in_single_batch: 4, + }, + }, + source_client, + target_client, + MetricsParams::disabled(), + exit_signal, + ) + .await; + let result = data.lock().clone(); + result + }) + } + + #[test] + fn message_lane_loop_is_able_to_recover_from_connection_errors() { + // with this configuration, source client will return Err, making source client + // reconnect. Then the target client will fail with Err + reconnect. Then we finally + // able to deliver messages. 
+ let (exit_sender, exit_receiver) = unbounded(); + let result = run_loop_test( + TestClientData { + is_source_fails: true, + source_state: ClientState { + best_self: HeaderId(0, 0), + best_finalized_self: HeaderId(0, 0), + best_finalized_peer_at_best_self: HeaderId(0, 0), + }, + source_latest_generated_nonce: 1, + target_state: ClientState { + best_self: HeaderId(0, 0), + best_finalized_self: HeaderId(0, 0), + best_finalized_peer_at_best_self: HeaderId(0, 0), + }, + target_latest_received_nonce: 0, + ..Default::default() + }, + Arc::new(|data: &mut TestClientData| { + if data.is_source_reconnected { + data.is_source_fails = false; + data.is_target_fails = true; + } + }), + Arc::new(move |data: &mut TestClientData| { + if data.is_target_reconnected { + data.is_target_fails = false; + } + if data.target_state.best_finalized_peer_at_best_self.0 < 10 { + data.target_state.best_finalized_peer_at_best_self = HeaderId( + data.target_state.best_finalized_peer_at_best_self.0 + 1, + data.target_state.best_finalized_peer_at_best_self.0 + 1, + ); + } + if !data.submitted_messages_proofs.is_empty() { + exit_sender.unbounded_send(()).unwrap(); + } + }), + exit_receiver.into_future().map(|(_, _)| ()), + ); + + assert_eq!(result.submitted_messages_proofs, vec![(1..=1, None)],); + } + + #[test] + fn message_lane_loop_works() { + let (exit_sender, exit_receiver) = unbounded(); + let result = run_loop_test( + TestClientData { + source_state: ClientState { + best_self: HeaderId(10, 10), + best_finalized_self: HeaderId(10, 10), + best_finalized_peer_at_best_self: HeaderId(0, 0), + }, + source_latest_generated_nonce: 10, + target_state: ClientState { + best_self: HeaderId(0, 0), + best_finalized_self: HeaderId(0, 0), + best_finalized_peer_at_best_self: HeaderId(0, 0), + }, + target_latest_received_nonce: 0, + ..Default::default() + }, + Arc::new(|data: &mut TestClientData| { + // headers relay must only be started when we need new target headers at source node + if 
data.target_to_source_header_required.is_some() { + assert!(data.source_state.best_finalized_peer_at_best_self.0 < data.target_state.best_self.0); + data.target_to_source_header_required = None; + } + }), + Arc::new(move |data: &mut TestClientData| { + // headers relay must only be started when we need new source headers at target node + if data.source_to_target_header_required.is_some() { + assert!(data.target_state.best_finalized_peer_at_best_self.0 < data.source_state.best_self.0); + data.source_to_target_header_required = None; + } + // syncing source headers -> target chain (all at once) + if data.target_state.best_finalized_peer_at_best_self.0 < data.source_state.best_finalized_self.0 { + data.target_state.best_finalized_peer_at_best_self = data.source_state.best_finalized_self; + } + // syncing source headers -> target chain (all at once) + if data.source_state.best_finalized_peer_at_best_self.0 < data.target_state.best_finalized_self.0 { + data.source_state.best_finalized_peer_at_best_self = data.target_state.best_finalized_self; + } + // if target has received messages batch => increase blocks so that confirmations may be sent + if data.target_latest_received_nonce == 4 + || data.target_latest_received_nonce == 8 + || data.target_latest_received_nonce == 10 + { + data.target_state.best_self = + HeaderId(data.target_state.best_self.0 + 1, data.target_state.best_self.0 + 1); + data.target_state.best_finalized_self = data.target_state.best_self; + data.source_state.best_self = + HeaderId(data.source_state.best_self.0 + 1, data.source_state.best_self.0 + 1); + data.source_state.best_finalized_self = data.source_state.best_self; + } + // if source has received all messages receiving confirmations => stop + if data.source_latest_confirmed_received_nonce == 10 { + exit_sender.unbounded_send(()).unwrap(); + } + }), + exit_receiver.into_future().map(|(_, _)| ()), + ); + + // there are no strict restrictions on when reward confirmation should come + // (because 
`max_unconfirmed_nonces_at_target` is `100` in tests and this confirmation + // depends on the state of both clients) + // => we do not check it here + assert_eq!(result.submitted_messages_proofs[0].0, 1..=4); + assert_eq!(result.submitted_messages_proofs[1].0, 5..=8); + assert_eq!(result.submitted_messages_proofs[2].0, 9..=10); + assert!(!result.submitted_messages_receiving_proofs.is_empty()); + + // check that we have at least once required new source->target or target->source headers + assert!(!result.target_to_source_header_requirements.is_empty()); + assert!(!result.source_to_target_header_requirements.is_empty()); + } +} diff --git a/polkadot/relays/messages/src/message_race_delivery.rs b/polkadot/relays/messages/src/message_race_delivery.rs new file mode 100644 index 00000000000..225c59f23ca --- /dev/null +++ b/polkadot/relays/messages/src/message_race_delivery.rs @@ -0,0 +1,879 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +//! Message delivery race delivers proof-of-messages from lane.source to lane.target. 
+ +use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}; +use crate::message_lane_loop::{ + MessageDeliveryParams, MessageProofParameters, MessageWeightsMap, SourceClient as MessageLaneSourceClient, + SourceClientState, TargetClient as MessageLaneTargetClient, TargetClientState, +}; +use crate::message_race_loop::{ + MessageRace, NoncesRange, RaceState, RaceStrategy, SourceClient, SourceClientNonces, TargetClient, + TargetClientNonces, +}; +use crate::message_race_strategy::BasicStrategy; +use crate::metrics::MessageLaneLoopMetrics; + +use async_trait::async_trait; +use bp_messages::{MessageNonce, UnrewardedRelayersState, Weight}; +use futures::stream::FusedStream; +use relay_utils::FailedClient; +use std::{ + collections::{BTreeMap, VecDeque}, + marker::PhantomData, + ops::RangeInclusive, + time::Duration, +}; + +/// Run message delivery race. +pub async fn run( + source_client: impl MessageLaneSourceClient

, + source_state_updates: impl FusedStream>, + target_client: impl MessageLaneTargetClient

, + target_state_updates: impl FusedStream>, + stall_timeout: Duration, + metrics_msg: Option, + params: MessageDeliveryParams, +) -> Result<(), FailedClient> { + crate::message_race_loop::run( + MessageDeliveryRaceSource { + client: source_client, + metrics_msg: metrics_msg.clone(), + _phantom: Default::default(), + }, + source_state_updates, + MessageDeliveryRaceTarget { + client: target_client, + metrics_msg, + _phantom: Default::default(), + }, + target_state_updates, + stall_timeout, + MessageDeliveryStrategy::

{ + max_unrewarded_relayer_entries_at_target: params.max_unrewarded_relayer_entries_at_target, + max_unconfirmed_nonces_at_target: params.max_unconfirmed_nonces_at_target, + max_messages_in_single_batch: params.max_messages_in_single_batch, + max_messages_weight_in_single_batch: params.max_messages_weight_in_single_batch, + max_messages_size_in_single_batch: params.max_messages_size_in_single_batch, + latest_confirmed_nonces_at_source: VecDeque::new(), + target_nonces: None, + strategy: BasicStrategy::new(), + }, + ) + .await +} + +/// Message delivery race. +struct MessageDeliveryRace

(std::marker::PhantomData

); + +impl MessageRace for MessageDeliveryRace

{ + type SourceHeaderId = SourceHeaderIdOf

; + type TargetHeaderId = TargetHeaderIdOf

; + + type MessageNonce = MessageNonce; + type Proof = P::MessagesProof; + + fn source_name() -> String { + format!("{}::MessagesDelivery", P::SOURCE_NAME) + } + + fn target_name() -> String { + format!("{}::MessagesDelivery", P::TARGET_NAME) + } +} + +/// Message delivery race source, which is a source of the lane. +struct MessageDeliveryRaceSource { + client: C, + metrics_msg: Option, + _phantom: PhantomData

, +} + +#[async_trait] +impl SourceClient> for MessageDeliveryRaceSource +where + P: MessageLane, + C: MessageLaneSourceClient

, +{ + type Error = C::Error; + type NoncesRange = MessageWeightsMap; + type ProofParameters = MessageProofParameters; + + async fn nonces( + &self, + at_block: SourceHeaderIdOf

, + prev_latest_nonce: MessageNonce, + ) -> Result<(SourceHeaderIdOf

, SourceClientNonces), Self::Error> { + let (at_block, latest_generated_nonce) = self.client.latest_generated_nonce(at_block).await?; + let (at_block, latest_confirmed_nonce) = self.client.latest_confirmed_received_nonce(at_block).await?; + + if let Some(metrics_msg) = self.metrics_msg.as_ref() { + metrics_msg.update_source_latest_generated_nonce::

(latest_generated_nonce); + metrics_msg.update_source_latest_confirmed_nonce::

(latest_confirmed_nonce); + } + + let new_nonces = if latest_generated_nonce > prev_latest_nonce { + self.client + .generated_messages_weights(at_block.clone(), prev_latest_nonce + 1..=latest_generated_nonce) + .await? + } else { + MessageWeightsMap::new() + }; + + Ok(( + at_block, + SourceClientNonces { + new_nonces, + confirmed_nonce: Some(latest_confirmed_nonce), + }, + )) + } + + async fn generate_proof( + &self, + at_block: SourceHeaderIdOf

, + nonces: RangeInclusive, + proof_parameters: Self::ProofParameters, + ) -> Result<(SourceHeaderIdOf

, RangeInclusive, P::MessagesProof), Self::Error> { + self.client.prove_messages(at_block, nonces, proof_parameters).await + } +} + +/// Message delivery race target, which is a target of the lane. +struct MessageDeliveryRaceTarget { + client: C, + metrics_msg: Option, + _phantom: PhantomData

, +} + +#[async_trait] +impl TargetClient> for MessageDeliveryRaceTarget +where + P: MessageLane, + C: MessageLaneTargetClient

, +{ + type Error = C::Error; + type TargetNoncesData = DeliveryRaceTargetNoncesData; + + async fn require_source_header(&self, id: SourceHeaderIdOf

) { + self.client.require_source_header_on_target(id).await + } + + async fn nonces( + &self, + at_block: TargetHeaderIdOf

, + update_metrics: bool, + ) -> Result<(TargetHeaderIdOf

, TargetClientNonces), Self::Error> { + let (at_block, latest_received_nonce) = self.client.latest_received_nonce(at_block).await?; + let (at_block, latest_confirmed_nonce) = self.client.latest_confirmed_received_nonce(at_block).await?; + let (at_block, unrewarded_relayers) = self.client.unrewarded_relayers_state(at_block).await?; + + if update_metrics { + if let Some(metrics_msg) = self.metrics_msg.as_ref() { + metrics_msg.update_target_latest_received_nonce::

(latest_received_nonce); + metrics_msg.update_target_latest_confirmed_nonce::

(latest_confirmed_nonce); + } + } + + Ok(( + at_block, + TargetClientNonces { + latest_nonce: latest_received_nonce, + nonces_data: DeliveryRaceTargetNoncesData { + confirmed_nonce: latest_confirmed_nonce, + unrewarded_relayers, + }, + }, + )) + } + + async fn submit_proof( + &self, + generated_at_block: SourceHeaderIdOf

, + nonces: RangeInclusive, + proof: P::MessagesProof, + ) -> Result, Self::Error> { + self.client + .submit_messages_proof(generated_at_block, nonces, proof) + .await + } +} + +/// Additional nonces data from the target client used by message delivery race. +#[derive(Debug, Clone)] +struct DeliveryRaceTargetNoncesData { + /// Latest nonce that we know: (1) has been delivered to us (2) has been confirmed + /// back to the source node (by confirmations race) and (3) relayer has received + /// reward for (and this has been confirmed by the message delivery race). + confirmed_nonce: MessageNonce, + /// State of the unrewarded relayers set at the target node. + unrewarded_relayers: UnrewardedRelayersState, +} + +/// Messages delivery strategy. +struct MessageDeliveryStrategy { + /// Maximal unrewarded relayer entries at target client. + max_unrewarded_relayer_entries_at_target: MessageNonce, + /// Maximal unconfirmed nonces at target client. + max_unconfirmed_nonces_at_target: MessageNonce, + /// Maximal number of messages in the single delivery transaction. + max_messages_in_single_batch: MessageNonce, + /// Maximal cumulative messages weight in the single delivery transaction. + max_messages_weight_in_single_batch: Weight, + /// Maximal messages size in the single delivery transaction. + max_messages_size_in_single_batch: usize, + /// Latest confirmed nonces at the source client + the header id where we have first met this nonce. + latest_confirmed_nonces_at_source: VecDeque<(SourceHeaderIdOf

, MessageNonce)>, + /// Target nonces from the source client. + target_nonces: Option>, + /// Basic delivery strategy. + strategy: MessageDeliveryStrategyBase

, +} + +type MessageDeliveryStrategyBase

= BasicStrategy< +

::SourceHeaderNumber, +

::SourceHeaderHash, +

::TargetHeaderNumber, +

::TargetHeaderHash, + MessageWeightsMap, +

::MessagesProof, +>; + +impl std::fmt::Debug for MessageDeliveryStrategy

{ + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.debug_struct("MessageDeliveryStrategy") + .field( + "max_unrewarded_relayer_entries_at_target", + &self.max_unrewarded_relayer_entries_at_target, + ) + .field( + "max_unconfirmed_nonces_at_target", + &self.max_unconfirmed_nonces_at_target, + ) + .field("max_messages_in_single_batch", &self.max_messages_in_single_batch) + .field( + "max_messages_weight_in_single_batch", + &self.max_messages_weight_in_single_batch, + ) + .field( + "max_messages_size_in_single_batch", + &self.max_messages_size_in_single_batch, + ) + .field( + "latest_confirmed_nonces_at_source", + &self.latest_confirmed_nonces_at_source, + ) + .field("target_nonces", &self.target_nonces) + .field("strategy", &self.strategy) + .finish() + } +} + +impl RaceStrategy, TargetHeaderIdOf

, P::MessagesProof> + for MessageDeliveryStrategy

+{ + type SourceNoncesRange = MessageWeightsMap; + type ProofParameters = MessageProofParameters; + type TargetNoncesData = DeliveryRaceTargetNoncesData; + + fn is_empty(&self) -> bool { + self.strategy.is_empty() + } + + fn required_source_header_at_target(&self, current_best: &SourceHeaderIdOf

) -> Option> { + self.strategy.required_source_header_at_target(current_best) + } + + fn best_at_source(&self) -> Option { + self.strategy.best_at_source() + } + + fn best_at_target(&self) -> Option { + self.strategy.best_at_target() + } + + fn source_nonces_updated( + &mut self, + at_block: SourceHeaderIdOf

, + nonces: SourceClientNonces, + ) { + if let Some(confirmed_nonce) = nonces.confirmed_nonce { + let is_confirmed_nonce_updated = self + .latest_confirmed_nonces_at_source + .back() + .map(|(_, prev_nonce)| *prev_nonce != confirmed_nonce) + .unwrap_or(true); + if is_confirmed_nonce_updated { + self.latest_confirmed_nonces_at_source + .push_back((at_block.clone(), confirmed_nonce)); + } + } + self.strategy.source_nonces_updated(at_block, nonces) + } + + fn best_target_nonces_updated( + &mut self, + nonces: TargetClientNonces, + race_state: &mut RaceState, TargetHeaderIdOf

, P::MessagesProof>, + ) { + // best target nonces must always be ge than finalized target nonces + let mut target_nonces = self.target_nonces.take().unwrap_or_else(|| nonces.clone()); + target_nonces.nonces_data = nonces.nonces_data.clone(); + target_nonces.latest_nonce = std::cmp::max(target_nonces.latest_nonce, nonces.latest_nonce); + self.target_nonces = Some(target_nonces); + + self.strategy.best_target_nonces_updated( + TargetClientNonces { + latest_nonce: nonces.latest_nonce, + nonces_data: (), + }, + race_state, + ) + } + + fn finalized_target_nonces_updated( + &mut self, + nonces: TargetClientNonces, + race_state: &mut RaceState, TargetHeaderIdOf

, P::MessagesProof>, + ) { + if let Some(ref best_finalized_source_header_id_at_best_target) = + race_state.best_finalized_source_header_id_at_best_target + { + let oldest_header_number_to_keep = best_finalized_source_header_id_at_best_target.0; + while self + .latest_confirmed_nonces_at_source + .front() + .map(|(id, _)| id.0 < oldest_header_number_to_keep) + .unwrap_or(false) + { + self.latest_confirmed_nonces_at_source.pop_front(); + } + } + + if let Some(ref mut target_nonces) = self.target_nonces { + target_nonces.latest_nonce = std::cmp::max(target_nonces.latest_nonce, nonces.latest_nonce); + } + + self.strategy.finalized_target_nonces_updated( + TargetClientNonces { + latest_nonce: nonces.latest_nonce, + nonces_data: (), + }, + race_state, + ) + } + + fn select_nonces_to_deliver( + &mut self, + race_state: &RaceState, TargetHeaderIdOf

, P::MessagesProof>, + ) -> Option<(RangeInclusive, Self::ProofParameters)> { + let best_finalized_source_header_id_at_best_target = + race_state.best_finalized_source_header_id_at_best_target.clone()?; + let latest_confirmed_nonce_at_source = self + .latest_confirmed_nonces_at_source + .iter() + .take_while(|(id, _)| id.0 <= best_finalized_source_header_id_at_best_target.0) + .last() + .map(|(_, nonce)| *nonce)?; + let target_nonces = self.target_nonces.as_ref()?; + + // There's additional condition in the message delivery race: target would reject messages + // if there are too much unconfirmed messages at the inbound lane. + + // The receiving race is responsible to deliver confirmations back to the source chain. So if + // there's a lot of unconfirmed messages, let's wait until it'll be able to do its job. + let latest_received_nonce_at_target = target_nonces.latest_nonce; + let confirmations_missing = latest_received_nonce_at_target.checked_sub(latest_confirmed_nonce_at_source); + match confirmations_missing { + Some(confirmations_missing) if confirmations_missing >= self.max_unconfirmed_nonces_at_target => { + log::debug!( + target: "bridge", + "Cannot deliver any more messages from {} to {}. Too many unconfirmed nonces \ + at target: target.latest_received={:?}, source.latest_confirmed={:?}, max={:?}", + MessageDeliveryRace::

::source_name(), + MessageDeliveryRace::

::target_name(), + latest_received_nonce_at_target, + latest_confirmed_nonce_at_source, + self.max_unconfirmed_nonces_at_target, + ); + + return None; + } + _ => (), + } + + // Ok - we may have new nonces to deliver. But target may still reject new messages, because we haven't + // notified it that (some) messages have been confirmed. So we may want to include updated + // `source.latest_confirmed` in the proof. + // + // Important note: we're including outbound state lane proof whenever there are unconfirmed nonces + // on the target chain. Other strategy is to include it only if it's absolutely necessary. + let latest_confirmed_nonce_at_target = target_nonces.nonces_data.confirmed_nonce; + let outbound_state_proof_required = latest_confirmed_nonce_at_target < latest_confirmed_nonce_at_source; + + // The target node would also reject messages if there are too many entries in the + // "unrewarded relayers" set. If we are unable to prove new rewards to the target node, then + // we should wait for confirmations race. + let unrewarded_relayer_entries_limit_reached = + target_nonces.nonces_data.unrewarded_relayers.unrewarded_relayer_entries + >= self.max_unrewarded_relayer_entries_at_target; + if unrewarded_relayer_entries_limit_reached { + // so there are already too many unrewarded relayer entries in the set + // + // => check if we can prove enough rewards. If not, we should wait for more rewards to be paid + let number_of_rewards_being_proved = + latest_confirmed_nonce_at_source.saturating_sub(latest_confirmed_nonce_at_target); + let enough_rewards_being_proved = number_of_rewards_being_proved + >= target_nonces.nonces_data.unrewarded_relayers.messages_in_oldest_entry; + if !enough_rewards_being_proved { + return None; + } + } + + // If we're here, then the confirmations race did its job && sending side now knows that messages + // have been delivered. Now let's select nonces that we want to deliver. 
+ // + // We may deliver at most: + // + // max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target - latest_confirmed_nonce_at_target) + // + // messages in the batch. But since we're including outbound state proof in the batch, then it + // may be increased to: + // + // max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target - latest_confirmed_nonce_at_source) + let future_confirmed_nonce_at_target = if outbound_state_proof_required { + latest_confirmed_nonce_at_source + } else { + latest_confirmed_nonce_at_target + }; + let max_nonces = latest_received_nonce_at_target + .checked_sub(future_confirmed_nonce_at_target) + .and_then(|diff| self.max_unconfirmed_nonces_at_target.checked_sub(diff)) + .unwrap_or_default(); + let max_nonces = std::cmp::min(max_nonces, self.max_messages_in_single_batch); + let max_messages_weight_in_single_batch = self.max_messages_weight_in_single_batch; + let max_messages_size_in_single_batch = self.max_messages_size_in_single_batch; + let mut selected_weight: Weight = 0; + let mut selected_size: usize = 0; + let mut selected_count: MessageNonce = 0; + + let selected_nonces = self + .strategy + .select_nonces_to_deliver_with_selector(race_state, |range| { + let to_requeue = range + .into_iter() + .skip_while(|(_, weight)| { + // Since we (hopefully) have some reserves in `max_messages_weight_in_single_batch` + // and `max_messages_size_in_single_batch`, we may still try to submit transaction + // with single message if message overflows these limits. The worst case would be if + // transaction will be rejected by the target runtime, but at least we have tried. 
+ + // limit messages in the batch by weight + let new_selected_weight = match selected_weight.checked_add(weight.weight) { + Some(new_selected_weight) if new_selected_weight <= max_messages_weight_in_single_batch => { + new_selected_weight + } + new_selected_weight if selected_count == 0 => { + log::warn!( + target: "bridge", + "Going to submit message delivery transaction with declared dispatch \ + weight {:?} that overflows maximal configured weight {}", + new_selected_weight, + max_messages_weight_in_single_batch, + ); + new_selected_weight.unwrap_or(Weight::MAX) + } + _ => return false, + }; + + // limit messages in the batch by size + let new_selected_size = match selected_size.checked_add(weight.size) { + Some(new_selected_size) if new_selected_size <= max_messages_size_in_single_batch => { + new_selected_size + } + new_selected_size if selected_count == 0 => { + log::warn!( + target: "bridge", + "Going to submit message delivery transaction with message \ + size {:?} that overflows maximal configured size {}", + new_selected_size, + max_messages_size_in_single_batch, + ); + new_selected_size.unwrap_or(usize::MAX) + } + _ => return false, + }; + + // limit number of messages in the batch + let new_selected_count = selected_count + 1; + if new_selected_count > max_nonces { + return false; + } + + selected_weight = new_selected_weight; + selected_size = new_selected_size; + selected_count = new_selected_count; + true + }) + .collect::>(); + if to_requeue.is_empty() { + None + } else { + Some(to_requeue) + } + })?; + + Some(( + selected_nonces, + MessageProofParameters { + outbound_state_proof_required, + dispatch_weight: selected_weight, + }, + )) + } +} + +impl NoncesRange for MessageWeightsMap { + fn begin(&self) -> MessageNonce { + self.keys().next().cloned().unwrap_or_default() + } + + fn end(&self) -> MessageNonce { + self.keys().next_back().cloned().unwrap_or_default() + } + + fn greater_than(mut self, nonce: MessageNonce) -> Option { + let gte = 
self.split_off(&(nonce + 1)); + if gte.is_empty() { + None + } else { + Some(gte) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::message_lane_loop::{ + tests::{header_id, TestMessageLane, TestMessagesProof, TestSourceHeaderId, TestTargetHeaderId}, + MessageWeights, + }; + + type TestRaceState = RaceState; + type TestStrategy = MessageDeliveryStrategy; + + fn prepare_strategy() -> (TestRaceState, TestStrategy) { + let mut race_state = RaceState { + best_finalized_source_header_id_at_source: Some(header_id(1)), + best_finalized_source_header_id_at_best_target: Some(header_id(1)), + best_target_header_id: Some(header_id(1)), + best_finalized_target_header_id: Some(header_id(1)), + nonces_to_submit: None, + nonces_submitted: None, + }; + + let mut race_strategy = TestStrategy { + max_unrewarded_relayer_entries_at_target: 4, + max_unconfirmed_nonces_at_target: 4, + max_messages_in_single_batch: 4, + max_messages_weight_in_single_batch: 4, + max_messages_size_in_single_batch: 4, + latest_confirmed_nonces_at_source: vec![(header_id(1), 19)].into_iter().collect(), + target_nonces: Some(TargetClientNonces { + latest_nonce: 19, + nonces_data: DeliveryRaceTargetNoncesData { + confirmed_nonce: 19, + unrewarded_relayers: UnrewardedRelayersState { + unrewarded_relayer_entries: 0, + messages_in_oldest_entry: 0, + total_messages: 0, + }, + }, + }), + strategy: BasicStrategy::new(), + }; + + race_strategy.strategy.source_nonces_updated( + header_id(1), + SourceClientNonces { + new_nonces: vec![ + (20, MessageWeights { weight: 1, size: 1 }), + (21, MessageWeights { weight: 1, size: 1 }), + (22, MessageWeights { weight: 1, size: 1 }), + (23, MessageWeights { weight: 1, size: 1 }), + ] + .into_iter() + .collect(), + confirmed_nonce: Some(19), + }, + ); + + let target_nonces = TargetClientNonces { + latest_nonce: 19, + nonces_data: (), + }; + race_strategy + .strategy + .best_target_nonces_updated(target_nonces.clone(), &mut race_state); + race_strategy + 
.strategy + .finalized_target_nonces_updated(target_nonces, &mut race_state); + + (race_state, race_strategy) + } + + fn proof_parameters(state_required: bool, weight: Weight) -> MessageProofParameters { + MessageProofParameters { + outbound_state_proof_required: state_required, + dispatch_weight: weight, + } + } + + #[test] + fn weights_map_works_as_nonces_range() { + fn build_map(range: RangeInclusive) -> MessageWeightsMap { + range + .map(|idx| { + ( + idx, + MessageWeights { + weight: idx, + size: idx as _, + }, + ) + }) + .collect() + } + + let map = build_map(20..=30); + + assert_eq!(map.begin(), 20); + assert_eq!(map.end(), 30); + assert_eq!(map.clone().greater_than(10), Some(build_map(20..=30))); + assert_eq!(map.clone().greater_than(19), Some(build_map(20..=30))); + assert_eq!(map.clone().greater_than(20), Some(build_map(21..=30))); + assert_eq!(map.clone().greater_than(25), Some(build_map(26..=30))); + assert_eq!(map.clone().greater_than(29), Some(build_map(30..=30))); + assert_eq!(map.greater_than(30), None); + } + + #[test] + fn message_delivery_strategy_selects_messages_to_deliver() { + let (state, mut strategy) = prepare_strategy(); + + // both sides are ready to relay new messages + assert_eq!( + strategy.select_nonces_to_deliver(&state), + Some(((20..=23), proof_parameters(false, 4))) + ); + } + + #[test] + fn message_delivery_strategy_selects_nothing_if_too_many_confirmations_missing() { + let (state, mut strategy) = prepare_strategy(); + + // if there are already `max_unconfirmed_nonces_at_target` messages on target, + // we need to wait until confirmations will be delivered by receiving race + strategy.latest_confirmed_nonces_at_source = vec![( + header_id(1), + strategy.target_nonces.as_ref().unwrap().latest_nonce - strategy.max_unconfirmed_nonces_at_target, + )] + .into_iter() + .collect(); + assert_eq!(strategy.select_nonces_to_deliver(&state), None); + } + + #[test] + fn 
message_delivery_strategy_includes_outbound_state_proof_when_new_nonces_are_available() { + let (state, mut strategy) = prepare_strategy(); + + // if there are new confirmed nonces on source, we want to relay this information + // to target to prune rewards queue + let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; + strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; + assert_eq!( + strategy.select_nonces_to_deliver(&state), + Some(((20..=23), proof_parameters(true, 4))) + ); + } + + #[test] + fn message_delivery_strategy_selects_nothing_if_there_are_too_many_unrewarded_relayers() { + let (state, mut strategy) = prepare_strategy(); + + // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, + // we need to wait until rewards will be paid + { + let mut unrewarded_relayers = &mut strategy.target_nonces.as_mut().unwrap().nonces_data.unrewarded_relayers; + unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target; + unrewarded_relayers.messages_in_oldest_entry = 4; + } + assert_eq!(strategy.select_nonces_to_deliver(&state), None); + } + + #[test] + fn message_delivery_strategy_selects_nothing_if_proved_rewards_is_not_enough_to_remove_oldest_unrewarded_entry() { + let (state, mut strategy) = prepare_strategy(); + + // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, + // we need to prove at least `messages_in_oldest_entry` rewards + let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; + { + let mut nonces_data = &mut strategy.target_nonces.as_mut().unwrap().nonces_data; + nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; + let mut unrewarded_relayers = &mut nonces_data.unrewarded_relayers; + unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target; + 
unrewarded_relayers.messages_in_oldest_entry = 4; + } + assert_eq!(strategy.select_nonces_to_deliver(&state), None); + } + + #[test] + fn message_delivery_strategy_includes_outbound_state_proof_if_proved_rewards_is_enough() { + let (state, mut strategy) = prepare_strategy(); + + // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, + // we need to prove at least `messages_in_oldest_entry` rewards + let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; + { + let mut nonces_data = &mut strategy.target_nonces.as_mut().unwrap().nonces_data; + nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 3; + let mut unrewarded_relayers = &mut nonces_data.unrewarded_relayers; + unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target; + unrewarded_relayers.messages_in_oldest_entry = 3; + } + assert_eq!( + strategy.select_nonces_to_deliver(&state), + Some(((20..=23), proof_parameters(true, 4))) + ); + } + + #[test] + fn message_delivery_strategy_limits_batch_by_messages_weight() { + let (state, mut strategy) = prepare_strategy(); + + // not all queued messages may fit in the batch, because batch has max weight + strategy.max_messages_weight_in_single_batch = 3; + assert_eq!( + strategy.select_nonces_to_deliver(&state), + Some(((20..=22), proof_parameters(false, 3))) + ); + } + + #[test] + fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_weight() { + let (state, mut strategy) = prepare_strategy(); + + // first message doesn't fit in the batch, because it has weight (10) that overflows max weight (4) + strategy.strategy.source_queue_mut()[0].1.get_mut(&20).unwrap().weight = 10; + assert_eq!( + strategy.select_nonces_to_deliver(&state), + Some(((20..=20), proof_parameters(false, 10))) + ); + } + + #[test] + fn message_delivery_strategy_limits_batch_by_messages_size() { + let (state, mut strategy) = 
prepare_strategy(); + + // not all queued messages may fit in the batch, because batch has max weight + strategy.max_messages_size_in_single_batch = 3; + assert_eq!( + strategy.select_nonces_to_deliver(&state), + Some(((20..=22), proof_parameters(false, 3))) + ); + } + + #[test] + fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_size() { + let (state, mut strategy) = prepare_strategy(); + + // first message doesn't fit in the batch, because it has weight (10) that overflows max weight (4) + strategy.strategy.source_queue_mut()[0].1.get_mut(&20).unwrap().size = 10; + assert_eq!( + strategy.select_nonces_to_deliver(&state), + Some(((20..=20), proof_parameters(false, 1))) + ); + } + + #[test] + fn message_delivery_strategy_limits_batch_by_messages_count_when_there_is_upper_limit() { + let (state, mut strategy) = prepare_strategy(); + + // not all queued messages may fit in the batch, because batch has max number of messages limit + strategy.max_messages_in_single_batch = 3; + assert_eq!( + strategy.select_nonces_to_deliver(&state), + Some(((20..=22), proof_parameters(false, 3))) + ); + } + + #[test] + fn message_delivery_strategy_limits_batch_by_messages_count_when_there_are_unconfirmed_nonces() { + let (state, mut strategy) = prepare_strategy(); + + // 1 delivery confirmation from target to source is still missing, so we may only + // relay 3 new messages + let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; + strategy.latest_confirmed_nonces_at_source = vec![(header_id(1), prev_confirmed_nonce_at_source - 1)] + .into_iter() + .collect(); + strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; + assert_eq!( + strategy.select_nonces_to_deliver(&state), + Some(((20..=22), proof_parameters(false, 3))) + ); + } + + #[test] + fn message_delivery_strategy_waits_for_confirmed_nonce_header_to_appear_on_target() { + // 1 delivery 
confirmation from target to source is still missing, so we may deliver + // reward confirmation with our message delivery transaction. But the problem is that + // the reward has been paid at header 2 && this header is still unknown to target node. + // + // => so we can't deliver more than 3 messages + let (mut state, mut strategy) = prepare_strategy(); + let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; + strategy.latest_confirmed_nonces_at_source = vec![ + (header_id(1), prev_confirmed_nonce_at_source - 1), + (header_id(2), prev_confirmed_nonce_at_source), + ] + .into_iter() + .collect(); + strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; + state.best_finalized_source_header_id_at_best_target = Some(header_id(1)); + assert_eq!( + strategy.select_nonces_to_deliver(&state), + Some(((20..=22), proof_parameters(false, 3))) + ); + + // the same situation, but the header 2 is known to the target node, so we may deliver reward confirmation + let (mut state, mut strategy) = prepare_strategy(); + let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1; + strategy.latest_confirmed_nonces_at_source = vec![ + (header_id(1), prev_confirmed_nonce_at_source - 1), + (header_id(2), prev_confirmed_nonce_at_source), + ] + .into_iter() + .collect(); + strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; + state.best_finalized_source_header_id_at_source = Some(header_id(2)); + state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); + assert_eq!( + strategy.select_nonces_to_deliver(&state), + Some(((20..=23), proof_parameters(true, 4))) + ); + } +} diff --git a/polkadot/relays/messages/src/message_race_loop.rs b/polkadot/relays/messages/src/message_race_loop.rs new file mode 100644 index 00000000000..41f5ede1033 --- /dev/null +++ 
b/polkadot/relays/messages/src/message_race_loop.rs @@ -0,0 +1,627 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +//! Loop that is serving single race within message lane. This could be +//! message delivery race, receiving confirmations race or processing +//! confirmations race. +//! +//! The idea of the race is simple - we have `nonce`-s on source and target +//! nodes. We're trying to prove that the source node has this nonce (and +//! associated data - like messages, lane state, etc) to the target node by +//! generating and submitting proof. + +use crate::message_lane_loop::ClientState; + +use async_trait::async_trait; +use bp_messages::MessageNonce; +use futures::{ + future::FutureExt, + stream::{FusedStream, StreamExt}, +}; +use relay_utils::{process_future_result, retry_backoff, FailedClient, MaybeConnectionError}; +use std::{ + fmt::Debug, + ops::RangeInclusive, + time::{Duration, Instant}, +}; + +/// One of races within lane. +pub trait MessageRace { + /// Header id of the race source. + type SourceHeaderId: Debug + Clone + PartialEq; + /// Header id of the race source. + type TargetHeaderId: Debug + Clone + PartialEq; + + /// Message nonce used in the race. + type MessageNonce: Debug + Clone; + /// Proof that is generated and delivered in this race. + type Proof: Debug + Clone; + + /// Name of the race source. + fn source_name() -> String; + /// Name of the race target. 
+ fn target_name() -> String; +} + +/// State of race source client. +type SourceClientState

= ClientState<

::SourceHeaderId,

::TargetHeaderId>; + +/// State of race target client. +type TargetClientState

= ClientState<

::TargetHeaderId,

::SourceHeaderId>; + +/// Inclusive nonces range. +pub trait NoncesRange: Debug + Sized { + /// Get begin of the range. + fn begin(&self) -> MessageNonce; + /// Get end of the range. + fn end(&self) -> MessageNonce; + /// Returns new range with current range nonces that are greater than the passed `nonce`. + /// If there are no such nonces, `None` is returned. + fn greater_than(self, nonce: MessageNonce) -> Option; +} + +/// Nonces on the race source client. +#[derive(Debug, Clone)] +pub struct SourceClientNonces { + /// New nonces range known to the client. `New` here means all nonces generated after + /// `prev_latest_nonce` passed to the `SourceClient::nonces` method. + pub new_nonces: NoncesRange, + /// Latest nonce that is confirmed to the bridged client. This nonce only makes + /// sense in some races. In other races it is `None`. + pub confirmed_nonce: Option, +} + +/// Nonces on the race target client. +#[derive(Debug, Clone)] +pub struct TargetClientNonces { + /// Latest nonce that is known to the target client. + pub latest_nonce: MessageNonce, + /// Additional data from target node that may be used by the race. + pub nonces_data: TargetNoncesData, +} + +/// One of message lane clients, which is source client for the race. +#[async_trait] +pub trait SourceClient { + /// Type of error this clients returns. + type Error: std::fmt::Debug + MaybeConnectionError; + /// Type of nonces range returned by the source client. + type NoncesRange: NoncesRange; + /// Additional proof parameters required to generate proof. + type ProofParameters; + + /// Return nonces that are known to the source client. + async fn nonces( + &self, + at_block: P::SourceHeaderId, + prev_latest_nonce: MessageNonce, + ) -> Result<(P::SourceHeaderId, SourceClientNonces), Self::Error>; + /// Generate proof for delivering to the target client. 
+ async fn generate_proof( + &self, + at_block: P::SourceHeaderId, + nonces: RangeInclusive, + proof_parameters: Self::ProofParameters, + ) -> Result<(P::SourceHeaderId, RangeInclusive, P::Proof), Self::Error>; +} + +/// One of message lane clients, which is target client for the race. +#[async_trait] +pub trait TargetClient { + /// Type of error this clients returns. + type Error: std::fmt::Debug + MaybeConnectionError; + /// Type of the additional data from the target client, used by the race. + type TargetNoncesData: std::fmt::Debug; + + /// Ask headers relay to relay finalized headers up to (and including) given header + /// from race source to race target. + async fn require_source_header(&self, id: P::SourceHeaderId); + + /// Return nonces that are known to the target client. + async fn nonces( + &self, + at_block: P::TargetHeaderId, + update_metrics: bool, + ) -> Result<(P::TargetHeaderId, TargetClientNonces), Self::Error>; + /// Submit proof to the target client. + async fn submit_proof( + &self, + generated_at_block: P::SourceHeaderId, + nonces: RangeInclusive, + proof: P::Proof, + ) -> Result, Self::Error>; +} + +/// Race strategy. +pub trait RaceStrategy: Debug { + /// Type of nonces range expected from the source client. + type SourceNoncesRange: NoncesRange; + /// Additional proof parameters required to generate proof. + type ProofParameters; + /// Additional data expected from the target client. + type TargetNoncesData; + + /// Should return true if nothing has to be synced. + fn is_empty(&self) -> bool; + /// Return id of source header that is required to be on target to continue synchronization. + fn required_source_header_at_target(&self, current_best: &SourceHeaderId) -> Option; + /// Return best nonce at source node. + /// + /// `Some` is returned only if we are sure that the value is greater or equal + /// than the result of `best_at_target`. + fn best_at_source(&self) -> Option; + /// Return best nonce at target node. 
+ /// + /// May return `None` if value is yet unknown. + fn best_at_target(&self) -> Option; + + /// Called when nonces are updated at source node of the race. + fn source_nonces_updated(&mut self, at_block: SourceHeaderId, nonces: SourceClientNonces); + /// Called when best nonces are updated at target node of the race. + fn best_target_nonces_updated( + &mut self, + nonces: TargetClientNonces, + race_state: &mut RaceState, + ); + /// Called when finalized nonces are updated at target node of the race. + fn finalized_target_nonces_updated( + &mut self, + nonces: TargetClientNonces, + race_state: &mut RaceState, + ); + /// Should return `Some(nonces)` if we need to deliver proof of `nonces` (and associated + /// data) from source to target node. + /// Additionally, parameters required to generate proof are returned. + fn select_nonces_to_deliver( + &mut self, + race_state: &RaceState, + ) -> Option<(RangeInclusive, Self::ProofParameters)>; +} + +/// State of the race. +#[derive(Debug)] +pub struct RaceState { + /// Best finalized source header id at the source client. + pub best_finalized_source_header_id_at_source: Option, + /// Best finalized source header id at the best block on the target + /// client (at the `best_finalized_source_header_id_at_best_target`). + pub best_finalized_source_header_id_at_best_target: Option, + /// Best header id at the target client. + pub best_target_header_id: Option, + /// Best finalized header id at the target client. + pub best_finalized_target_header_id: Option, + /// Range of nonces that we have selected to submit. + pub nonces_to_submit: Option<(SourceHeaderId, RangeInclusive, Proof)>, + /// Range of nonces that is currently submitted. + pub nonces_submitted: Option>, +} + +/// Run race loop until connection with target or source node is lost. +pub async fn run, TC: TargetClient

>( + race_source: SC, + race_source_updated: impl FusedStream>, + race_target: TC, + race_target_updated: impl FusedStream>, + stall_timeout: Duration, + mut strategy: impl RaceStrategy< + P::SourceHeaderId, + P::TargetHeaderId, + P::Proof, + SourceNoncesRange = SC::NoncesRange, + ProofParameters = SC::ProofParameters, + TargetNoncesData = TC::TargetNoncesData, + >, +) -> Result<(), FailedClient> { + let mut progress_context = Instant::now(); + let mut race_state = RaceState::default(); + let mut stall_countdown = Instant::now(); + + let mut source_retry_backoff = retry_backoff(); + let mut source_client_is_online = true; + let mut source_nonces_required = false; + let source_nonces = futures::future::Fuse::terminated(); + let source_generate_proof = futures::future::Fuse::terminated(); + let source_go_offline_future = futures::future::Fuse::terminated(); + + let mut target_retry_backoff = retry_backoff(); + let mut target_client_is_online = true; + let mut target_best_nonces_required = false; + let mut target_finalized_nonces_required = false; + let target_best_nonces = futures::future::Fuse::terminated(); + let target_finalized_nonces = futures::future::Fuse::terminated(); + let target_submit_proof = futures::future::Fuse::terminated(); + let target_go_offline_future = futures::future::Fuse::terminated(); + + futures::pin_mut!( + race_source_updated, + source_nonces, + source_generate_proof, + source_go_offline_future, + race_target_updated, + target_best_nonces, + target_finalized_nonces, + target_submit_proof, + target_go_offline_future, + ); + + loop { + futures::select! 
{ + // when headers ids are updated + source_state = race_source_updated.next() => { + if let Some(source_state) = source_state { + let is_source_state_updated = race_state.best_finalized_source_header_id_at_source.as_ref() + != Some(&source_state.best_finalized_self); + if is_source_state_updated { + source_nonces_required = true; + race_state.best_finalized_source_header_id_at_source = Some(source_state.best_finalized_self); + } + } + }, + target_state = race_target_updated.next() => { + if let Some(target_state) = target_state { + let is_target_best_state_updated = race_state.best_target_header_id.as_ref() + != Some(&target_state.best_self); + + if is_target_best_state_updated { + target_best_nonces_required = true; + race_state.best_target_header_id = Some(target_state.best_self); + race_state.best_finalized_source_header_id_at_best_target + = Some(target_state.best_finalized_peer_at_best_self); + } + + let is_target_finalized_state_updated = race_state.best_finalized_target_header_id.as_ref() + != Some(&target_state.best_finalized_self); + if is_target_finalized_state_updated { + target_finalized_nonces_required = true; + race_state.best_finalized_target_header_id = Some(target_state.best_finalized_self); + } + } + }, + + // when nonces are updated + nonces = source_nonces => { + source_nonces_required = false; + + source_client_is_online = process_future_result( + nonces, + &mut source_retry_backoff, + |(at_block, nonces)| { + log::debug!( + target: "bridge", + "Received nonces from {}: {:?}", + P::source_name(), + nonces, + ); + + strategy.source_nonces_updated(at_block, nonces); + }, + &mut source_go_offline_future, + async_std::task::sleep, + || format!("Error retrieving nonces from {}", P::source_name()), + ).fail_if_connection_error(FailedClient::Source)?; + + // ask for more headers if we have nonces to deliver and required headers are missing + let required_source_header_id = race_state + .best_finalized_source_header_id_at_best_target + .as_ref() + 
.and_then(|best|strategy.required_source_header_at_target(best)); + if let Some(required_source_header_id) = required_source_header_id { + race_target.require_source_header(required_source_header_id).await; + } + }, + nonces = target_best_nonces => { + target_best_nonces_required = false; + + target_client_is_online = process_future_result( + nonces, + &mut target_retry_backoff, + |(_, nonces)| { + log::debug!( + target: "bridge", + "Received best nonces from {}: {:?}", + P::target_name(), + nonces, + ); + + let prev_best_at_target = strategy.best_at_target(); + strategy.best_target_nonces_updated(nonces, &mut race_state); + if strategy.best_at_target() != prev_best_at_target { + stall_countdown = Instant::now(); + } + }, + &mut target_go_offline_future, + async_std::task::sleep, + || format!("Error retrieving best nonces from {}", P::target_name()), + ).fail_if_connection_error(FailedClient::Target)?; + }, + nonces = target_finalized_nonces => { + target_finalized_nonces_required = false; + + target_client_is_online = process_future_result( + nonces, + &mut target_retry_backoff, + |(_, nonces)| { + log::debug!( + target: "bridge", + "Received finalized nonces from {}: {:?}", + P::target_name(), + nonces, + ); + + strategy.finalized_target_nonces_updated(nonces, &mut race_state); + }, + &mut target_go_offline_future, + async_std::task::sleep, + || format!("Error retrieving finalized nonces from {}", P::target_name()), + ).fail_if_connection_error(FailedClient::Target)?; + }, + + // proof generation and submission + proof = source_generate_proof => { + source_client_is_online = process_future_result( + proof, + &mut source_retry_backoff, + |(at_block, nonces_range, proof)| { + log::debug!( + target: "bridge", + "Received proof for nonces in range {:?} from {}", + nonces_range, + P::source_name(), + ); + + race_state.nonces_to_submit = Some((at_block, nonces_range, proof)); + }, + &mut source_go_offline_future, + async_std::task::sleep, + || format!("Error generating 
proof at {}", P::source_name()), + ).fail_if_connection_error(FailedClient::Source)?; + }, + proof_submit_result = target_submit_proof => { + target_client_is_online = process_future_result( + proof_submit_result, + &mut target_retry_backoff, + |nonces_range| { + log::debug!( + target: "bridge", + "Successfully submitted proof of nonces {:?} to {}", + nonces_range, + P::target_name(), + ); + + race_state.nonces_to_submit = None; + race_state.nonces_submitted = Some(nonces_range); + stall_countdown = Instant::now(); + }, + &mut target_go_offline_future, + async_std::task::sleep, + || format!("Error submitting proof {}", P::target_name()), + ).fail_if_connection_error(FailedClient::Target)?; + }, + + // when we're ready to retry request + _ = source_go_offline_future => { + source_client_is_online = true; + }, + _ = target_go_offline_future => { + target_client_is_online = true; + }, + } + + progress_context = print_race_progress::(progress_context, &strategy); + + if stall_countdown.elapsed() > stall_timeout { + log::warn!( + target: "bridge", + "{} -> {} race has stalled. State: {:?}. 
Strategy: {:?}", + P::source_name(), + P::target_name(), + race_state, + strategy, + ); + + return Err(FailedClient::Both); + } else if race_state.nonces_to_submit.is_none() && race_state.nonces_submitted.is_none() && strategy.is_empty() + { + stall_countdown = Instant::now(); + } + + if source_client_is_online { + source_client_is_online = false; + + let nonces_to_deliver = select_nonces_to_deliver(&race_state, &mut strategy); + let best_at_source = strategy.best_at_source(); + + if let Some((at_block, nonces_range, proof_parameters)) = nonces_to_deliver { + log::debug!( + target: "bridge", + "Asking {} to prove nonces in range {:?} at block {:?}", + P::source_name(), + nonces_range, + at_block, + ); + source_generate_proof.set( + race_source + .generate_proof(at_block, nonces_range, proof_parameters) + .fuse(), + ); + } else if source_nonces_required && best_at_source.is_some() { + log::debug!(target: "bridge", "Asking {} about message nonces", P::source_name()); + let at_block = race_state + .best_finalized_source_header_id_at_source + .as_ref() + .expect( + "source_nonces_required is only true when\ + best_finalized_source_header_id_at_source is Some; qed", + ) + .clone(); + source_nonces.set( + race_source + .nonces(at_block, best_at_source.expect("guaranteed by if condition; qed")) + .fuse(), + ); + } else { + source_client_is_online = true; + } + } + + if target_client_is_online { + target_client_is_online = false; + + if let Some((at_block, nonces_range, proof)) = race_state.nonces_to_submit.as_ref() { + log::debug!( + target: "bridge", + "Going to submit proof of messages in range {:?} to {} node", + nonces_range, + P::target_name(), + ); + target_submit_proof.set( + race_target + .submit_proof(at_block.clone(), nonces_range.clone(), proof.clone()) + .fuse(), + ); + } else if target_best_nonces_required { + log::debug!(target: "bridge", "Asking {} about best message nonces", P::target_name()); + let at_block = race_state + .best_target_header_id + 
.as_ref() + .expect("target_best_nonces_required is only true when best_target_header_id is Some; qed") + .clone(); + target_best_nonces.set(race_target.nonces(at_block, false).fuse()); + } else if target_finalized_nonces_required { + log::debug!(target: "bridge", "Asking {} about finalized message nonces", P::target_name()); + let at_block = race_state + .best_finalized_target_header_id + .as_ref() + .expect( + "target_finalized_nonces_required is only true when\ + best_finalized_target_header_id is Some; qed", + ) + .clone(); + target_finalized_nonces.set(race_target.nonces(at_block, true).fuse()); + } else { + target_client_is_online = true; + } + } + } +} + +impl Default for RaceState { + fn default() -> Self { + RaceState { + best_finalized_source_header_id_at_source: None, + best_finalized_source_header_id_at_best_target: None, + best_target_header_id: None, + best_finalized_target_header_id: None, + nonces_to_submit: None, + nonces_submitted: None, + } + } +} + +/// Print race progress. 
+fn print_race_progress(prev_time: Instant, strategy: &S) -> Instant +where + P: MessageRace, + S: RaceStrategy, +{ + let now_time = Instant::now(); + + let need_update = now_time.saturating_duration_since(prev_time) > Duration::from_secs(10); + if !need_update { + return prev_time; + } + + let now_best_nonce_at_source = strategy.best_at_source(); + let now_best_nonce_at_target = strategy.best_at_target(); + log::info!( + target: "bridge", + "Synced {:?} of {:?} nonces in {} -> {} race", + now_best_nonce_at_target, + now_best_nonce_at_source, + P::source_name(), + P::target_name(), + ); + now_time +} + +fn select_nonces_to_deliver( + race_state: &RaceState, + strategy: &mut Strategy, +) -> Option<(SourceHeaderId, RangeInclusive, Strategy::ProofParameters)> +where + SourceHeaderId: Clone, + Strategy: RaceStrategy, +{ + race_state + .best_finalized_source_header_id_at_best_target + .as_ref() + .and_then(|best_finalized_source_header_id_at_best_target| { + strategy + .select_nonces_to_deliver(&race_state) + .map(|(nonces_range, proof_parameters)| { + ( + best_finalized_source_header_id_at_best_target.clone(), + nonces_range, + proof_parameters, + ) + }) + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::message_race_strategy::BasicStrategy; + use relay_utils::HeaderId; + + #[test] + fn proof_is_generated_at_best_block_known_to_target_node() { + const GENERATED_AT: u64 = 6; + const BEST_AT_SOURCE: u64 = 10; + const BEST_AT_TARGET: u64 = 8; + + // target node only knows about source' BEST_AT_TARGET block + // source node has BEST_AT_SOURCE > BEST_AT_TARGET block + let mut race_state = RaceState::<_, _, ()> { + best_finalized_source_header_id_at_source: Some(HeaderId(BEST_AT_SOURCE, BEST_AT_SOURCE)), + best_finalized_source_header_id_at_best_target: Some(HeaderId(BEST_AT_TARGET, BEST_AT_TARGET)), + best_target_header_id: Some(HeaderId(0, 0)), + best_finalized_target_header_id: Some(HeaderId(0, 0)), + nonces_to_submit: None, + nonces_submitted: None, + }; + 
+ // we have some nonces to deliver and they're generated at GENERATED_AT < BEST_AT_SOURCE + let mut strategy = BasicStrategy::new(); + strategy.source_nonces_updated( + HeaderId(GENERATED_AT, GENERATED_AT), + SourceClientNonces { + new_nonces: 0..=10, + confirmed_nonce: None, + }, + ); + strategy.best_target_nonces_updated( + TargetClientNonces { + latest_nonce: 5u64, + nonces_data: (), + }, + &mut race_state, + ); + + // the proof will be generated on source, but using BEST_AT_TARGET block + assert_eq!( + select_nonces_to_deliver(&race_state, &mut strategy), + Some((HeaderId(BEST_AT_TARGET, BEST_AT_TARGET), 6..=10, (),)) + ); + } +} diff --git a/polkadot/relays/messages/src/message_race_receiving.rs b/polkadot/relays/messages/src/message_race_receiving.rs new file mode 100644 index 00000000000..4381b63591f --- /dev/null +++ b/polkadot/relays/messages/src/message_race_receiving.rs @@ -0,0 +1,236 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +//! Message receiving race delivers proof-of-messages-delivery from lane.target to lane.source. 
+ +use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}; +use crate::message_lane_loop::{ + SourceClient as MessageLaneSourceClient, SourceClientState, TargetClient as MessageLaneTargetClient, + TargetClientState, +}; +use crate::message_race_loop::{ + MessageRace, NoncesRange, SourceClient, SourceClientNonces, TargetClient, TargetClientNonces, +}; +use crate::message_race_strategy::BasicStrategy; +use crate::metrics::MessageLaneLoopMetrics; + +use async_trait::async_trait; +use bp_messages::MessageNonce; +use futures::stream::FusedStream; +use relay_utils::FailedClient; +use std::{marker::PhantomData, ops::RangeInclusive, time::Duration}; + +/// Message receiving confirmations delivery strategy. +type ReceivingConfirmationsBasicStrategy
<P>
= BasicStrategy< +
<P as MessageLane>
::TargetHeaderNumber, +
<P as MessageLane>
::TargetHeaderHash, +
<P as MessageLane>
::SourceHeaderNumber, +
<P as MessageLane>
::SourceHeaderHash, + RangeInclusive, +
<P as MessageLane>
::MessagesReceivingProof, +>; + +/// Run receiving confirmations race. +pub async fn run( + source_client: impl MessageLaneSourceClient
<P>
, + source_state_updates: impl FusedStream>, + target_client: impl MessageLaneTargetClient
<P>
, + target_state_updates: impl FusedStream>, + stall_timeout: Duration, + metrics_msg: Option, +) -> Result<(), FailedClient> { + crate::message_race_loop::run( + ReceivingConfirmationsRaceSource { + client: target_client, + metrics_msg: metrics_msg.clone(), + _phantom: Default::default(), + }, + target_state_updates, + ReceivingConfirmationsRaceTarget { + client: source_client, + metrics_msg, + _phantom: Default::default(), + }, + source_state_updates, + stall_timeout, + ReceivingConfirmationsBasicStrategy::
<P>
::new(), + ) + .await +} + +/// Messages receiving confirmations race. +struct ReceivingConfirmationsRace
<P>
(std::marker::PhantomData
<P>
); + +impl MessageRace for ReceivingConfirmationsRace
<P>
{ + type SourceHeaderId = TargetHeaderIdOf
<P>
; + type TargetHeaderId = SourceHeaderIdOf
<P>
; + + type MessageNonce = MessageNonce; + type Proof = P::MessagesReceivingProof; + + fn source_name() -> String { + format!("{}::ReceivingConfirmationsDelivery", P::TARGET_NAME) + } + + fn target_name() -> String { + format!("{}::ReceivingConfirmationsDelivery", P::SOURCE_NAME) + } +} + +/// Message receiving confirmations race source, which is a target of the lane. +struct ReceivingConfirmationsRaceSource { + client: C, + metrics_msg: Option, + _phantom: PhantomData
<P>
, +} + +#[async_trait] +impl SourceClient> for ReceivingConfirmationsRaceSource +where + P: MessageLane, + C: MessageLaneTargetClient
<P>
, +{ + type Error = C::Error; + type NoncesRange = RangeInclusive; + type ProofParameters = (); + + async fn nonces( + &self, + at_block: TargetHeaderIdOf
<P>
, + prev_latest_nonce: MessageNonce, + ) -> Result<(TargetHeaderIdOf
<P>
, SourceClientNonces), Self::Error> { + let (at_block, latest_received_nonce) = self.client.latest_received_nonce(at_block).await?; + if let Some(metrics_msg) = self.metrics_msg.as_ref() { + metrics_msg.update_target_latest_received_nonce::
<P>
(latest_received_nonce); + } + Ok(( + at_block, + SourceClientNonces { + new_nonces: prev_latest_nonce + 1..=latest_received_nonce, + confirmed_nonce: None, + }, + )) + } + + #[allow(clippy::unit_arg)] + async fn generate_proof( + &self, + at_block: TargetHeaderIdOf
<P>
, + nonces: RangeInclusive, + _proof_parameters: Self::ProofParameters, + ) -> Result< + ( + TargetHeaderIdOf
<P>
, + RangeInclusive, + P::MessagesReceivingProof, + ), + Self::Error, + > { + self.client + .prove_messages_receiving(at_block) + .await + .map(|(at_block, proof)| (at_block, nonces, proof)) + } +} + +/// Message receiving confirmations race target, which is a source of the lane. +struct ReceivingConfirmationsRaceTarget { + client: C, + metrics_msg: Option, + _phantom: PhantomData
<P>
, +} + +#[async_trait] +impl TargetClient> for ReceivingConfirmationsRaceTarget +where + P: MessageLane, + C: MessageLaneSourceClient
<P>
, +{ + type Error = C::Error; + type TargetNoncesData = (); + + async fn require_source_header(&self, id: TargetHeaderIdOf
<P>
) { + self.client.require_target_header_on_source(id).await + } + + async fn nonces( + &self, + at_block: SourceHeaderIdOf
<P>
, + update_metrics: bool, + ) -> Result<(SourceHeaderIdOf
<P>
, TargetClientNonces<()>), Self::Error> { + let (at_block, latest_confirmed_nonce) = self.client.latest_confirmed_received_nonce(at_block).await?; + if update_metrics { + if let Some(metrics_msg) = self.metrics_msg.as_ref() { + metrics_msg.update_source_latest_confirmed_nonce::
<P>
(latest_confirmed_nonce); + } + } + Ok(( + at_block, + TargetClientNonces { + latest_nonce: latest_confirmed_nonce, + nonces_data: (), + }, + )) + } + + async fn submit_proof( + &self, + generated_at_block: TargetHeaderIdOf
<P>
, + nonces: RangeInclusive, + proof: P::MessagesReceivingProof, + ) -> Result, Self::Error> { + self.client + .submit_messages_receiving_proof(generated_at_block, proof) + .await?; + Ok(nonces) + } +} + +impl NoncesRange for RangeInclusive { + fn begin(&self) -> MessageNonce { + *RangeInclusive::::start(self) + } + + fn end(&self) -> MessageNonce { + *RangeInclusive::::end(self) + } + + fn greater_than(self, nonce: MessageNonce) -> Option { + let next_nonce = nonce + 1; + let end = *self.end(); + if next_nonce > end { + None + } else { + Some(std::cmp::max(self.begin(), next_nonce)..=end) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn range_inclusive_works_as_nonces_range() { + let range = 20..=30; + + assert_eq!(NoncesRange::begin(&range), 20); + assert_eq!(NoncesRange::end(&range), 30); + assert_eq!(range.clone().greater_than(10), Some(20..=30)); + assert_eq!(range.clone().greater_than(19), Some(20..=30)); + assert_eq!(range.clone().greater_than(20), Some(21..=30)); + assert_eq!(range.clone().greater_than(25), Some(26..=30)); + assert_eq!(range.clone().greater_than(29), Some(30..=30)); + assert_eq!(range.greater_than(30), None); + } +} diff --git a/polkadot/relays/messages/src/message_race_strategy.rs b/polkadot/relays/messages/src/message_race_strategy.rs new file mode 100644 index 00000000000..7088f8d74b5 --- /dev/null +++ b/polkadot/relays/messages/src/message_race_strategy.rs @@ -0,0 +1,488 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +//! Basic delivery strategy. The strategy selects nonces if: +//! +//! 1) there are more nonces on the source side than on the target side; +//! 2) new nonces may be proved to target node (i.e. they have appeared at the +//! block, which is known to the target node). + +use crate::message_race_loop::{NoncesRange, RaceState, RaceStrategy, SourceClientNonces, TargetClientNonces}; + +use bp_messages::MessageNonce; +use relay_utils::HeaderId; +use std::{collections::VecDeque, fmt::Debug, marker::PhantomData, ops::RangeInclusive}; + +/// Nonces delivery strategy. +#[derive(Debug)] +pub struct BasicStrategy< + SourceHeaderNumber, + SourceHeaderHash, + TargetHeaderNumber, + TargetHeaderHash, + SourceNoncesRange, + Proof, +> { + /// All queued nonces. + source_queue: VecDeque<(HeaderId, SourceNoncesRange)>, + /// Best nonce known to target node (at its best block). `None` if it has not been received yet. + best_target_nonce: Option, + /// Unused generic types dump. + _phantom: PhantomData<(TargetHeaderNumber, TargetHeaderHash, Proof)>, +} + +impl + BasicStrategy +where + SourceHeaderHash: Clone, + SourceHeaderNumber: Clone + Ord, + SourceNoncesRange: NoncesRange, +{ + /// Create new delivery strategy. + pub fn new() -> Self { + BasicStrategy { + source_queue: VecDeque::new(), + best_target_nonce: None, + _phantom: Default::default(), + } + } + + /// Mutable reference to source queue to use in tests. + #[cfg(test)] + pub(crate) fn source_queue_mut( + &mut self, + ) -> &mut VecDeque<(HeaderId, SourceNoncesRange)> { + &mut self.source_queue + } + + /// Should return `Some(nonces)` if we need to deliver proof of `nonces` (and associated + /// data) from source to target node. 
+ /// + /// The `selector` function receives range of nonces and should return `None` if the whole + /// range needs to be delivered. If there are some nonces in the range that can't be delivered + /// right now, it should return `Some` with 'undeliverable' nonces. Please keep in mind that + /// this should be the sub-range that the passed range ends with, because nonces are always + /// delivered in-order. Otherwise the function will panic. + pub fn select_nonces_to_deliver_with_selector( + &mut self, + race_state: &RaceState< + HeaderId, + HeaderId, + Proof, + >, + mut selector: impl FnMut(SourceNoncesRange) -> Option, + ) -> Option> { + // if we do not know best nonce at target node, we can't select anything + let target_nonce = self.best_target_nonce?; + + // if we have already selected nonces that we want to submit, do nothing + if race_state.nonces_to_submit.is_some() { + return None; + } + + // if we already submitted some nonces, do nothing + if race_state.nonces_submitted.is_some() { + return None; + } + + // 1) we want to deliver all nonces, starting from `target_nonce + 1` + // 2) we can't deliver new nonce until header, that has emitted this nonce, is finalized + // by target client + // 3) selector is used for more complicated logic + let best_header_at_target = &race_state.best_finalized_source_header_id_at_best_target.as_ref()?; + let mut nonces_end = None; + while let Some((queued_at, queued_range)) = self.source_queue.pop_front() { + // select (sub) range to deliver + let queued_range_begin = queued_range.begin(); + let queued_range_end = queued_range.end(); + let range_to_requeue = if queued_at.0 > best_header_at_target.0 { + // if header that has queued the range is not yet finalized at bridged chain, + // we can't prove anything + Some(queued_range) + } else { + // selector returns `Some(range)` if this `range` needs to be requeued + selector(queued_range) + }; + + // requeue (sub) range and update range to deliver + match range_to_requeue { + 
Some(range_to_requeue) => { + assert!( + range_to_requeue.begin() <= range_to_requeue.end() + && range_to_requeue.begin() >= queued_range_begin + && range_to_requeue.end() == queued_range_end, + "Incorrect implementation of internal `selector` function. Expected original\ + range {:?} to end with returned range {:?}", + queued_range_begin..=queued_range_end, + range_to_requeue, + ); + + if range_to_requeue.begin() != queued_range_begin { + nonces_end = Some(range_to_requeue.begin() - 1); + } + self.source_queue.push_front((queued_at, range_to_requeue)); + break; + } + None => { + nonces_end = Some(queued_range_end); + } + } + } + + nonces_end.map(|nonces_end| RangeInclusive::new(target_nonce + 1, nonces_end)) + } +} + +impl + RaceStrategy, HeaderId, Proof> + for BasicStrategy +where + SourceHeaderHash: Clone + Debug, + SourceHeaderNumber: Clone + Ord + Debug, + SourceNoncesRange: NoncesRange + Debug, + TargetHeaderHash: Debug, + TargetHeaderNumber: Debug, + Proof: Debug, +{ + type SourceNoncesRange = SourceNoncesRange; + type ProofParameters = (); + type TargetNoncesData = (); + + fn is_empty(&self) -> bool { + self.source_queue.is_empty() + } + + fn required_source_header_at_target( + &self, + current_best: &HeaderId, + ) -> Option> { + self.source_queue + .back() + .and_then(|(h, _)| if h.0 > current_best.0 { Some(h.clone()) } else { None }) + } + + fn best_at_source(&self) -> Option { + let best_in_queue = self.source_queue.back().map(|(_, range)| range.end()); + match (best_in_queue, self.best_target_nonce) { + (Some(best_in_queue), Some(best_target_nonce)) if best_in_queue > best_target_nonce => Some(best_in_queue), + (_, Some(best_target_nonce)) => Some(best_target_nonce), + (_, None) => None, + } + } + + fn best_at_target(&self) -> Option { + self.best_target_nonce + } + + fn source_nonces_updated( + &mut self, + at_block: HeaderId, + nonces: SourceClientNonces, + ) { + let best_in_queue = self + .source_queue + .back() + .map(|(_, range)| range.end()) + 
.or(self.best_target_nonce) + .unwrap_or_default(); + self.source_queue.extend( + nonces + .new_nonces + .greater_than(best_in_queue) + .into_iter() + .map(move |range| (at_block.clone(), range)), + ) + } + + fn best_target_nonces_updated( + &mut self, + nonces: TargetClientNonces<()>, + race_state: &mut RaceState< + HeaderId, + HeaderId, + Proof, + >, + ) { + let nonce = nonces.latest_nonce; + + if let Some(best_target_nonce) = self.best_target_nonce { + if nonce < best_target_nonce { + return; + } + } + + while let Some(true) = self.source_queue.front().map(|(_, range)| range.begin() <= nonce) { + let maybe_subrange = self + .source_queue + .pop_front() + .and_then(|(at_block, range)| range.greater_than(nonce).map(|subrange| (at_block, subrange))); + if let Some((at_block, subrange)) = maybe_subrange { + self.source_queue.push_front((at_block, subrange)); + break; + } + } + + let need_to_select_new_nonces = race_state + .nonces_to_submit + .as_ref() + .map(|(_, nonces, _)| *nonces.end() <= nonce) + .unwrap_or(false); + if need_to_select_new_nonces { + race_state.nonces_to_submit = None; + } + + let need_new_nonces_to_submit = race_state + .nonces_submitted + .as_ref() + .map(|nonces| *nonces.end() <= nonce) + .unwrap_or(false); + if need_new_nonces_to_submit { + race_state.nonces_submitted = None; + } + + self.best_target_nonce = Some(std::cmp::max( + self.best_target_nonce.unwrap_or(nonces.latest_nonce), + nonce, + )); + } + + fn finalized_target_nonces_updated( + &mut self, + nonces: TargetClientNonces<()>, + _race_state: &mut RaceState< + HeaderId, + HeaderId, + Proof, + >, + ) { + self.best_target_nonce = Some(std::cmp::max( + self.best_target_nonce.unwrap_or(nonces.latest_nonce), + nonces.latest_nonce, + )); + } + + fn select_nonces_to_deliver( + &mut self, + race_state: &RaceState< + HeaderId, + HeaderId, + Proof, + >, + ) -> Option<(RangeInclusive, Self::ProofParameters)> { + self.select_nonces_to_deliver_with_selector(race_state, |_| None) + .map(|range| 
(range, ())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::message_lane::MessageLane; + use crate::message_lane_loop::tests::{header_id, TestMessageLane, TestMessagesProof}; + + type SourceNoncesRange = RangeInclusive; + + type BasicStrategy
<P>
= super::BasicStrategy< +
<P as MessageLane>
::SourceHeaderNumber, +
<P as MessageLane>
::SourceHeaderHash, +
<P as MessageLane>
::TargetHeaderNumber, +
<P as MessageLane>
::TargetHeaderHash, + SourceNoncesRange, +
<P as MessageLane>
::MessagesProof, + >; + + fn source_nonces(new_nonces: SourceNoncesRange) -> SourceClientNonces { + SourceClientNonces { + new_nonces, + confirmed_nonce: None, + } + } + + fn target_nonces(latest_nonce: MessageNonce) -> TargetClientNonces<()> { + TargetClientNonces { + latest_nonce, + nonces_data: (), + } + } + + #[test] + fn strategy_is_empty_works() { + let mut strategy = BasicStrategy::::new(); + assert_eq!(strategy.is_empty(), true); + strategy.source_nonces_updated(header_id(1), source_nonces(1..=1)); + assert_eq!(strategy.is_empty(), false); + } + + #[test] + fn best_at_source_is_never_lower_than_target_nonce() { + let mut strategy = BasicStrategy::::new(); + assert_eq!(strategy.best_at_source(), None); + strategy.source_nonces_updated(header_id(1), source_nonces(1..=5)); + assert_eq!(strategy.best_at_source(), None); + strategy.best_target_nonces_updated(target_nonces(10), &mut Default::default()); + assert_eq!(strategy.source_queue, vec![]); + assert_eq!(strategy.best_at_source(), Some(10)); + } + + #[test] + fn source_nonce_is_never_lower_than_known_target_nonce() { + let mut strategy = BasicStrategy::::new(); + strategy.best_target_nonces_updated(target_nonces(10), &mut Default::default()); + strategy.source_nonces_updated(header_id(1), source_nonces(1..=5)); + assert_eq!(strategy.source_queue, vec![]); + } + + #[test] + fn source_nonce_is_never_lower_than_latest_known_source_nonce() { + let mut strategy = BasicStrategy::::new(); + strategy.source_nonces_updated(header_id(1), source_nonces(1..=5)); + strategy.source_nonces_updated(header_id(2), source_nonces(1..=3)); + strategy.source_nonces_updated(header_id(2), source_nonces(1..=5)); + assert_eq!(strategy.source_queue, vec![(header_id(1), 1..=5)]); + } + + #[test] + fn target_nonce_is_never_lower_than_latest_known_target_nonce() { + let mut strategy = BasicStrategy::::new(); + assert_eq!(strategy.best_target_nonce, None); + strategy.best_target_nonces_updated(target_nonces(10), &mut Default::default()); 
+ assert_eq!(strategy.best_target_nonce, Some(10)); + strategy.best_target_nonces_updated(target_nonces(5), &mut Default::default()); + assert_eq!(strategy.best_target_nonce, Some(10)); + } + + #[test] + fn updated_target_nonce_removes_queued_entries() { + let mut strategy = BasicStrategy::::new(); + strategy.source_nonces_updated(header_id(1), source_nonces(1..=5)); + strategy.source_nonces_updated(header_id(2), source_nonces(6..=10)); + strategy.source_nonces_updated(header_id(3), source_nonces(11..=15)); + strategy.source_nonces_updated(header_id(4), source_nonces(16..=20)); + strategy.best_target_nonces_updated(target_nonces(15), &mut Default::default()); + assert_eq!(strategy.source_queue, vec![(header_id(4), 16..=20)]); + strategy.best_target_nonces_updated(target_nonces(17), &mut Default::default()); + assert_eq!(strategy.source_queue, vec![(header_id(4), 18..=20)]); + } + + #[test] + fn selected_nonces_are_dropped_on_target_nonce_update() { + let mut state = RaceState::default(); + let mut strategy = BasicStrategy::::new(); + state.nonces_to_submit = Some((header_id(1), 5..=10, (5..=10, None))); + strategy.best_target_nonces_updated(target_nonces(7), &mut state); + assert!(state.nonces_to_submit.is_some()); + strategy.best_target_nonces_updated(target_nonces(10), &mut state); + assert!(state.nonces_to_submit.is_none()); + } + + #[test] + fn submitted_nonces_are_dropped_on_target_nonce_update() { + let mut state = RaceState::default(); + let mut strategy = BasicStrategy::::new(); + state.nonces_submitted = Some(5..=10); + strategy.best_target_nonces_updated(target_nonces(7), &mut state); + assert!(state.nonces_submitted.is_some()); + strategy.best_target_nonces_updated(target_nonces(10), &mut state); + assert!(state.nonces_submitted.is_none()); + } + + #[test] + fn nothing_is_selected_if_something_is_already_selected() { + let mut state = RaceState::default(); + let mut strategy = BasicStrategy::::new(); + state.nonces_to_submit = Some((header_id(1), 1..=10, 
(1..=10, None))); + strategy.best_target_nonces_updated(target_nonces(0), &mut state); + strategy.source_nonces_updated(header_id(1), source_nonces(1..=10)); + assert_eq!(strategy.select_nonces_to_deliver(&state), None); + } + + #[test] + fn nothing_is_selected_if_something_is_already_submitted() { + let mut state = RaceState::default(); + let mut strategy = BasicStrategy::::new(); + state.nonces_submitted = Some(1..=10); + strategy.best_target_nonces_updated(target_nonces(0), &mut state); + strategy.source_nonces_updated(header_id(1), source_nonces(1..=10)); + assert_eq!(strategy.select_nonces_to_deliver(&state), None); + } + + #[test] + fn select_nonces_to_deliver_works() { + let mut state = RaceState::<_, _, TestMessagesProof>::default(); + let mut strategy = BasicStrategy::::new(); + strategy.best_target_nonces_updated(target_nonces(0), &mut state); + strategy.source_nonces_updated(header_id(1), source_nonces(1..=1)); + strategy.source_nonces_updated(header_id(2), source_nonces(2..=2)); + strategy.source_nonces_updated(header_id(3), source_nonces(3..=6)); + strategy.source_nonces_updated(header_id(5), source_nonces(7..=8)); + + state.best_finalized_source_header_id_at_best_target = Some(header_id(4)); + assert_eq!(strategy.select_nonces_to_deliver(&state), Some((1..=6, ()))); + strategy.best_target_nonces_updated(target_nonces(6), &mut state); + assert_eq!(strategy.select_nonces_to_deliver(&state), None); + + state.best_finalized_source_header_id_at_best_target = Some(header_id(5)); + assert_eq!(strategy.select_nonces_to_deliver(&state), Some((7..=8, ()))); + strategy.best_target_nonces_updated(target_nonces(8), &mut state); + assert_eq!(strategy.select_nonces_to_deliver(&state), None); + } + + #[test] + fn select_nonces_to_deliver_able_to_split_ranges_with_selector() { + let mut state = RaceState::<_, _, TestMessagesProof>::default(); + let mut strategy = BasicStrategy::::new(); + strategy.best_target_nonces_updated(target_nonces(0), &mut state); + 
strategy.source_nonces_updated(header_id(1), source_nonces(1..=100)); + + state.best_finalized_source_header_id_at_source = Some(header_id(1)); + state.best_finalized_source_header_id_at_best_target = Some(header_id(1)); + state.best_target_header_id = Some(header_id(1)); + + assert_eq!( + strategy.select_nonces_to_deliver_with_selector(&state, |_| Some(50..=100)), + Some(1..=49), + ); + } + + fn run_panic_test_for_incorrect_selector( + invalid_selector: impl Fn(SourceNoncesRange) -> Option, + ) { + let mut state = RaceState::<_, _, TestMessagesProof>::default(); + let mut strategy = BasicStrategy::::new(); + strategy.source_nonces_updated(header_id(1), source_nonces(1..=100)); + strategy.best_target_nonces_updated(target_nonces(50), &mut state); + state.best_finalized_source_header_id_at_source = Some(header_id(1)); + state.best_finalized_source_header_id_at_best_target = Some(header_id(1)); + state.best_target_header_id = Some(header_id(1)); + strategy.select_nonces_to_deliver_with_selector(&state, invalid_selector); + } + + #[test] + #[should_panic] + fn select_nonces_to_deliver_panics_if_selector_returns_empty_range() { + #[allow(clippy::reversed_empty_ranges)] + run_panic_test_for_incorrect_selector(|_| Some(2..=1)) + } + + #[test] + #[should_panic] + fn select_nonces_to_deliver_panics_if_selector_returns_range_that_starts_before_passed_range() { + run_panic_test_for_incorrect_selector(|range| Some(range.begin() - 1..=*range.end())) + } + + #[test] + #[should_panic] + fn select_nonces_to_deliver_panics_if_selector_returns_range_with_mismatched_end() { + run_panic_test_for_incorrect_selector(|range| Some(range.begin()..=*range.end() + 1)) + } +} diff --git a/polkadot/relays/messages/src/metrics.rs b/polkadot/relays/messages/src/metrics.rs new file mode 100644 index 00000000000..51a4118be85 --- /dev/null +++ b/polkadot/relays/messages/src/metrics.rs @@ -0,0 +1,110 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
+// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Metrics for message lane relay loop. + +use crate::message_lane::MessageLane; +use crate::message_lane_loop::{SourceClientState, TargetClientState}; + +use bp_messages::MessageNonce; +use relay_utils::metrics::{metric_name, register, GaugeVec, Opts, PrometheusError, Registry, U64}; + +/// Message lane relay metrics. +/// +/// Cloning only clones references. +#[derive(Clone)] +pub struct MessageLaneLoopMetrics { + /// Best finalized block numbers - "source", "target", "source_at_target", "target_at_source". + best_block_numbers: GaugeVec, + /// Lane state nonces: "source_latest_generated", "source_latest_confirmed", + /// "target_latest_received", "target_latest_confirmed". + lane_state_nonces: GaugeVec, +} + +impl MessageLaneLoopMetrics { + /// Create and register messages loop metrics. 
+ pub fn new(registry: &Registry, prefix: Option<&str>) -> Result { + Ok(MessageLaneLoopMetrics { + best_block_numbers: register( + GaugeVec::new( + Opts::new( + metric_name(prefix, "best_block_numbers"), + "Best finalized block numbers", + ), + &["type"], + )?, + registry, + )?, + lane_state_nonces: register( + GaugeVec::new( + Opts::new(metric_name(prefix, "lane_state_nonces"), "Nonces of the lane state"), + &["type"], + )?, + registry, + )?, + }) + } +} + +impl MessageLaneLoopMetrics { + /// Update source client state metrics. + pub fn update_source_state(&self, source_client_state: SourceClientState

) { + self.best_block_numbers + .with_label_values(&["source"]) + .set(source_client_state.best_self.0.into()); + self.best_block_numbers + .with_label_values(&["target_at_source"]) + .set(source_client_state.best_finalized_peer_at_best_self.0.into()); + } + + /// Update target client state metrics. + pub fn update_target_state(&self, target_client_state: TargetClientState

) { + self.best_block_numbers + .with_label_values(&["target"]) + .set(target_client_state.best_self.0.into()); + self.best_block_numbers + .with_label_values(&["source_at_target"]) + .set(target_client_state.best_finalized_peer_at_best_self.0.into()); + } + + /// Update latest generated nonce at source. + pub fn update_source_latest_generated_nonce(&self, source_latest_generated_nonce: MessageNonce) { + self.lane_state_nonces + .with_label_values(&["source_latest_generated"]) + .set(source_latest_generated_nonce); + } + + /// Update latest confirmed nonce at source. + pub fn update_source_latest_confirmed_nonce(&self, source_latest_confirmed_nonce: MessageNonce) { + self.lane_state_nonces + .with_label_values(&["source_latest_confirmed"]) + .set(source_latest_confirmed_nonce); + } + + /// Update latest received nonce at target. + pub fn update_target_latest_received_nonce(&self, target_latest_generated_nonce: MessageNonce) { + self.lane_state_nonces + .with_label_values(&["target_latest_received"]) + .set(target_latest_generated_nonce); + } + + /// Update latest confirmed nonce at target. 
+ pub fn update_target_latest_confirmed_nonce(&self, target_latest_confirmed_nonce: MessageNonce) { + self.lane_state_nonces + .with_label_values(&["target_latest_confirmed"]) + .set(target_latest_confirmed_nonce); + } +} diff --git a/polkadot/relays/utils/Cargo.toml b/polkadot/relays/utils/Cargo.toml new file mode 100644 index 00000000000..ff80cab5338 --- /dev/null +++ b/polkadot/relays/utils/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "relay-utils" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +ansi_term = "0.12" +async-std = "1.6.5" +async-trait = "0.1.40" +backoff = "0.2" +isahc = "1.2" +env_logger = "0.8.2" +futures = "0.3.5" +jsonpath_lib = "0.2" +log = "0.4.11" +num-traits = "0.2" +serde_json = "1.0" +sysinfo = "0.15" +time = "0.2" + +# Substrate dependencies + +substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/relays/utils/src/initialize.rs b/polkadot/relays/utils/src/initialize.rs new file mode 100644 index 00000000000..7d5f66a5381 --- /dev/null +++ b/polkadot/relays/utils/src/initialize.rs @@ -0,0 +1,95 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! 
Relayer initialization functions. + +use std::{fmt::Display, io::Write}; + +/// Initialize relay environment. +pub fn initialize_relay() { + initialize_logger(true); +} + +/// Initialize Relay logger instance. +pub fn initialize_logger(with_timestamp: bool) { + let mut builder = env_logger::Builder::new(); + builder.filter_level(log::LevelFilter::Warn); + builder.filter_module("bridge", log::LevelFilter::Info); + builder.parse_default_env(); + if with_timestamp { + builder.format(move |buf, record| { + let timestamp = time::OffsetDateTime::try_now_local() + .unwrap_or_else(|_| time::OffsetDateTime::now_utc()) + .format("%Y-%m-%d %H:%M:%S %z"); + + let log_level = color_level(record.level()); + let log_target = color_target(record.target()); + let timestamp = if cfg!(windows) { + Either::Left(timestamp) + } else { + Either::Right(ansi_term::Colour::Fixed(8).bold().paint(timestamp)) + }; + + writeln!(buf, "{} {} {} {}", timestamp, log_level, log_target, record.args(),) + }); + } else { + builder.format(move |buf, record| { + let log_level = color_level(record.level()); + let log_target = color_target(record.target()); + + writeln!(buf, "{} {} {}", log_level, log_target, record.args(),) + }); + } + + builder.init(); +} + +enum Either { + Left(A), + Right(B), +} +impl Display for Either { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Self::Left(a) => write!(fmt, "{}", a), + Self::Right(b) => write!(fmt, "{}", b), + } + } +} + +fn color_target(target: &str) -> impl Display + '_ { + if cfg!(windows) { + Either::Left(target) + } else { + Either::Right(ansi_term::Colour::Fixed(8).paint(target)) + } +} + +fn color_level(level: log::Level) -> impl Display { + if cfg!(windows) { + Either::Left(level) + } else { + let s = level.to_string(); + use ansi_term::Colour as Color; + Either::Right(match level { + log::Level::Error => Color::Fixed(9).bold().paint(s), + log::Level::Warn => Color::Fixed(11).bold().paint(s), + log::Level::Info => 
Color::Fixed(10).paint(s), + log::Level::Debug => Color::Fixed(14).paint(s), + log::Level::Trace => Color::Fixed(12).paint(s), + }) + } +} diff --git a/polkadot/relays/utils/src/lib.rs b/polkadot/relays/utils/src/lib.rs new file mode 100644 index 00000000000..446e00cd23e --- /dev/null +++ b/polkadot/relays/utils/src/lib.rs @@ -0,0 +1,277 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Utilities used by different relays. + +pub use relay_loop::{relay_loop, relay_metrics}; + +use backoff::{backoff::Backoff, ExponentialBackoff}; +use futures::future::FutureExt; +use std::time::Duration; + +/// Max delay after connection-unrelated error happened before we'll try the +/// same request again. +pub const MAX_BACKOFF_INTERVAL: Duration = Duration::from_secs(60); +/// Delay after connection-related error happened before we'll try +/// reconnection again. +pub const CONNECTION_ERROR_DELAY: Duration = Duration::from_secs(10); + +pub mod initialize; +pub mod metrics; +pub mod relay_loop; + +/// Block number traits shared by all chains that relay is able to serve. 
+pub trait BlockNumberBase: + 'static + + From + + Into + + Ord + + Clone + + Copy + + Default + + Send + + Sync + + std::fmt::Debug + + std::fmt::Display + + std::hash::Hash + + std::ops::Add + + std::ops::Sub + + num_traits::CheckedSub + + num_traits::Saturating + + num_traits::Zero + + num_traits::One +{ +} + +impl BlockNumberBase for T where + T: 'static + + From + + Into + + Ord + + Clone + + Copy + + Default + + Send + + Sync + + std::fmt::Debug + + std::fmt::Display + + std::hash::Hash + + std::ops::Add + + std::ops::Sub + + num_traits::CheckedSub + + num_traits::Saturating + + num_traits::Zero + + num_traits::One +{ +} + +/// Macro that returns (client, Err(error)) tuple from function if result is Err(error). +#[macro_export] +macro_rules! bail_on_error { + ($result: expr) => { + match $result { + (client, Ok(result)) => (client, result), + (client, Err(error)) => return (client, Err(error)), + } + }; +} + +/// Macro that returns (client, Err(error)) tuple from function if result is Err(error). +#[macro_export] +macro_rules! bail_on_arg_error { + ($result: expr, $client: ident) => { + match $result { + Ok(result) => result, + Err(error) => return ($client, Err(error)), + } + }; +} + +/// Ethereum header Id. +#[derive(Debug, Default, Clone, Copy, Eq, Hash, PartialEq)] +pub struct HeaderId(pub Number, pub Hash); + +/// Error type that can signal connection errors. +pub trait MaybeConnectionError { + /// Returns true if error (maybe) represents connection error. + fn is_connection_error(&self) -> bool; +} + +/// Stringified error that may be either connection-related or not. +#[derive(Debug)] +pub enum StringifiedMaybeConnectionError { + /// The error is connection-related error. + Connection(String), + /// The error is connection-unrelated error. + NonConnection(String), +} + +impl StringifiedMaybeConnectionError { + /// Create new stringified connection error. 
+ pub fn new(is_connection_error: bool, error: String) -> Self { + if is_connection_error { + StringifiedMaybeConnectionError::Connection(error) + } else { + StringifiedMaybeConnectionError::NonConnection(error) + } + } +} + +impl MaybeConnectionError for StringifiedMaybeConnectionError { + fn is_connection_error(&self) -> bool { + match *self { + StringifiedMaybeConnectionError::Connection(_) => true, + StringifiedMaybeConnectionError::NonConnection(_) => false, + } + } +} + +impl ToString for StringifiedMaybeConnectionError { + fn to_string(&self) -> String { + match *self { + StringifiedMaybeConnectionError::Connection(ref err) => err.clone(), + StringifiedMaybeConnectionError::NonConnection(ref err) => err.clone(), + } + } +} + +/// Exponential backoff for connection-unrelated errors retries. +pub fn retry_backoff() -> ExponentialBackoff { + ExponentialBackoff { + // we do not want relayer to stop + max_elapsed_time: None, + max_interval: MAX_BACKOFF_INTERVAL, + ..Default::default() + } +} + +/// Compact format of IDs vector. +pub fn format_ids(mut ids: impl ExactSizeIterator) -> String { + const NTH_PROOF: &str = "we have checked len; qed"; + match ids.len() { + 0 => "".into(), + 1 => format!("{:?}", ids.next().expect(NTH_PROOF)), + 2 => { + let id0 = ids.next().expect(NTH_PROOF); + let id1 = ids.next().expect(NTH_PROOF); + format!("[{:?}, {:?}]", id0, id1) + } + len => { + let id0 = ids.next().expect(NTH_PROOF); + let id_last = ids.last().expect(NTH_PROOF); + format!("{}:[{:?} ... {:?}]", len, id0, id_last) + } + } +} + +/// Stream that emits item every `timeout_ms` milliseconds. +pub fn interval(timeout: Duration) -> impl futures::Stream { + futures::stream::unfold((), move |_| async move { + async_std::task::sleep(timeout).await; + Some(((), ())) + }) +} + +/// Which client has caused error. +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum FailedClient { + /// It is the source client who has caused error. 
+ Source, + /// It is the target client who has caused error. + Target, + /// Both clients are failing, or we just encountered some other error that + /// should be treated like that. + Both, +} + +/// Future process result. +#[derive(Debug, Clone, Copy)] +pub enum ProcessFutureResult { + /// Future has been processed successfully. + Success, + /// Future has failed with non-connection error. + Failed, + /// Future has failed with connection error. + ConnectionFailed, +} + +impl ProcessFutureResult { + /// Returns true if result is Success. + pub fn is_ok(self) -> bool { + match self { + ProcessFutureResult::Success => true, + ProcessFutureResult::Failed | ProcessFutureResult::ConnectionFailed => false, + } + } + + /// Returns Ok(true) if future has succeeded. + /// Returns Ok(false) if future has failed with non-connection error. + /// Returns Err if future is `ConnectionFailed`. + pub fn fail_if_connection_error(self, failed_client: FailedClient) -> Result { + match self { + ProcessFutureResult::Success => Ok(true), + ProcessFutureResult::Failed => Ok(false), + ProcessFutureResult::ConnectionFailed => Err(failed_client), + } + } +} + +/// Process result of the future from a client. +pub fn process_future_result( + result: Result, + retry_backoff: &mut ExponentialBackoff, + on_success: impl FnOnce(TResult), + go_offline_future: &mut std::pin::Pin<&mut futures::future::Fuse>, + go_offline: impl FnOnce(Duration) -> TGoOfflineFuture, + error_pattern: impl FnOnce() -> String, +) -> ProcessFutureResult +where + TError: std::fmt::Debug + MaybeConnectionError, + TGoOfflineFuture: FutureExt, +{ + match result { + Ok(result) => { + on_success(result); + retry_backoff.reset(); + ProcessFutureResult::Success + } + Err(error) if error.is_connection_error() => { + log::error!( + target: "bridge", + "{}: {:?}. 
Going to restart", + error_pattern(), + error, + ); + + retry_backoff.reset(); + go_offline_future.set(go_offline(CONNECTION_ERROR_DELAY).fuse()); + ProcessFutureResult::ConnectionFailed + } + Err(error) => { + let retry_delay = retry_backoff.next_backoff().unwrap_or(CONNECTION_ERROR_DELAY); + log::error!( + target: "bridge", + "{}: {:?}. Retrying in {}", + error_pattern(), + error, + retry_delay.as_secs_f64(), + ); + + go_offline_future.set(go_offline(retry_delay).fuse()); + ProcessFutureResult::Failed + } + } +} diff --git a/polkadot/relays/utils/src/metrics.rs b/polkadot/relays/utils/src/metrics.rs new file mode 100644 index 00000000000..c0eaeae337e --- /dev/null +++ b/polkadot/relays/utils/src/metrics.rs @@ -0,0 +1,162 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +pub use float_json_value::FloatJsonValueMetric; +pub use global::GlobalMetrics; +pub use substrate_prometheus_endpoint::{ + prometheus::core::{Atomic, Collector}, + register, Counter, CounterVec, Gauge, GaugeVec, Opts, PrometheusError, Registry, F64, U64, +}; + +use async_trait::async_trait; +use std::{fmt::Debug, time::Duration}; + +mod float_json_value; +mod global; + +/// Unparsed address that needs to be used to expose Prometheus metrics. 
+#[derive(Debug, Clone)] +pub struct MetricsAddress { + /// Serve HTTP requests at given host. + pub host: String, + /// Serve HTTP requests at given port. + pub port: u16, +} + +/// Prometheus endpoint MetricsParams. +#[derive(Debug, Clone)] +pub struct MetricsParams { + /// Interface and TCP port to be used when exposing Prometheus metrics. + pub address: Option, + /// Metrics registry. May be `Some(_)` if several components share the same endpoint. + pub registry: Option, + /// Prefix that must be used in metric names. + pub metrics_prefix: Option, +} + +/// Metrics API. +pub trait Metrics: Clone + Send + Sync + 'static {} + +impl Metrics for T {} + +/// Standalone metrics API. +/// +/// Metrics of this kind know how to update themselves, so we may just spawn and forget the +/// asynchronous self-update task. +#[async_trait] +pub trait StandaloneMetrics: Metrics { + /// Update metric values. + async fn update(&self); + + /// Metrics update interval. + fn update_interval(&self) -> Duration; + + /// Spawn the self update task that will keep update metric value at given intervals. + fn spawn(self) { + async_std::task::spawn(async move { + let update_interval = self.update_interval(); + loop { + self.update().await; + async_std::task::sleep(update_interval).await; + } + }); + } +} + +impl Default for MetricsAddress { + fn default() -> Self { + MetricsAddress { + host: "127.0.0.1".into(), + port: 9616, + } + } +} + +impl MetricsParams { + /// Creates metrics params so that metrics are not exposed. + pub fn disabled() -> Self { + MetricsParams { + address: None, + registry: None, + metrics_prefix: None, + } + } + + /// Do not expose metrics. + pub fn disable(mut self) -> Self { + self.address = None; + self + } + + /// Set prefix to use in metric names. 
+ pub fn metrics_prefix(mut self, prefix: String) -> Self { + self.metrics_prefix = Some(prefix); + self + } +} + +impl From> for MetricsParams { + fn from(address: Option) -> Self { + MetricsParams { + address, + registry: None, + metrics_prefix: None, + } + } +} + +/// Returns metric name optionally prefixed with given prefix. +pub fn metric_name(prefix: Option<&str>, name: &str) -> String { + if let Some(prefix) = prefix { + format!("{}_{}", prefix, name) + } else { + name.into() + } +} + +/// Set value of gauge metric. +/// +/// If value is `Ok(None)` or `Err(_)`, metric would have default value. +pub fn set_gauge_value, E: Debug>(gauge: &Gauge, value: Result, E>) { + gauge.set(match value { + Ok(Some(value)) => { + log::trace!( + target: "bridge-metrics", + "Updated value of metric '{:?}': {:?}", + gauge.desc().first().map(|d| &d.fq_name), + value, + ); + value + } + Ok(None) => { + log::warn!( + target: "bridge-metrics", + "Failed to update metric '{:?}': value is empty", + gauge.desc().first().map(|d| &d.fq_name), + ); + Default::default() + } + Err(error) => { + log::warn!( + target: "bridge-metrics", + "Failed to update metric '{:?}': {:?}", + gauge.desc().first().map(|d| &d.fq_name), + error, + ); + Default::default() + } + }) +} diff --git a/polkadot/relays/utils/src/metrics/float_json_value.rs b/polkadot/relays/utils/src/metrics/float_json_value.rs new file mode 100644 index 00000000000..d61f9cac7c2 --- /dev/null +++ b/polkadot/relays/utils/src/metrics/float_json_value.rs @@ -0,0 +1,121 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::metrics::{metric_name, register, Gauge, PrometheusError, Registry, StandaloneMetrics, F64}; + +use async_trait::async_trait; +use std::time::Duration; + +/// Value update interval. +const UPDATE_INTERVAL: Duration = Duration::from_secs(60); + +/// Metric that represents float value received from HTTP service as float gauge. +#[derive(Debug, Clone)] +pub struct FloatJsonValueMetric { + url: String, + json_path: String, + metric: Gauge, +} + +impl FloatJsonValueMetric { + /// Create new metric instance with given name and help. + pub fn new( + registry: &Registry, + prefix: Option<&str>, + url: String, + json_path: String, + name: String, + help: String, + ) -> Result { + Ok(FloatJsonValueMetric { + url, + json_path, + metric: register(Gauge::new(metric_name(prefix, &name), help)?, registry)?, + }) + } + + /// Read value from HTTP service. + async fn read_value(&self) -> Result { + use isahc::{AsyncReadResponseExt, HttpClient, Request}; + + fn map_isahc_err(err: impl std::fmt::Display) -> String { + format!("Failed to fetch token price from remote server: {}", err) + } + + let request = Request::get(&self.url) + .header("Accept", "application/json") + .body(()) + .map_err(map_isahc_err)?; + let raw_response = HttpClient::new() + .map_err(map_isahc_err)? + .send_async(request) + .await + .map_err(map_isahc_err)? 
+ .text() + .await + .map_err(map_isahc_err)?; + + parse_service_response(&self.json_path, &raw_response) + } +} + +#[async_trait] +impl StandaloneMetrics for FloatJsonValueMetric { + fn update_interval(&self) -> Duration { + UPDATE_INTERVAL + } + + async fn update(&self) { + crate::metrics::set_gauge_value(&self.metric, self.read_value().await.map(Some)); + } +} + +/// Parse HTTP service response. +fn parse_service_response(json_path: &str, response: &str) -> Result { + let json = serde_json::from_str(response).map_err(|err| { + format!( + "Failed to parse HTTP service response: {:?}. Response: {:?}", + err, response, + ) + })?; + + let mut selector = jsonpath_lib::selector(&json); + let maybe_selected_value = selector(json_path).map_err(|err| { + format!( + "Failed to select value from response: {:?}. Response: {:?}", + err, response, + ) + })?; + let selected_value = maybe_selected_value + .first() + .and_then(|v| v.as_f64()) + .ok_or_else(|| format!("Missing required value from response: {:?}", response,))?; + + Ok(selected_value) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_service_response_works() { + assert_eq!( + parse_service_response("$.kusama.usd", r#"{"kusama":{"usd":433.05}}"#).map_err(drop), + Ok(433.05), + ); + } +} diff --git a/polkadot/relays/utils/src/metrics/global.rs b/polkadot/relays/utils/src/metrics/global.rs new file mode 100644 index 00000000000..d2124805104 --- /dev/null +++ b/polkadot/relays/utils/src/metrics/global.rs @@ -0,0 +1,111 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Global system-wide Prometheus metrics exposed by relays. + +use crate::metrics::{ + metric_name, register, Gauge, GaugeVec, Opts, PrometheusError, Registry, StandaloneMetrics, F64, U64, +}; + +use async_std::sync::{Arc, Mutex}; +use async_trait::async_trait; +use std::time::Duration; +use sysinfo::{ProcessExt, RefreshKind, System, SystemExt}; + +/// Global metrics update interval. +const UPDATE_INTERVAL: Duration = Duration::from_secs(10); + +/// Global Prometheus metrics. +#[derive(Debug, Clone)] +pub struct GlobalMetrics { + system: Arc>, + system_average_load: GaugeVec, + process_cpu_usage_percentage: Gauge, + process_memory_usage_bytes: Gauge, +} + +impl GlobalMetrics { + /// Create and register global metrics. 
+ pub fn new(registry: &Registry, prefix: Option<&str>) -> Result { + Ok(GlobalMetrics { + system: Arc::new(Mutex::new(System::new_with_specifics(RefreshKind::everything()))), + system_average_load: register( + GaugeVec::new( + Opts::new(metric_name(prefix, "system_average_load"), "System load average"), + &["over"], + )?, + registry, + )?, + process_cpu_usage_percentage: register( + Gauge::new(metric_name(prefix, "process_cpu_usage_percentage"), "Process CPU usage")?, + registry, + )?, + process_memory_usage_bytes: register( + Gauge::new( + metric_name(prefix, "process_memory_usage_bytes"), + "Process memory (resident set size) usage", + )?, + registry, + )?, + }) + } +} + +#[async_trait] +impl StandaloneMetrics for GlobalMetrics { + async fn update(&self) { + // update system-wide metrics + let mut system = self.system.lock().await; + let load = system.get_load_average(); + self.system_average_load.with_label_values(&["1min"]).set(load.one); + self.system_average_load.with_label_values(&["5min"]).set(load.five); + self.system_average_load.with_label_values(&["15min"]).set(load.fifteen); + + // update process-related metrics + let pid = sysinfo::get_current_pid().expect( + "only fails where pid is unavailable (os=unknown || arch=wasm32);\ + relay is not supposed to run in such MetricsParamss;\ + qed", + ); + let is_process_refreshed = system.refresh_process(pid); + match (is_process_refreshed, system.get_process(pid)) { + (true, Some(process_info)) => { + let cpu_usage = process_info.cpu_usage() as f64; + let memory_usage = process_info.memory() * 1024; + log::trace!( + target: "bridge-metrics", + "Refreshed process metrics: CPU={}, memory={}", + cpu_usage, + memory_usage, + ); + + self.process_cpu_usage_percentage + .set(if cpu_usage.is_finite() { cpu_usage } else { 0f64 }); + self.process_memory_usage_bytes.set(memory_usage); + } + _ => { + log::warn!( + target: "bridge-metrics", + "Failed to refresh process information. 
Metrics may show obsolete values", + ); + } + } + } + + fn update_interval(&self) -> Duration { + UPDATE_INTERVAL + } +} diff --git a/polkadot/relays/utils/src/relay_loop.rs b/polkadot/relays/utils/src/relay_loop.rs new file mode 100644 index 00000000000..8790b0913e1 --- /dev/null +++ b/polkadot/relays/utils/src/relay_loop.rs @@ -0,0 +1,256 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::metrics::{Metrics, MetricsAddress, MetricsParams, PrometheusError, StandaloneMetrics}; +use crate::{FailedClient, MaybeConnectionError}; + +use async_trait::async_trait; +use std::{fmt::Debug, future::Future, net::SocketAddr, time::Duration}; +use substrate_prometheus_endpoint::{init_prometheus, Registry}; + +/// Default pause between reconnect attempts. +pub const RECONNECT_DELAY: Duration = Duration::from_secs(10); + +/// Basic blockchain client from relay perspective. +#[async_trait] +pub trait Client: Clone + Send + Sync { + /// Type of error this clients returns. + type Error: Debug + MaybeConnectionError; + + /// Try to reconnect to source node. + async fn reconnect(&mut self) -> Result<(), Self::Error>; +} + +/// Returns generic loop that may be customized and started. 
+pub fn relay_loop(source_client: SC, target_client: TC) -> Loop { + Loop { + reconnect_delay: RECONNECT_DELAY, + source_client, + target_client, + loop_metric: None, + } +} + +/// Returns generic relay loop metrics that may be customized and used in one or several relay loops. +pub fn relay_metrics(prefix: Option, params: MetricsParams) -> LoopMetrics<(), (), ()> { + LoopMetrics { + relay_loop: Loop { + reconnect_delay: RECONNECT_DELAY, + source_client: (), + target_client: (), + loop_metric: None, + }, + address: params.address, + registry: params.registry.unwrap_or_else(|| create_metrics_registry(prefix)), + metrics_prefix: params.metrics_prefix, + loop_metric: None, + } +} + +/// Generic relay loop. +pub struct Loop { + reconnect_delay: Duration, + source_client: SC, + target_client: TC, + loop_metric: Option, +} + +/// Relay loop metrics builder. +pub struct LoopMetrics { + relay_loop: Loop, + address: Option, + registry: Registry, + metrics_prefix: Option, + loop_metric: Option, +} + +impl Loop { + /// Customize delay between reconnect attempts. + pub fn reconnect_delay(mut self, reconnect_delay: Duration) -> Self { + self.reconnect_delay = reconnect_delay; + self + } + + /// Start building loop metrics using given prefix. + pub fn with_metrics(self, prefix: Option, params: MetricsParams) -> LoopMetrics { + LoopMetrics { + relay_loop: Loop { + reconnect_delay: self.reconnect_delay, + source_client: self.source_client, + target_client: self.target_client, + loop_metric: None, + }, + address: params.address, + registry: params.registry.unwrap_or_else(|| create_metrics_registry(prefix)), + metrics_prefix: params.metrics_prefix, + loop_metric: None, + } + } + + /// Run relay loop. + /// + /// This function represents an outer loop, which in turn calls provided `run_loop` function to do + /// actual job. When `run_loop` returns, this outer loop reconnects to failed client (source, + /// target or both) and calls `run_loop` again. 
+ pub async fn run(mut self, run_loop: R) -> Result<(), String> + where + R: Fn(SC, TC, Option) -> F, + F: Future>, + SC: Client, + TC: Client, + LM: Clone, + { + loop { + let result = run_loop( + self.source_client.clone(), + self.target_client.clone(), + self.loop_metric.clone(), + ) + .await; + + match result { + Ok(()) => break, + Err(failed_client) => loop { + async_std::task::sleep(self.reconnect_delay).await; + if failed_client == FailedClient::Both || failed_client == FailedClient::Source { + match self.source_client.reconnect().await { + Ok(()) => (), + Err(error) => { + log::warn!( + target: "bridge", + "Failed to reconnect to source client. Going to retry in {}s: {:?}", + self.reconnect_delay.as_secs(), + error, + ); + continue; + } + } + } + if failed_client == FailedClient::Both || failed_client == FailedClient::Target { + match self.target_client.reconnect().await { + Ok(()) => (), + Err(error) => { + log::warn!( + target: "bridge", + "Failed to reconnect to target client. Going to retry in {}s: {:?}", + self.reconnect_delay.as_secs(), + error, + ); + continue; + } + } + } + + break; + }, + } + + log::debug!(target: "bridge", "Restarting relay loop"); + } + + Ok(()) + } +} + +impl LoopMetrics { + /// Add relay loop metrics. + /// + /// Loop metrics will be passed to the loop callback. + pub fn loop_metric( + self, + create_metric: impl FnOnce(&Registry, Option<&str>) -> Result, + ) -> Result, String> { + let loop_metric = create_metric(&self.registry, self.metrics_prefix.as_deref()).map_err(|e| e.to_string())?; + + Ok(LoopMetrics { + relay_loop: self.relay_loop, + address: self.address, + registry: self.registry, + metrics_prefix: self.metrics_prefix, + loop_metric: Some(loop_metric), + }) + } + + /// Add standalone metrics. 
+ pub fn standalone_metric( + self, + create_metric: impl FnOnce(&Registry, Option<&str>) -> Result, + ) -> Result { + // since standalone metrics are updating themselves, we may just ignore the fact that the same + // standalone metric is exposed by several loops && only spawn single metric + match create_metric(&self.registry, self.metrics_prefix.as_deref()) { + Ok(standalone_metrics) => standalone_metrics.spawn(), + Err(PrometheusError::AlreadyReg) => (), + Err(e) => return Err(e.to_string()), + } + + Ok(self) + } + + /// Convert into `MetricsParams` structure so that metrics registry may be extended later. + pub fn into_params(self) -> MetricsParams { + MetricsParams { + address: self.address, + registry: Some(self.registry), + metrics_prefix: self.metrics_prefix, + } + } + + /// Expose metrics using address passed at creation. + /// + /// If passed `address` is `None`, metrics are not exposed. + pub async fn expose(self) -> Result, String> { + if let Some(address) = self.address { + let socket_addr = SocketAddr::new( + address.host.parse().map_err(|err| { + format!( + "Invalid host {} is used to expose Prometheus metrics: {}", + address.host, err, + ) + })?, + address.port, + ); + + let registry = self.registry; + async_std::task::spawn(async move { + let result = init_prometheus(socket_addr, registry).await; + log::trace!( + target: "bridge-metrics", + "Prometheus endpoint has exited with result: {:?}", + result, + ); + }); + } + + Ok(Loop { + reconnect_delay: self.relay_loop.reconnect_delay, + source_client: self.relay_loop.source_client, + target_client: self.relay_loop.target_client, + loop_metric: self.loop_metric, + }) + } +} + +/// Create new registry with global metrics. 
+fn create_metrics_registry(prefix: Option) -> Registry { + match prefix { + Some(prefix) => { + assert!(!prefix.is_empty(), "Metrics prefix can not be empty"); + Registry::new_custom(Some(prefix), None).expect("only fails if prefix is empty; prefix is not empty; qed") + } + None => Registry::new(), + } +} diff --git a/polkadot/rustfmt.toml b/polkadot/rustfmt.toml new file mode 100644 index 00000000000..8ded863e80a --- /dev/null +++ b/polkadot/rustfmt.toml @@ -0,0 +1,3 @@ +hard_tabs = true +max_width = 120 +edition = "2018" diff --git a/polkadot/scripts/add_license.sh b/polkadot/scripts/add_license.sh new file mode 100755 index 00000000000..49864b47c05 --- /dev/null +++ b/polkadot/scripts/add_license.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +PAT_GPL="^// Copyright.*If not, see \.$" +PAT_OTHER="^// Copyright" + +SCRIPTS_DIR=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P ) + +for f in $(find . -type f | egrep '\.(c|cpp|rs)$'); do + HEADER=$(head -16 $f) + if [[ $HEADER =~ $PAT_GPL ]]; then + BODY=$(tail -n +17 $f) + cat $SCRIPTS_DIR/license_header > temp + echo "$BODY" >> temp + mv temp $f + elif [[ $HEADER =~ $PAT_OTHER ]]; then + echo "Other license was found do nothing" + else + echo "$f was missing header" + cat $SCRIPTS_DIR/license_header $f > temp + mv temp $f + fi +done diff --git a/polkadot/scripts/ci-cache.sh b/polkadot/scripts/ci-cache.sh new file mode 100755 index 00000000000..040d44fa74a --- /dev/null +++ b/polkadot/scripts/ci-cache.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +set -xeu + +echo $CARGO_TARGET_DIR; +mkdir -p $CARGO_TARGET_DIR; +echo "Current Rust nightly version:"; +rustc +nightly --version; +echo "Cached Rust nightly version:"; +if [ ! 
-f $CARGO_TARGET_DIR/check_nightly_rust ]; then + echo "" > $CARGO_TARGET_DIR/check_nightly_rust; +fi +cat $CARGO_TARGET_DIR/check_nightly_rust; +if [[ $(cat $CARGO_TARGET_DIR/check_nightly_rust) == $(rustc +nightly --version) ]]; then + echo "The Rust nightly version has not changed"; +else + echo "The Rust nightly version has changed. Clearing the cache"; + rm -rf $CARGO_TARGET_DIR/*; +fi diff --git a/polkadot/scripts/dump-logs.sh b/polkadot/scripts/dump-logs.sh new file mode 100644 index 00000000000..f076cbccbca --- /dev/null +++ b/polkadot/scripts/dump-logs.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# A script to dump logs from selected important docker containers +# to make it easier to analyze locally. + +set -xeu + +DATE=$(date +"%Y-%m-%d-%T") +LOGS_DIR="${DATE//:/-}-logs" +mkdir $LOGS_DIR +cd $LOGS_DIR + +# From $ docker ps --format '{{.Names}}' + +SERVICES=(\ + deployments_relay-messages-millau-to-rialto-generator_1 \ + deployments_relay-messages-rialto-to-millau-generator_1 \ + deployments_relay-messages-millau-to-rialto_1 \ + deployments_relay-messages-rialto-to-millau_1 \ + deployments_relay-headers-millau-to-rialto_1 \ + deployments_relay-headers-rialto-to-millau_1 \ + deployments_rialto-node-alice_1 \ + deployments_rialto-node-bob_1 \ + deployments_millau-node-alice_1 \ + deployments_millau-node-bob_1 \ +) + +for SVC in ${SERVICES[*]} +do + SHORT_NAME="${SVC//deployments_/}" + docker logs $SVC &> $SHORT_NAME.log +done + +cd - +tar cvjf $LOGS_DIR.tar.bz2 $LOGS_DIR diff --git a/polkadot/scripts/license_header b/polkadot/scripts/license_header new file mode 100644 index 00000000000..f9b301209bb --- /dev/null +++ b/polkadot/scripts/license_header @@ -0,0 +1,16 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + diff --git a/polkadot/scripts/run-eth2sub-relay.sh b/polkadot/scripts/run-eth2sub-relay.sh new file mode 100755 index 00000000000..2cf64a93780 --- /dev/null +++ b/polkadot/scripts/run-eth2sub-relay.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +# Run a development instance of the Ethereum to Substrate relay. Needs running +# Substrate and Ethereum nodes in order to work. + +RUST_LOG=rpc=trace,bridge=trace ./target/debug/ethereum-poa-relay eth-to-sub diff --git a/polkadot/scripts/run-openethereum-node.sh b/polkadot/scripts/run-openethereum-node.sh new file mode 100755 index 00000000000..62089baffe4 --- /dev/null +++ b/polkadot/scripts/run-openethereum-node.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# This script assumes that an OpenEthereum build is available. The repo +# should be at the same level as the `parity-bridges-common` repo. 
+ +RUST_LOG=rpc=trace,txqueue=trace,bridge-builtin=trace \ +../openethereum/target/debug/openethereum \ + --config="$(pwd)"/deployments/dev/poa-config/poa-node-config \ + --node-key=arthur \ + --engine-signer=0x005e714f896a8b7cede9d38688c1a81de72a58e4 \ + --base-path=/tmp/oe-dev-node \ diff --git a/polkadot/scripts/send-message-from-millau-rialto.sh b/polkadot/scripts/send-message-from-millau-rialto.sh new file mode 100755 index 00000000000..10fe24087fa --- /dev/null +++ b/polkadot/scripts/send-message-from-millau-rialto.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Used for manually sending a message to a running network. +# +# You could for example spin up a full network using the Docker Compose files +# we have (to make sure the message relays are running), but remove the message +# generator service. From there you may submit messages manually using this script. + +MILLAU_PORT="${RIALTO_PORT:-9945}" + +case "$1" in + remark) + RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \ + ./target/debug/substrate-relay send-message MillauToRialto \ + --source-host localhost \ + --source-port $MILLAU_PORT \ + --source-signer //Alice \ + --target-signer //Bob \ + --lane 00000000 \ + --origin Target \ + remark \ + ;; + transfer) + RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \ + ./target/debug/substrate-relay send-message MillauToRialto \ + --source-host localhost \ + --source-port $MILLAU_PORT \ + --source-signer //Alice \ + --target-signer //Bob \ + --lane 00000000 \ + --origin Target \ + transfer \ + --amount 100000000000000 \ + --recipient 5DZvVvd1udr61vL7Xks17TFQ4fi9NiagYLaBobnbPCP14ewA \ + ;; + *) echo "A message type is require. 
Supported messages: remark, transfer."; exit 1;; +esac diff --git a/polkadot/scripts/send-message-from-rialto-millau.sh b/polkadot/scripts/send-message-from-rialto-millau.sh new file mode 100755 index 00000000000..52d19e3af88 --- /dev/null +++ b/polkadot/scripts/send-message-from-rialto-millau.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Used for manually sending a message to a running network. +# +# You could for example spin up a full network using the Docker Compose files +# we have (to make sure the message relays are running), but remove the message +# generator service. From there you may submit messages manually using this script. + +RIALTO_PORT="${RIALTO_PORT:-9944}" + +case "$1" in + remark) + RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \ + ./target/debug/substrate-relay send-message RialtoToMillau \ + --source-host localhost \ + --source-port $RIALTO_PORT \ + --target-signer //Alice \ + --source-signer //Bob \ + --lane 00000000 \ + --origin Target \ + remark \ + ;; + transfer) + RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \ + ./target/debug/substrate-relay send-message RialtoToMillau \ + --source-host localhost \ + --source-port $RIALTO_PORT \ + --target-signer //Alice \ + --source-signer //Bob \ + --lane 00000000 \ + --origin Target \ + transfer \ + --amount 100000000000000 \ + --recipient 5DZvVvd1udr61vL7Xks17TFQ4fi9NiagYLaBobnbPCP14ewA \ + ;; + *) echo "A message type is require. Supported messages: remark, transfer."; exit 1;; +esac diff --git a/polkadot/scripts/update-weights.sh b/polkadot/scripts/update-weights.sh new file mode 100755 index 00000000000..0ac773e8d7b --- /dev/null +++ b/polkadot/scripts/update-weights.sh @@ -0,0 +1,31 @@ +#!/bin/sh +# +# Runtime benchmarks for the `pallet-bridge-messages` and `pallet-bridge-grandpa` pallets. +# +# Run this script from root of the repo. 
+ +set -eux + +time cargo run --release -p rialto-bridge-node --features=runtime-benchmarks -- benchmark \ + --chain=dev \ + --steps=50 \ + --repeat=20 \ + --pallet=pallet_bridge_messages \ + --extrinsic=* \ + --execution=wasm \ + --wasm-execution=Compiled \ + --heap-pages=4096 \ + --output=./modules/messages/src/weights.rs \ + --template=./.maintain/rialto-weight-template.hbs + +time cargo run --release -p rialto-bridge-node --features=runtime-benchmarks -- benchmark \ + --chain=dev \ + --steps=50 \ + --repeat=20 \ + --pallet=pallet_bridge_grandpa \ + --extrinsic=* \ + --execution=wasm \ + --wasm-execution=Compiled \ + --heap-pages=4096 \ + --output=./modules/grandpa/src/weights.rs \ + --template=./.maintain/rialto-weight-template.hbs diff --git a/polkadot/scripts/update_substrate.sh b/polkadot/scripts/update_substrate.sh new file mode 100755 index 00000000000..f7715bda5d1 --- /dev/null +++ b/polkadot/scripts/update_substrate.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +# One-liner to update between Substrate releases +# Usage: ./update_substrate.sh 2.0.0-rc6 2.0.0 +set -xeu + +OLD_VERSION=$1 +NEW_VERSION=$2 + +find . -type f -name 'Cargo.toml' -exec sed -i '' -e "s/$OLD_VERSION/$NEW_VERSION/g" {} \; -- GitLab